Kieran Fraser committed • c484046
Parent(s): a84e7e3
Fixing noise, adding notebook.
Signed-off-by: Kieran Fraser <[email protected]>
app.py
CHANGED
@@ -158,11 +158,11 @@ def clf_evasion_evaluate(*args):
         for i, im in enumerate(x_adv):
             adv_gallery_out.append(( im.transpose(1,2,0), label_names[np.argmax(outputs[i])] ))
 
-        delta = ((x_subset - x_adv) + attack_eps)
-        delta
-
-        delta = (delta-np.min(delta))/(np.max(delta)-np.min(delta))'''
+        delta = ((x_subset - x_adv) + attack_eps) * 10 # shift to 0 and make perturbations 10x larger to visualise them
+        if delta.max()>1:
+            delta = (delta-np.min(delta))/(np.max(delta)-np.min(delta))
         delta[delta>1] = 1
+        delta[delta<0] = 0
         delta_gallery_out = delta.transpose(0, 2, 3, 1)
 
     if attack == "Adversarial Patch":
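Note on this hunk: the new delta block shifts the signed perturbation so it is non-negative, amplifies it 10x so the noise is visible, rescales if that overshoots the display range, and clips into [0, 1] before transposing to HWC for the image gallery. A minimal standalone sketch of the same idea, assuming NCHW float arrays in [0, 1] (the function name and arguments below are illustrative, not part of app.py):

    import numpy as np

    def visualise_perturbation(x_clean, x_adv, eps, scale=10.0):
        """Turn the signed perturbation (x_clean - x_adv) into displayable HWC images in [0, 1]."""
        delta = ((x_clean - x_adv) + eps) * scale   # shift by eps so values start near 0, then amplify
        if delta.max() > 1:                         # only rescale when amplification overshoots the display range
            delta = (delta - delta.min()) / (delta.max() - delta.min())
        delta = np.clip(delta, 0.0, 1.0)            # same effect as delta[delta>1]=1 and delta[delta<0]=0
        return delta.transpose(0, 2, 3, 1)          # NCHW -> NHWC for the gallery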
@@ -221,7 +221,9 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
         common red-team workflow to assess model vulnerability to evasion attacks ⚔️</p>''')
 
     gr.Markdown('''<p style="font-size: 18px; text-align: justify"><i>Check out the full suite of features provided by ART <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox"
-        target="blank_">here</a
+        target="blank_">here</a>. To dive further into evasion attacks with Hugging Face and ART, check out our
+        <a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/main/notebooks/hugging_face_evasion.ipynb"
+        target="_blank">notebook</a>. Also feel free to contribute and give our repo a ⭐.</i></p>''')
 
     gr.Markdown('''<hr/>''')
 
@@ -253,7 +255,7 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
     gr.Markdown('''<hr/>''')
 
     gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Now as a responsible AI expert, you wish to assert that your model is not vulnerable to
-        attacks which might manipulate the prediction. For instance, ships become classified as birds. To do this, you will
+        attacks which might manipulate the prediction. For instance, ships become classified as birds. To do this, you will deploy
         adversarial attacks against your own model and assess its performance.</p>''')
 
     gr.Markdown('''<p style="text-align: justify; font-size: 18px">ℹ️ Below are two common types of evasion attack. Both create adversarial images, which at first glance, seem the same as the original images,
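The text in this hunk describes the demo's workflow: deploy evasion attacks against your own model, then assess its performance. That assessment amounts to comparing predictions on clean versus adversarial images; a rough sketch of the check for an ART classifier follows (the helper name is illustrative, not part of app.py):

    import numpy as np

    def assess_evasion(classifier, x_clean, x_adv, y_true):
        """Compare an ART classifier's accuracy on clean vs adversarial images."""
        clean_preds = np.argmax(classifier.predict(x_clean), axis=1)
        adv_preds = np.argmax(classifier.predict(x_adv), axis=1)
        return {
            "clean_accuracy": float(np.mean(clean_preds == y_true)),
            "adversarial_accuracy": float(np.mean(adv_preds == y_true)),
            "predictions_flipped": float(np.mean(clean_preds != adv_preds)),
        }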
@@ -271,8 +273,8 @@ with gr.Blocks(css=css, theme='Tshackelton/IBMPlex-DenseReadable') as demo:
            with gr.Column(scale=1):
                attack = gr.Textbox(visible=True, value="PGD", label="Attack", interactive=False)
                max_iter = gr.Slider(minimum=1, maximum=10, label="Max iterations", value=4)
-                eps = gr.Slider(minimum=0.0001, maximum=1, label="Epslion", value=0.
-                eps_steps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon steps", value=0.
+                eps = gr.Slider(minimum=0.0001, maximum=1, label="Epslion", value=0.3)
+                eps_steps = gr.Slider(minimum=0.0001, maximum=1, label="Epsilon steps", value=0.03)
                bt_eval_pgd = gr.Button("Evaluate")
 
            # Evaluation Output. Visualisations of success/failures of running evaluation attacks.
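The eps and eps_steps sliders in this hunk set PGD's perturbation budget and per-iteration step size. A hedged sketch of how those slider values would typically be passed to ART's ProjectedGradientDescent follows; note that ART's keyword is eps_step (singular), and the classifier argument is assumed to be the ART-wrapped model built elsewhere in app.py:

    from art.attacks.evasion import ProjectedGradientDescent

    def run_pgd(classifier, x_subset, max_iter=4, eps=0.3, eps_step=0.03):
        """Map the demo's slider values onto ART's PGD attack and return adversarial images."""
        attack = ProjectedGradientDescent(estimator=classifier, max_iter=max_iter,
                                          eps=eps, eps_step=eps_step)
        return attack.generate(x=x_subset)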