Spaces · Runtime error
RamAnanth1 committed · Commit d23d4b7
1 Parent(s): 87f92ff
Create app.py
app.py ADDED
@@ -0,0 +1,104 @@
+import gradio as gr
+import os
+from PIL import Image, ImageOps
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import requests
+from tqdm import tqdm
+
+from diffusers import StableDiffusionImg2ImgPipeline
+import torchvision.transforms as T
+
+from utils import preprocess, recover_image
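+# utils is a local helper module in this Space (not part of this commit);
+# recover_image is imported but never used below. For reference, a minimal,
+# hypothetical sketch of what preprocess is assumed to do, inferred from how
+# its output is used (clamped to [-1, 1], indexed as a batch):
+#
+#     def preprocess(image):
+#         # PIL image -> float tensor in [-1, 1], shape (1, 3, H, W)
+#         x = T.ToTensor()(image).unsqueeze(0)
+#         return 2.0 * x - 1.0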
+
+to_pil = T.ToPILImage()
+
+title = "Interactive demo: Raising the Cost of Malicious AI-Powered Image Editing"
+
+model_id_or_path = "runwayml/stable-diffusion-v1-5"
+# model_id_or_path = "CompVis/stable-diffusion-v1-4"
+# model_id_or_path = "CompVis/stable-diffusion-v1-3"
+# model_id_or_path = "CompVis/stable-diffusion-v1-2"
+# model_id_or_path = "CompVis/stable-diffusion-v1-1"
+
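+# Load the Stable Diffusion img2img pipeline in half precision; the demo
+# assumes a CUDA GPU is available.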
+pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
+    model_id_or_path,
+    revision="fp16",
+    torch_dtype=torch.float16,
+)
+pipe_img2img = pipe_img2img.to("cuda")
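+
+# Projected gradient descent (PGD) on the pipeline's VAE encoder: each step
+# takes a signed-gradient step that shrinks the norm of the encoded latent
+# mean, then projects the image back into an L-infinity ball of radius eps
+# around the original, keeping the perturbation visually small.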
+def pgd(X, model, eps=0.1, step_size=0.015, iters=40, clamp_min=0, clamp_max=1, mask=None):
+    # start from a random point inside the eps-ball around X
+    X_adv = X.clone().detach() + (torch.rand(*X.shape) * 2 * eps - eps).cuda()
+    pbar = tqdm(range(iters))
+    for i in pbar:
+        # decay the step size linearly from step_size down to step_size / 100
+        actual_step_size = step_size - (step_size - step_size / 100) / iters * i
+
+        X_adv.requires_grad_(True)
+
+        loss = (model(X_adv).latent_dist.mean).norm()
+
+        pbar.set_description(f"[Running attack]: Loss {loss.item():.5f} | step size: {actual_step_size:.4}")
+
+        grad, = torch.autograd.grad(loss, [X_adv])
+
+        # descend on the loss, then project back into the eps-ball and clamp
+        X_adv = X_adv - grad.detach().sign() * actual_step_size
+        X_adv = torch.minimum(torch.maximum(X_adv, X - eps), X + eps)
+        X_adv.data = torch.clamp(X_adv, min=clamp_min, max=clamp_max)
+        X_adv.grad = None
+
+        if mask is not None:
+            X_adv.data *= mask
+
+    return X_adv
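+
+# Gradio handler: "immunize" the uploaded image with PGD, then run the same
+# img2img edit on the original and the immunized image for comparison.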
+def process_image(raw_image, prompt):
+    resize = T.transforms.Resize(512)
+    center_crop = T.transforms.CenterCrop(512)
+    init_image = center_crop(resize(raw_image))
+    with torch.autocast('cuda'):
+        X = preprocess(init_image).half().cuda()
+        adv_X = pgd(X,
+                    model=pipe_img2img.vae.encode,
+                    clamp_min=-1,
+                    clamp_max=1,
+                    eps=0.06,       # The higher, the less imperceptible the attack is
+                    step_size=0.02, # Set smaller than eps
+                    iters=100,      # The higher, the stronger your attack will be
+                   )
+
+        # convert pixels back to [0, 1] range
+        adv_X = (adv_X / 2 + 0.5).clamp(0, 1)
+
+        adv_image = to_pil(adv_X[0]).convert("RGB")
+
+    # use the prompt supplied through the Gradio Textbox instead of
+    # overwriting it (sample prompt: "dog under heavy rain and muddy ground real")
+
+    # a good seed (uncomment the line below to generate new images)
+    SEED = 9222
+    # SEED = np.random.randint(low=0, high=10000)
+
+    # Play with these for improving generated image quality
+    STRENGTH = 0.5
+    GUIDANCE = 7.5
+    NUM_STEPS = 50
+
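+    # Run the same edit twice with an identical seed, once on the original
+    # image and once on the immunized one, so the outputs differ only due to
+    # the adversarial perturbation.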
+    with torch.autocast('cuda'):
+        torch.manual_seed(SEED)
+        image_nat = pipe_img2img(prompt=prompt, image=init_image, strength=STRENGTH, guidance_scale=GUIDANCE, num_inference_steps=NUM_STEPS).images[0]
+        torch.manual_seed(SEED)
+        image_adv = pipe_img2img(prompt=prompt, image=adv_image, strength=STRENGTH, guidance_scale=GUIDANCE, num_inference_steps=NUM_STEPS).images[0]
+
+    return [(init_image, "Source Image"), (adv_image, "Adv Image"), (image_nat, "Gen. Image Nat"), (image_adv, "Gen. Image Adv")]
+
+
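+# Gradio UI: an image and a prompt in, a two-column gallery of results out.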
+interface = gr.Interface(fn=process_image,
+                         inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
+                         outputs=[gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")],
+                         title=title)
+
+interface.launch(debug=True)