RamAnanth1 committed
Commit b5dea27
1 Parent(s): a16cb0b

Include advanced options to modify the guidance scale, number of inference steps, and seed

Files changed (1)
app.py  +19  -14
app.py CHANGED
@@ -82,7 +82,7 @@ def pgd_inpaint(X, target, model, criterion, eps=0.1, step_size=0.015, iters=40,
 
     return X_adv
 
-def process_image_img2img(raw_image,prompt):
+def process_image_img2img(raw_image,prompt, scale, num_steps, seed):
     resize = T.transforms.Resize(512)
     center_crop = T.transforms.CenterCrop(512)
     init_image = center_crop(resize(raw_image))
@@ -103,13 +103,13 @@ def process_image_img2img(raw_image,prompt):
     adv_image = to_pil(adv_X[0]).convert("RGB")
 
     # a good seed (uncomment the line below to generate new images)
-    SEED = 9222
+    SEED = seed # Default is 9222
     # SEED = np.random.randint(low=0, high=10000)
 
     # Play with these for improving generated image quality
     STRENGTH = 0.5
-    GUIDANCE = 7.5
-    NUM_STEPS = 50
+    GUIDANCE = scale # Default is 7.5
+    NUM_STEPS = num_steps # Default is 50
 
     with torch.autocast('cuda'):
         torch.manual_seed(SEED)
@@ -119,7 +119,7 @@ def process_image_img2img(raw_image,prompt):
 
     return [(init_image,"Source Image"), (adv_image, "Adv Image"), (image_nat,"Gen. Image Nat"), (image_adv, "Gen. Image Adv")]
 
-def process_image_inpaint(raw_image,mask, prompt):
+def process_image_inpaint(raw_image,mask, prompt, scale, num_steps, seed):
     init_image = raw_image.convert('RGB').resize((512,512))
     mask_image = mask.convert('RGB')
     mask_image = ImageOps.invert(mask_image).resize((512,512))
@@ -156,7 +156,7 @@ def process_image_inpaint(raw_image,mask, prompt):
     adv_image = recover_image(adv_image, init_image, mask_image, background=True)
 
     # A good seed
-    SEED = 9209
+    SEED = seed # Default is 9209
 
     # Uncomment the below to generated other images
     # SEED = np.random.randint(low=0, high=100000)
@@ -165,8 +165,8 @@ def process_image_inpaint(raw_image,mask, prompt):
     print(SEED)
 
     #strength = 0.7
-    guidance_scale = 7.5
-    num_inference_steps = 100
+    guidance_scale = scale # Default is 7.5
+    num_inference_steps = num_steps # Default is 100
 
     image_nat = pipe_inpaint(prompt=prompt,
                              image=init_image,
@@ -192,7 +192,7 @@ def process_image_inpaint(raw_image,mask, prompt):
     return [(init_image,"Source Image"), (adv_image, "Adv Image"), (image_nat,"Gen. Image Nat"), (image_adv, "Gen. Image Adv")]
 
 
-examples_list = [["dog.png", "dog under heavy rain and muddy ground real"]]
+examples_list = [["dog.png", "dog under heavy rain and muddy ground real", 7.5, 50, 9222]]
 
 
 with gr.Blocks() as demo:
@@ -203,7 +203,7 @@ with gr.Blocks() as demo:
     <p style="margin-bottom: 10px; font-size: 94%">This is an unofficial demo for Photoguard, which is an approach to safeguarding images against manipulation by ML-powered photo-editing models such as stable diffusion through immunization of images. The demo is based on the <a href='https://github.com/MadryLab/photoguard' style='text-decoration: underline;' target='_blank'> Github </a> implementation provided by the authors.</p>
     ''')
     gr.HTML('''
-    <p align="center"><img src="https://raw.githubusercontent.com/MadryLab/photoguard/main/assets/hero_fig.PNG" style="width:40%"/></p>
+    <p align="center"><img src="https://raw.githubusercontent.com/MadryLab/photoguard/main/assets/hero_fig.PNG" style="width:60%"/></p>
     ''')
     gr.HTML('''
     <p style="margin-bottom: 10px; font-size: 94%"> A malevolent actor might download
@@ -224,15 +224,20 @@ we disrupt their ability to successfully perform such edits forcing them to gene
                 mask_image_inpaint = gr.Image(type="pil", label = "Mask")
                 input_prompt_inpaint = gr.Textbox(label="Prompt")
                 run_btn_inpaint = gr.Button('Run')
-
+
+        with gr.Accordion("Advanced options", open=False):
+            scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
+            num_steps = gr.Slider(label="Number of Inference Steps", minimum=5, maximum=125, value=100, step=5)
+            seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+
     with gr.Row():
         result_gallery = gr.Gallery(
             label="Generated images", show_label=False, elem_id="gallery"
         ).style(grid=[2], height="auto")
 
-    run_btn_img2img.click(process_image_img2img, inputs = [input_image_img2img,input_prompt_img2img], outputs = [result_gallery])
-    examples = gr.Examples(examples=examples_list,inputs = [input_image_img2img,input_prompt_img2img], outputs = [result_gallery], cache_examples = True, fn = process_image_img2img)
-    run_btn_inpaint.click(process_image_inpaint, inputs = [input_image_inpaint,mask_image_inpaint,input_prompt_inpaint], outputs = [result_gallery])
+    run_btn_img2img.click(process_image_img2img, inputs = [input_image_img2img,input_prompt_img2img, scale, num_steps, seed], outputs = [result_gallery])
+    examples = gr.Examples(examples=examples_list,inputs = [input_image_img2img,input_prompt_img2img, scale, num_steps, seed], outputs = [result_gallery], cache_examples = True, fn = process_image_img2img)
+    run_btn_inpaint.click(process_image_inpaint, inputs = [input_image_inpaint,mask_image_inpaint,input_prompt_inpaint, scale, num_steps, seed], outputs = [result_gallery])
 
 
 demo.launch(debug=True)
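
For context, a minimal, self-contained sketch of the Gradio pattern this commit applies: sliders tucked inside a gr.Accordion, with their values passed as additional inputs to a button's click handler. The generate() function, component names, and launch guard below are illustrative stand-ins for the app's process_image_* handlers rather than the actual Photoguard code; only the three slider definitions mirror the diff.

import gradio as gr

def generate(prompt, scale, num_steps, seed):
    # Stand-in for process_image_img2img / process_image_inpaint: in app.py these
    # values feed torch.manual_seed(seed) and the diffusers pipeline's
    # guidance_scale / num_inference_steps arguments.
    return f"prompt={prompt!r}, guidance_scale={scale}, steps={int(num_steps)}, seed={int(seed)}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run_btn = gr.Button("Run")
    # Advanced options collapsed by default, as added in this commit
    with gr.Accordion("Advanced options", open=False):
        scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
        num_steps = gr.Slider(label="Number of Inference Steps", minimum=5, maximum=125, value=100, step=5)
        seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
    output = gr.Textbox(label="Output")
    # Listing the slider components in inputs=[...] makes their current values
    # arrive as extra positional arguments of the handler.
    run_btn.click(generate, inputs=[prompt, scale, num_steps, seed], outputs=[output])

if __name__ == "__main__":
    demo.launch()

Because the three event hooks in app.py all reference the same slider components, a single set of advanced options controls both the img2img and inpainting paths, which is why each handler signature gains the same scale, num_steps, and seed parameters.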