radames committed on
Commit
b6a0d59
1 Parent(s): f734c44

hardware change, cache examples

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +12 -6
README.md CHANGED
@@ -7,7 +7,7 @@ sdk: gradio
7
  sdk_version: 3.35.2
8
  app_file: app.py
9
  pinned: false
10
- suggested_hardware: t4-small
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
7
  sdk_version: 3.35.2
8
  app_file: app.py
9
  pinned: false
10
+ suggested_hardware: t4-medium
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -72,10 +72,11 @@ def inference(
72
  seed: int = -1,
73
  num_inference_steps: int = 30,
74
  ):
 
75
  if prompt is None or prompt == "":
76
  raise gr.Error("Prompt is required")
77
 
78
- if qrcode_image is None and qr_code_content is None:
79
  raise gr.Error("QR Code Image or QR Code Content is required")
80
 
81
  generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
@@ -92,8 +93,11 @@ def inference(
92
  ) # type: ignore
93
 
94
  init_image = out.images[0]
 
 
 
95
 
96
- if qr_code_content is not None or qr_code_content != "":
97
  print("Generating QR Code from content")
98
  qr = qrcode.QRCode(
99
  version=1,
@@ -110,8 +114,6 @@ def inference(
110
  print("Using QR Code Image")
111
  qrcode_image = resize_for_condition_image(qrcode_image, 768)
112
 
113
- init_image = resize_for_condition_image(init_image, 768)
114
-
115
  out = pipe(
116
  prompt=prompt,
117
  negative_prompt=negative_prompt,
@@ -163,6 +165,9 @@ model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
163
  )
164
 
165
  with gr.Accordion(label="Params"):
 
 
 
166
  guidance_scale = gr.Slider(
167
  minimum=0.0,
168
  maximum=50.0,
@@ -240,7 +245,7 @@ model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
240
  10.0,
241
  2.7,
242
  0.8,
243
- 2313123,
244
  ],
245
  [
246
  None,
@@ -251,7 +256,7 @@ model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
251
  10.0,
252
  2.7,
253
  0.8,
254
- 2313123,
255
  ],
256
  ],
257
  fn=inference,
@@ -267,6 +272,7 @@ model: https://huggingface.co/DionTimmer/controlnet_qrcode-control_v1p_sd15
267
  seed,
268
  ],
269
  outputs=[result_image],
 
270
  )
271
 
272
  blocks.queue()
 
72
  seed: int = -1,
73
  num_inference_steps: int = 30,
74
  ):
75
+ print(init_image, qrcode_image, qr_code_content, prompt, negative_prompt)
76
  if prompt is None or prompt == "":
77
  raise gr.Error("Prompt is required")
78
 
79
+ if qrcode_image is None and qr_code_content == "":
80
  raise gr.Error("QR Code Image or QR Code Content is required")
81
 
82
  generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
 
93
  ) # type: ignore
94
 
95
  init_image = out.images[0]
96
+ else:
97
+ print("Using provided init image")
98
+ init_image = resize_for_condition_image(init_image, 768)
99
 
100
+ if qr_code_content != "":
101
  print("Generating QR Code from content")
102
  qr = qrcode.QRCode(
103
  version=1,
 
114
  print("Using QR Code Image")
115
  qrcode_image = resize_for_condition_image(qrcode_image, 768)
116
 
 
 
117
  out = pipe(
118
  prompt=prompt,
119
  negative_prompt=negative_prompt,
 
165
  )
166
 
167
  with gr.Accordion(label="Params"):
168
+ gr.Markdown(
169
+ "**Note: The QR Code Image functionality is highly dependent on the params below.**"
170
+ )
171
  guidance_scale = gr.Slider(
172
  minimum=0.0,
173
  maximum=50.0,
 
245
  10.0,
246
  2.7,
247
  0.8,
248
+ 7878952477,
249
  ],
250
  [
251
  None,
 
256
  10.0,
257
  2.7,
258
  0.8,
259
+ 23123124123,
260
  ],
261
  ],
262
  fn=inference,
 
272
  seed,
273
  ],
274
  outputs=[result_image],
275
+ cache_examples=True,
276
  )
277
 
278
  blocks.queue()