nsfwalex committed
Commit
887d072
Parent: c1469e0

Update app.py

Files changed (1)
  1. app.py +30 -30
app.py CHANGED
@@ -233,36 +233,36 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-@spaces.GPU(duration=60)
-def generate(p, progress=gr.Progress(track_tqdm=True)):
-    negative_prompt = cfg.get("negative_prompt", "")
-    style_selection = ""
-    use_negative_prompt = True
-    seed = 0
-    width = cfg.get("width", 1024)
-    height = cfg.get("width", 768)
-    inference_steps = cfg.get("inference_steps", 30)
-    randomize_seed = True
-    guidance_scale = cfg.get("guidance_scale", 7.5)
-    prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
-    seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(pipe.device).manual_seed(seed)
-
-    images = pipe(
-        prompt=prompt_str,
-        negative_prompt=negative_prompt,
-        width=width,
-        height=height,
-        guidance_scale=guidance_scale,
-        num_inference_steps=inference_steps,
-        generator=generator,
-        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
-        output_type="pil",
-    ).images
-    images = [save_image(img) for img in images]
-    image_paths = [i[1] for i in images]
-    print(prompt_str, image_paths)
-    return [i[0] for i in images]
+@spaces.GPU(duration=60)
+def generate(p, progress=gr.Progress(track_tqdm=True)):
+    negative_prompt = cfg.get("negative_prompt", "")
+    style_selection = ""
+    use_negative_prompt = True
+    seed = 0
+    width = cfg.get("width", 1024)
+    height = cfg.get("width", 768)
+    inference_steps = cfg.get("inference_steps", 30)
+    randomize_seed = True
+    guidance_scale = cfg.get("guidance_scale", 7.5)
+    prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
+    seed = int(randomize_seed_fn(seed, randomize_seed))
+    generator = torch.Generator(pipe.device).manual_seed(seed)
+
+    images = pipe(
+        prompt=prompt_str,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=inference_steps,
+        generator=generator,
+        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
+        output_type="pil",
+    ).images
+    images = [save_image(img) for img in images]
+    image_paths = [i[1] for i in images]
+    print(prompt_str, image_paths)
+    return [i[0] for i in images]
 
 with gr.Blocks(css=css,head=js,fill_height=True) as demo:
     with gr.Row(equal_height=False):
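
For readers who want to try the committed generate() flow outside the Space, here is a minimal standalone sketch of the same sequence: pick a random seed, build a torch.Generator, render with a diffusers pipeline, and collect the resulting PIL images. The pipe object, cfg dictionary, save_image helper, and NUM_IMAGES_PER_PROMPT constant live elsewhere in app.py and are not shown in this hunk, so the model id, cfg values, and file-saving code below are placeholders rather than the Space's actual configuration. Note also that the committed code reads height from the "width" key (height = cfg.get("width", 768)), which looks like a typo; the sketch reads a separate "height" key.

# Minimal sketch of the generate() flow from the diff, runnable outside Gradio.
# Assumptions (not visible in this hunk): model id, cfg defaults, and output
# filenames below are placeholders, not the Space's real settings.
import random

import torch
from diffusers import DiffusionPipeline

MAX_SEED = 2**32 - 1

cfg = {
    "negative_prompt": "",
    "width": 1024,
    "height": 768,          # the committed code reads this from the "width" key
    "inference_steps": 30,
    "guidance_scale": 7.5,
    "prompt": "{prompt}",   # template; "{prompt}" is replaced by the user prompt
}

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)


def generate(p: str, randomize_seed: bool = True, seed: int = 0):
    # Same idea as randomize_seed_fn in app.py: pick a fresh seed unless the
    # caller asks for a reproducible run.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(pipe.device).manual_seed(seed)

    # Substitute the user prompt into the configured prompt template.
    prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)

    images = pipe(
        prompt=prompt_str,
        negative_prompt=cfg.get("negative_prompt", ""),
        width=cfg.get("width", 1024),
        height=cfg.get("height", 768),
        guidance_scale=cfg.get("guidance_scale", 7.5),
        num_inference_steps=cfg.get("inference_steps", 30),
        generator=generator,
        num_images_per_prompt=1,
        output_type="pil",
    ).images
    return images, seed


if __name__ == "__main__":
    imgs, used_seed = generate("a watercolor fox in a forest")
    for i, img in enumerate(imgs):
        img.save(f"output_{i}.png")
    print(f"seed={used_seed}, saved {len(imgs)} image(s)")

Seeding the torch.Generator explicitly is what makes a run reproducible once randomize_seed is turned off; the @spaces.GPU(duration=60) decorator in the commit only matters on ZeroGPU hardware (it requests a GPU slot for up to 60 seconds per call) and is omitted from this sketch.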