JoPmt committed on
Commit
5173f57
1 Parent(s): bc04be1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -14,20 +14,18 @@ pipe = accelerator.prepare(AmusedPipeline.from_pretrained("amused/amused-512", v
14
  pipe.vqvae.to(torch.float32)
15
  pipe.to("cpu")
16
  apol=[]
17
- def plex(prompt, neg_prompt, fifth, twice):
18
  gc.collect()
19
  apol=[]
20
- prompt = prompt
21
- negative_prompt = neg_prompt
22
  nm = random.randint(1, 4836928)
23
  while nm % 32 != 0:
24
  nm = random.randint(1, 4836928)
25
  generator = torch.Generator(device="cpu").manual_seed(nm)
26
- image = pipe([prompt]*fifth,guidance_scale=4,num_inference_steps=twice,generator=generator)
27
  for a, imze in enumerate(image["images"]):
28
  apol.append(imze)
29
  return apol
30
 
31
- iface = gr.Interface(fn=plex, inputs=[gr.Textbox(label="prompt",),gr.Textbox(label="negative prompt", value="low quality,blurry, low resolution, image artifacts"),gr.Slider(label="num images", minimum=1, step=1, maximum=4, value=1), gr.Slider(label="num inference steps", minimum=1, step=1, maximum=20, value=12)], outputs=gr.Gallery(label="out", columns=2),description="Running on cpu, very slow! by JoPmt.")
32
  iface.queue(max_size=1,api_open=False)
33
  iface.launch(max_threads=1)
 
14
 # Keep the VQ-VAE in full float32 precision (rest of the pipeline may be half).
 pipe.vqvae.to(torch.float32)
15
 # Run the whole pipeline on CPU — no GPU available in this Space.
 pipe.to("cpu")
16
 # Module-level list reused by plex() to collect output images.
 apol=[]
17
def plex(prompt, guod, fifth, twice):
    """Generate images with the aMUSEd pipeline on CPU.

    Args:
        prompt: Text prompt describing the desired image.
        guod: Guidance scale forwarded to the pipeline.
        fifth: Number of images to generate per prompt.
        twice: Number of inference steps.

    Returns:
        list: The generated images (as returned by the pipeline).
    """
    gc.collect()  # reclaim memory from previous runs on this CPU-only Space
    apol = []
    # Rejection-sample a random seed that is a multiple of 32.
    nm = random.randint(1, 4836928)
    while nm % 32 != 0:
        nm = random.randint(1, 4836928)
    generator = torch.Generator(device="cpu").manual_seed(nm)
    image = pipe(
        prompt=prompt,
        guidance_scale=guod,
        num_inference_steps=twice,
        num_images_per_prompt=fifth,
        generator=generator,
    )
    # Index from enumerate() was never used — iterate the images directly.
    for imze in image["images"]:
        apol.append(imze)
    return apol
28
 
29
# Assemble the Gradio UI: prompt box plus sliders for guidance scale,
# image count and inference steps; outputs shown in a two-column gallery.
ui_inputs = [
    gr.Textbox(label="prompt",),
    gr.Slider(label="guidance scale", minimum=1, step=1, maximum=10, value=4),
    gr.Slider(label="num images", minimum=1, step=1, maximum=4, value=1),
    gr.Slider(label="num inference steps", minimum=1, step=1, maximum=20, value=12),
]
iface = gr.Interface(
    fn=plex,
    inputs=ui_inputs,
    outputs=gr.Gallery(label="out", columns=2),
    description="Running on cpu, very slow! by JoPmt.",
)
# Serialize requests and keep the thread footprint minimal for the CPU Space.
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)