Update app.py
Browse files
app.py
CHANGED
@@ -14,9 +14,9 @@ pipe = pipe.to(device)
|
|
14 |
def genie(prompt, scale, steps, seed):
    """Generate an image for *prompt* and return the latent-upscaled result.

    Args:
        prompt: Text prompt for the diffusion pipeline (77-token limit per the UI).
        scale: Classifier-free guidance scale for the base pipeline.
        steps: Number of denoising steps for the base pipeline.
        seed: Integer seed for reproducible generation.

    Returns:
        The upscaled image (first element of the upscaler's ``.images``).
    """
    # Seed a device-local generator so the same seed reproduces the same image.
    generator = torch.Generator(device=device).manual_seed(seed)
    # Ask the base pipeline for raw latents (output_type="latent") instead of
    # decoded images, so the latent upscaler can consume them directly.
    low_res_latents = pipe(prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
    # guidance_scale=0 disables classifier-free guidance during upscaling.
    upscaled_image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator).images[0]
    # BUG FIX: the original ended with a bare ``return``, discarding the
    # computed image and handing ``None`` to the Gradio interface.
    return upscaled_image
|
20 |
|
21 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|
22 |
gr.Slider(1, maximum=15, value=10, step=.25),
|
|
|
14 |
def genie(prompt, scale, steps, seed):
    """Generate an image for *prompt* and return the latent-upscaled result.

    Args:
        prompt: Text prompt for the diffusion pipeline (77-token limit per the UI).
        scale: Classifier-free guidance scale for the base pipeline.
        steps: Number of denoising steps for the base pipeline.
        seed: Integer seed for reproducible generation.

    Returns:
        The upscaled image (first element of the upscaler's ``.images``).
    """
    # Seed a device-local generator so the same seed reproduces the same image.
    generator = torch.Generator(device=device).manual_seed(seed)
    # Ask the base pipeline for raw latents (output_type="latent") instead of
    # decoded images, so the latent upscaler can consume them directly.
    low_res_latents = pipe(prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
    # guidance_scale=0 disables classifier-free guidance during upscaling.
    upscaled_image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator).images[0]
    return upscaled_image
|
20 |
|
21 |
gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
|
22 |
gr.Slider(1, maximum=15, value=10, step=.25),
|