# OJ-V4-CPU / app.py
import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline
# Use the GPU when available; this Space targets CPU, so expect slow inference.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Base text-to-image model (OpenJourney v4) plus the 2x latent upscaler.
pipe = DiffusionPipeline.from_pretrained("prompthero/openjourney-v4", safety_checker=None)
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, safety_checker=None)
upscaler = upscaler.to(device)
pipe = pipe.to(device)
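
# Optional memory tweak (an assumption, not in the original app): on CPU, attention
# slicing trades a little speed for lower peak RAM. enable_attention_slicing() is a
# standard diffusers pipeline method; the if-guard leaves GPU behavior unchanged.
if device == "cpu":
    pipe.enable_attention_slicing()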
def genie(prompt, scale, steps, seed):
    generator = torch.Generator(device=device).manual_seed(seed)
    # Full-resolution base image generated from the text prompt.
    image = pipe(prompt, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
    # A second pass returns latents, which the latent upscaler decodes at 2x resolution.
    low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
    upscaled_image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator).images[0]
    return (image, upscaled_image)
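
# A minimal sketch (not part of the original Space) of calling genie() directly,
# bypassing the Gradio UI; the prompt, seed, and file names below are illustrative.
# The helper is defined but never invoked, so the app's behavior is unchanged.
def _demo_genie():
    base, upscaled = genie("a castle in the clouds, mdjrny-v4 style", 7.5, 25, 12345)
    base.save("openjourney_base.png")
    upscaled.save("openjourney_upscaled_2x.png")
    return base, upscaled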
gr.Interface(fn=genie,
             inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
                     gr.Slider(1, maximum=15, value=10, step=.25, label='Guidance Scale'),
                     gr.Slider(1, maximum=50, value=25, step=1, label='Number of Iterations'),
                     gr.Slider(minimum=1, step=1, maximum=987654321, randomize=True, label='Seed')],
             outputs=['image', 'image'],
             title='OpenJourney V4 CPU',
             description="OJ V4 CPU. <b>WARNING:</b> Extremely slow at roughly 35s per iteration. Expect 8-16 minutes per image for 15-30 iterations respectively; 50 iterations takes ~28 minutes.",
             article="Code Monkey: <a href=\"https://huggingface.co./Manjushri\">Manjushri</a>").launch(debug=True, max_threads=True)