import gc
import random

import gradio as gr
import torch
from accelerate import Accelerator
from diffusers import AmusedPipeline
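# Prepare everything for CPU-only execution; Accelerator(cpu=True) keeps the pipeline off any GPU.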
accelerator = Accelerator(cpu=True)
pipe = accelerator.prepare(AmusedPipeline.from_pretrained("amused/amused-512", variant=None, torch_dtype=torch.float32, use_safetensors=True))
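# Keep the VQ-VAE in float32 and make sure the whole pipeline stays on the CPU.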
pipe.vqvae.to(torch.float32)
pipe.to("cpu")
def plex(prompt, guod, fifth, twice):
    gc.collect()
    apol = []
    # Pick a random seed that is a multiple of 32 (the original author's choice).
    nm = random.randint(1, 4836928)
    while nm % 32 != 0:
        nm = random.randint(1, 4836928)
    generator = torch.Generator(device="cpu").manual_seed(nm)
    # Generate the requested number of images with the chosen guidance scale and step count.
    image = pipe(prompt=prompt, guidance_scale=guod, num_inference_steps=twice, num_images_per_prompt=fifth, generator=generator)
    for imze in image["images"]:
        apol.append(imze)
    return apol
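# Gradio UI: a prompt box plus sliders for guidance scale, image count, and inference steps.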
iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="prompt"),
        gr.Slider(label="guidance scale", minimum=1, step=1, maximum=10, value=4),
        gr.Slider(label="num images", minimum=1, step=1, maximum=4, value=1),
        gr.Slider(label="num inference steps", minimum=1, step=1, maximum=20, value=12),
    ],
    outputs=gr.Gallery(label="out", columns=2),
    description="Running on cpu, very slow! by JoPmt.",
)
iface.queue(max_size=1,api_open=False)
iface.launch(max_threads=1)