import gradio as gr
import torch
from transformers.utils.hub import move_cache
from diffusers import StableDiffusionPipeline, DDIMScheduler

# Migrate any legacy Transformers cache layout before loading weights.
move_cache()

# Load the fine-tuned DreamBooth weights and swap in a DDIM scheduler.
model_path = "slymnyldrm/dreambooth_usecase_weights"
pipe = StableDiffusionPipeline.from_pretrained(
    model_path, use_safetensors=True, safety_checker=None, torch_dtype=torch.float32
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Generator used to make sampling reproducible with a fixed seed.
g_cuda = torch.Generator()


def inference(prompt, negative_prompt, num_samples, height=512, width=512,
              num_inference_steps=50, guidance_scale=7.5, g_seed=52362):
    """Run the pipeline and return the generated images."""
    with torch.inference_mode():
        return pipe(
            prompt,
            height=int(height),
            width=int(width),
            negative_prompt=negative_prompt,
            num_images_per_prompt=int(num_samples),
            num_inference_steps=int(num_inference_steps),
            guidance_scale=guidance_scale,
            generator=g_cuda.manual_seed(g_seed),
        ).images


# Gradio UI: prompt controls on the left, generated gallery on the right.
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                value="concept art portrait of suleyman_person person, hyper realistic face, intricate, detailed, evil, strong face",
            )
            negative_prompt = gr.Textbox(label="Negative Prompt", value="")
            run = gr.Button(value="Generate")
            with gr.Row():
                num_samples = gr.Number(label="Number of Samples", value=1)
                guidance_scale = gr.Number(label="Guidance Scale", value=7.5)
            with gr.Row():
                height = gr.Number(label="Height", value=512)
                width = gr.Number(label="Width", value=512)
            num_inference_steps = gr.Slider(label="Steps", value=50)
        with gr.Column():
            gallery = gr.Gallery()

    # Input order matches the positional parameters of inference().
    run.click(
        inference,
        inputs=[prompt, negative_prompt, num_samples, height, width, num_inference_steps, guidance_scale],
        outputs=gallery,
    )

demo.launch()