File size: 1,958 Bytes
7990be7
681ac98
7990be7
 
 
 
681ac98
 
fef2ff0
7990be7
3967dfa
7990be7
 
12a94f5
7990be7
 
20c8d29
7990be7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3967dfa
7990be7
 
 
 
 
 
 
 
7e11645
7990be7
0c7e57a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import gradio as gr
from transformers.utils.hub import move_cache
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler

# One-time migration of the Transformers cache to the new layout
# (no-op when the cache has already been migrated).
move_cache()

# Hugging Face Hub repo holding the fine-tuned DreamBooth weights.
model_path = "slymnyldrm/dreambooth_usecase_weights"

# Load the pipeline in float32 with safetensors weights; the bundled
# safety checker is explicitly disabled.
pipe = StableDiffusionPipeline.from_pretrained(model_path, use_safetensors=True, safety_checker=None, torch_dtype=torch.float32)
# Replace the default scheduler with DDIM, reusing the existing scheduler config.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# RNG used to make generation reproducible per seed.
# NOTE(review): despite the name, torch.Generator() with no device argument
# is a CPU generator, and the pipeline is never moved to CUDA — confirm intent.
g_cuda = torch.Generator()

def inference(prompt, negative_prompt, num_samples, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, g_seed=52362):
    """Run the Stable Diffusion pipeline and return the generated images.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text describing what to avoid in the output.
        num_samples: Number of images to generate; coerced to int because
            Gradio ``Number`` components deliver floats.
        height: Output image height in pixels (coerced to int).
        width: Output image width in pixels (coerced to int).
        num_inference_steps: Denoising steps (coerced to int).
        guidance_scale: Classifier-free guidance strength (coerced to float).
        g_seed: RNG seed; coerced to int because
            ``torch.Generator.manual_seed`` rejects non-integer values.

    Returns:
        The list of PIL images produced by the pipeline.
    """
    # inference_mode disables autograd bookkeeping for the whole forward pass.
    with torch.inference_mode():
        return pipe(
                prompt, height=int(height), width=int(width),
                negative_prompt=negative_prompt,
                num_images_per_prompt=int(num_samples),
                num_inference_steps=int(num_inference_steps),
                guidance_scale=float(guidance_scale),
                # manual_seed requires an int; every other numeric arg is
                # already cast, so cast the seed too for consistency.
                generator=g_cuda.manual_seed(int(g_seed))
            ).images

with gr.Blocks() as demo:
    # Two-column layout: generation controls on the left, results on the right.
    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Prompt",
                value="concept art portrait of suleyman_person person, hyper realistic face, intricate, detailed, evil, strong face",
            )
            neg_prompt_box = gr.Textbox(label="Negative Prompt", value="")
            generate_btn = gr.Button(value="Generate")
            with gr.Row():
                samples_in = gr.Number(label="Number of Samples", value=1)
                guidance_in = gr.Number(label="Guidance Scale", value=7.5)
            with gr.Row():
                height_in = gr.Number(label="Height", value=512)
                width_in = gr.Number(label="Width", value=512)
            steps_in = gr.Slider(label="Steps", value=50)
        with gr.Column():
            output_gallery = gr.Gallery()

    # Wire the button to inference(); input order must match the function's
    # positional parameters (g_seed is left at its default).
    generate_btn.click(
        inference,
        inputs=[prompt_box, neg_prompt_box, samples_in, height_in, width_in, steps_in, guidance_in],
        outputs=output_gallery,
    )

demo.launch()