File size: 1,238 Bytes
b6b08f6
3353a40
af2f25b
b6eb1a4
075f386
 
 
 
 
 
 
 
b6eb1a4
 
 
3353a40
5c0f9d0
a2ffcba
b6eb1a4
 
af2f25b
ad1a1ef
a2ffcba
 
 
31df11f
a2ffcba
 
b6eb1a4
075f386
 
b6eb1a4
 
 
 
 
 
 
 
 
a2ffcba
 
b6eb1a4
 
 
 
 
 
 
 
 
a2ffcba
af2f25b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import gradio as gr
import requests
import os
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch

# Load the local Stable Diffusion v1.5 pipeline.
# BUG FIX: the original requested torch.float16 weights and then moved the
# pipeline to CPU — fp16 inference is not supported on CPU (it raises or
# yields black/garbage images).  Choose the dtype to match the device:
# half precision on CUDA, full fp32 on CPU.
model_id = "runwayml/stable-diffusion-v1-5"
_device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if _device == "cuda" else torch.float32,
)
pipe = pipe.to(_device)






# Hosted checkpoint used by the first button: gradio's model loader wraps
# the Hugging Face inference API for this model id.
name = "andite/anything-v4.0"
model = gr.Interface.load("models/" + name)


# Simple access gate: generation is only allowed when the secret supplied
# through the "P" environment variable matches the hard-coded token below.
o = os.getenv("P")
h = "Q"
def ac():
    """Build and launch the Gradio demo.

    One prompt textbox feeds two generate buttons: the first calls the
    hosted Hugging Face model, the second runs the local diffusers
    pipeline.  The hosted path is gated on the env-var secret (h == o).
    """

    def im_fn(put):
        """Generate via the hosted model; return None when the gate fails."""
        if h == o:
            # NOTE(fix): corrected the misspelled negative prompt
            # ("blury" -> "blurry") so it actually discourages blurry output.
            return model(put, negative_prompt="blurry")
        # The original `elif h != o` was just the negation of the `if`;
        # a plain fall-through return is equivalent and clearer.
        return None

    def im_pipe(put):
        """Generate via the local StableDiffusionPipeline.

        BUG FIX: the original line `return image = pipe(prompt, ...)` was a
        SyntaxError (assignment inside a return statement) and referenced an
        undefined name `prompt` instead of the `put` argument.  Dead
        commented-out parameter block removed.
        """
        return pipe(put, negative_prompt="blurry").images[0]

    with gr.Blocks() as b:
        put = gr.Textbox()
        with gr.Row():
            out1 = gr.Image()
            out2 = gr.Image()
        with gr.Row():
            btn1 = gr.Button()
            btn2 = gr.Button()
        btn1.click(im_fn, put, out1)
        btn2.click(im_pipe, put, out2)

    b.queue(concurrency_count=100).launch()


ac()