Spaces:
Running
Running
yizhangliu
committed on
Commit
•
a4122d2
1
Parent(s):
1dcf1c6
Update app.py
Browse files
app.py
CHANGED
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
|
3 |
+
# Hub model references -> short tab titles shown in the TabbedInterface.
model_ids = {"models/runwayml/stable-diffusion-v1-5":"stable-diffusion-v1-5",
             "models/stabilityai/stable-diffusion-2":"stable-diffusion-2",
             "models/prompthero/openjourney":"openjourney",
             }
tab_actions = []   # loaded gr.Interface objects, one per model that loaded OK
tab_titles = []    # titles kept in lockstep with tab_actions
for model_id in model_ids.keys():
    print(model_id, model_ids[model_id])
    try:
        tab = gr.Interface.load(model_id)
        tab_actions.append(tab)
        tab_titles.append(model_ids[model_id])
    except Exception as e:
        # Best-effort: a model that fails to load is simply skipped, but log
        # why. (The original bare `except: pass` also swallowed SystemExit /
        # KeyboardInterrupt and hid every failure silently.)
        print(f"failed to load {model_id}: {e}")
|
17 |
+
|
18 |
+
def infer(prompt):
    """Echo the prompt unchanged.

    Used as the submit handler: it copies the visible prompt textbox into the
    hidden one, whose value the injected page JS then fans out to every
    model tab's textarea.
    """
    # gr.Interface.load("models/runwayml/stable-diffusion-v1-5",prompt=prompt).launch()
    result = prompt
    return result
|
21 |
+
|
22 |
+
start_work = """async() => {
|
23 |
+
function isMobile() {
|
24 |
+
try {
|
25 |
+
document.createEvent("TouchEvent"); return true;
|
26 |
+
} catch(e) {
|
27 |
+
return false;
|
28 |
+
}
|
29 |
+
}
|
30 |
+
|
31 |
+
function getClientHeight()
|
32 |
+
{
|
33 |
+
var clientHeight=0;
|
34 |
+
if(document.body.clientHeight&&document.documentElement.clientHeight) {
|
35 |
+
var clientHeight = (document.body.clientHeight<document.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight;
|
36 |
+
} else {
|
37 |
+
var clientHeight = (document.body.clientHeight>document.documentElement.clientHeight)?document.body.clientHeight:document.documentElement.clientHeight;
|
38 |
+
}
|
39 |
+
return clientHeight;
|
40 |
+
}
|
41 |
+
|
42 |
+
function setNativeValue(element, value) {
|
43 |
+
const valueSetter = Object.getOwnPropertyDescriptor(element.__proto__, 'value').set;
|
44 |
+
const prototype = Object.getPrototypeOf(element);
|
45 |
+
const prototypeValueSetter = Object.getOwnPropertyDescriptor(prototype, 'value').set;
|
46 |
+
|
47 |
+
if (valueSetter && valueSetter !== prototypeValueSetter) {
|
48 |
+
prototypeValueSetter.call(element, value);
|
49 |
+
} else {
|
50 |
+
valueSetter.call(element, value);
|
51 |
+
}
|
52 |
+
}
|
53 |
+
|
54 |
+
var gradioEl = document.querySelector('body > gradio-app').shadowRoot;
|
55 |
+
if (!gradioEl) {
|
56 |
+
gradioEl = document.querySelector('body > gradio-app');
|
57 |
+
}
|
58 |
+
|
59 |
+
if (typeof window['gradioEl'] === 'undefined') {
|
60 |
+
window['gradioEl'] = gradioEl;
|
61 |
+
|
62 |
+
tabitems = window['gradioEl'].querySelectorAll('.tabitem');
|
63 |
+
for (var i = 0; i < tabitems.length; i++) {
|
64 |
+
tabitems[i].childNodes[0].children[0].style.display='none';
|
65 |
+
tabitems[i].childNodes[0].children[1].children[0].style.display='none';
|
66 |
+
tabitems[i].childNodes[0].children[1].children[1].children[0].children[1].style.display="none";
|
67 |
+
}
|
68 |
+
tab_demo = window['gradioEl'].querySelectorAll('#tab_demo')[0];
|
69 |
+
tab_demo.style.display = "block";
|
70 |
+
tab_demo.setAttribute('style', 'height: 100%;');
|
71 |
+
|
72 |
+
const page1 = window['gradioEl'].querySelectorAll('#page_1')[0];
|
73 |
+
const page2 = window['gradioEl'].querySelectorAll('#page_2')[0];
|
74 |
+
|
75 |
+
page1.style.display = "none";
|
76 |
+
page2.style.display = "block";
|
77 |
+
|
78 |
+
window['prevPrompt'] = '';
|
79 |
+
window['doCheckPrompt'] = 0;
|
80 |
+
window['checkPrompt'] = function checkPrompt() {
|
81 |
+
try {
|
82 |
+
texts = window['gradioEl'].querySelectorAll('textarea');
|
83 |
+
text0 = texts[0];
|
84 |
+
text1 = texts[1];
|
85 |
+
if (window['doCheckPrompt'] == 0 && window['prevPrompt'] != text1.value) {
|
86 |
+
window['doCheckPrompt'] = 1;
|
87 |
+
window['prevPrompt'] = text1.value;
|
88 |
+
for (var i = 2; i < texts.length; i++) {
|
89 |
+
setNativeValue(texts[i], text1.value);
|
90 |
+
texts[i].dispatchEvent(new Event('input', { bubbles: true }));
|
91 |
+
}
|
92 |
+
setTimeout(function() {
|
93 |
+
text1 = window['gradioEl'].querySelectorAll('textarea')[1];
|
94 |
+
//console.log('do_click()_1_' + text1.value);
|
95 |
+
|
96 |
+
btns = window['gradioEl'].querySelectorAll('button');
|
97 |
+
for (var i = 0; i < btns.length; i++) {
|
98 |
+
if (btns[i].innerText == 'Submit') {
|
99 |
+
btns[i].focus();
|
100 |
+
btns[i].click();
|
101 |
+
//break;
|
102 |
+
}
|
103 |
+
}
|
104 |
+
//console.log('do_click()_3_');
|
105 |
+
window['doCheckPrompt'] = 0;
|
106 |
+
}, 10);
|
107 |
+
}
|
108 |
+
} catch(e) {
|
109 |
+
}
|
110 |
+
}
|
111 |
+
window['checkPrompt_interval'] = window.setInterval("window.checkPrompt()", 100);
|
112 |
+
}
|
113 |
+
|
114 |
+
/*
|
115 |
+
texts = gradioEl.querySelectorAll('textarea');
|
116 |
+
text0 = gradioEl.querySelectorAll('textarea')[0];
|
117 |
+
text1 = gradioEl.querySelectorAll('textarea')[0];
|
118 |
+
|
119 |
+
for (var i = 1; i < texts.length; i++) {
|
120 |
+
setNativeValue(texts[i], text0.value);
|
121 |
+
texts[i].dispatchEvent(new Event('input', { bubbles: true }));
|
122 |
+
}
|
123 |
+
|
124 |
+
var st = setTimeout(function() {
|
125 |
+
text1 = window['gradioEl'].querySelectorAll('textarea')[1];
|
126 |
+
console.log('do_click()_1_' + text1.value);
|
127 |
+
|
128 |
+
btns = window['gradioEl'].querySelectorAll('button');
|
129 |
+
for (var i = 0; i < btns.length; i++) {
|
130 |
+
if (btns[i].innerText == 'Submit') {
|
131 |
+
btns[i].focus();
|
132 |
+
btns[i].click();
|
133 |
+
//break;
|
134 |
+
}
|
135 |
+
}
|
136 |
+
console.log('do_click()_3_');
|
137 |
+
}, 10);
|
138 |
+
*/
|
139 |
+
|
140 |
+
return false;
|
141 |
+
}"""
|
142 |
+
|
143 |
+
# UI layout. page_1 holds only the start button; its _js hook runs start_work
# in the browser, which hides page_1 and reveals page_2 (the real UI).
# NOTE(review): uses gradio APIs (.style(), gr.Box, _js=) that were removed in
# later gradio versions — pinned to whatever version the Space installs.
with gr.Blocks(title='Text to Image') as demo:
    with gr.Group(elem_id="page_1", visible=True) as page_1:
        with gr.Box():
            with gr.Row():
                start_button = gr.Button("Let's GO!", elem_id="start-btn", visible=True)
                # fn=None: the click does no server work, only runs start_work client-side.
                start_button.click(fn=None, inputs=[], outputs=[], _js=start_work)

    with gr.Group(elem_id="page_2", visible=False) as page_2:
        with gr.Row(elem_id="prompt_row"):
            # Visible prompt; infer() copies it into the hidden twin below,
            # which the injected JS watches and fans out to every model tab.
            prompt_input0 = gr.Textbox(lines=4, label="prompt")
            prompt_input1 = gr.Textbox(lines=4, label="prompt", visible=False)
        with gr.Row():
            submit_btn = gr.Button(value = "submit",elem_id="erase-btn").style(
                    margin=True,
                    rounded=(True, True, True, True),
            )
        with gr.Row(elem_id='tab_demo', visible=True).style(height=5):
            # One tab per model that loaded successfully at startup.
            tab_demo = gr.TabbedInterface(tab_actions, tab_titles)

    submit_btn.click(fn=infer, inputs=[prompt_input0], outputs=[prompt_input1])
|
163 |
+
|
164 |
+
# prompt_input = gr.Textbox(lines=4, label="Input prompt")
|
165 |
+
# tab_demo = gr.TabbedInterface([sd15_demo, sd20_demo, openjourney_demo], ["stable-diffusion-v1-5", "stable-diffusion-2", "openjourney"])
|
166 |
+
|
167 |
+
# demo = gr.Interface(fn=infer,
|
168 |
+
# inputs=[prompt_input],
|
169 |
+
# outputs=[tab_demo],
|
170 |
+
# )
|
171 |
+
|
172 |
+
if __name__ == "__main__":
    # Entry point when run directly (e.g. by the Space): start the Gradio server.
    demo.launch()
|
174 |
+
|
175 |
+
|
176 |
+
|
177 |
+
# import os
|
178 |
+
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
|
179 |
+
# from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy
|
180 |
+
|
181 |
+
# import gradio as gr
|
182 |
+
# import PIL.Image
|
183 |
+
# import numpy as np
|
184 |
+
# import random
|
185 |
+
# import torch
|
186 |
+
# import subprocess
|
187 |
+
|
188 |
+
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
189 |
+
# # print('Using device:', device)
|
190 |
+
|
191 |
+
# HF_TOKEN_SD=os.environ.get('HF_TOKEN_SD')
|
192 |
+
|
193 |
+
# if 0==0:
|
194 |
+
# model_id = "runwayml/stable-diffusion-v1-5"
|
195 |
+
|
196 |
+
# model_id = "prompthero/openjourney"
|
197 |
+
|
198 |
+
# # pipeClass = StableDiffusionImg2ImgPipeline
|
199 |
+
# pipeClass = StableDiffusionPipeline
|
200 |
+
# className = pipeClass.__name__
|
201 |
+
# if className == 'StableDiffusionInpaintPipeline':
|
202 |
+
# model_id = "runwayml/stable-diffusion-inpainting"
|
203 |
+
|
204 |
+
# sd_pipe = pipeClass.from_pretrained(
|
205 |
+
# model_id,
|
206 |
+
# # revision="fp16",
|
207 |
+
# torch_dtype=torch.float16,
|
208 |
+
# # use_auth_token=HF_TOKEN_SD
|
209 |
+
# ) # .to(device)
|
210 |
+
|
211 |
+
# def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
|
212 |
+
# #torch.cuda.empty_cache()
|
213 |
+
# # print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
|
214 |
+
# generator = torch.manual_seed(seed)
|
215 |
+
# images = sd_pipe([prompt],
|
216 |
+
# generator=generator,
|
217 |
+
# num_inference_steps=steps,
|
218 |
+
# eta=0.3,
|
219 |
+
# guidance_scale=guidance_scale)["sample"]
|
220 |
+
# # print(subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT).decode("utf8"))
|
221 |
+
# return images[0]
|
222 |
+
|
223 |
+
# random_seed = random.randint(0, 2147483647)
|
224 |
+
# gr.Interface(
|
225 |
+
# predict,
|
226 |
+
# inputs=[
|
227 |
+
# gr.inputs.Textbox(label='Prompt', default='a chalk pastel drawing of a llama wearing a wizard hat'),
|
228 |
+
# gr.inputs.Slider(1, 100, label='Inference Steps', default=50, step=1),
|
229 |
+
# gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
|
230 |
+
# gr.inputs.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', default=6.0, step=0.1),
|
231 |
+
# ],
|
232 |
+
# outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
|
233 |
+
# css="#output_image{width: 256px}",
|
234 |
+
# title="Text-to-Image_Latent_Diffusion",
|
235 |
+
# # description="This Spaces contains a text-to-image Latent Diffusion process for the <a href=\"https://huggingface.co/CompVis/ldm-text2im-large-256\">ldm-text2im-large-256</a> model by <a href=\"https://huggingface.co/CompVis\">CompVis</a> using the <a href=\"https://github.com/huggingface/diffusers\">diffusers library</a>. The goal of this demo is to showcase the diffusers library and you can check how the code works here. If you want the state-of-the-art experience with Latent Diffusion text-to-image check out the <a href=\"https://huggingface.co/spaces/multimodalart/latentdiffusion\">main Spaces</a>.",
|
236 |
+
# ).launch()
|