Update app.py

app.py CHANGED
@@ -10,6 +10,7 @@ from threading import RLock
 lock = RLock()
 HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 
+
 def load_fn(models):
     global models_load
     models_load = {}
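A side note on the `HF_TOKEN` line above: `os.environ.get` already returns `None` when the variable is unset, so the conditional guard is redundant. An equivalent one-liner, shown here as an observation rather than part of the commit:

```python
import os

# os.environ.get returns None for a missing key, so no "if ... else None" is needed.
HF_TOKEN = os.environ.get("HF_TOKEN")  # stays None unless the Space defines HF_TOKEN
```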
@@ -23,7 +24,6 @@ def load_fn(models):
         m = gr.Interface(lambda: None, ['text'], ['image'])
         models_load.update({model: m})
 
-
 load_fn(models)
 
 
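Only the fallback branch of `load_fn` is visible in these hunks. In sibling Spaces this helper usually tries `gr.load` on each model repo and falls back to the dummy `gr.Interface` seen above when loading fails; a minimal sketch under that assumption (the real loop may differ):

```python
import gradio as gr

def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load.keys():
            try:
                # Load the hosted text-to-image widget for this model repo.
                m = gr.load(f'models/{model}')
            except Exception:
                # Dummy interface so the UI still renders if loading fails.
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load.update({model: m})
```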
@@ -31,6 +31,8 @@ num_models = 1
 default_models = models[:num_models]
 inference_timeout = 600
 
+MAX_SEED=3999999999
+
 
 
 def extend_choices(choices):
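The new `MAX_SEED` constant becomes the upper bound of the seed slider added later in this diff; 3999999999 stays below 2**32 - 1 = 4294967295, so it fits backends that expect an unsigned 32-bit seed. If a random rather than fixed seed were ever wanted, a sketch (hypothetical helper, not in the commit):

```python
from random import randint

MAX_SEED = 3999999999  # below 2**32 - 1, safe for backends expecting a uint32 seed

def random_seed() -> int:
    # Hypothetical helper: draw a seed the user can note down and reuse.
    return randint(0, MAX_SEED)
```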
@@ -41,13 +43,57 @@ def update_imgbox(choices):
     choices_plus = extend_choices(choices)
     return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]
 
-
 def gen_fn(model_str, prompt):
     if model_str == 'NA':
         return None
     noise = str('') #str(randint(0, 99999999999))
     return models_load[model_str](f'{prompt} {noise}')
 
+
+
+
+
+
+
+
+
+async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
+    from pathlib import Path
+    kwargs = {}
+    noise = ""
+    kwargs["seed"] = seed
+    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
+                               prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
+    await asyncio.sleep(0)
+    try:
+        result = await asyncio.wait_for(task, timeout=timeout)
+    except (Exception, asyncio.TimeoutError) as e:
+        print(e)
+        print(f"Task timed out: {model_str}")
+        if not task.done(): task.cancel()
+        result = None
+    if task.done() and result is not None:
+        with lock:
+            png_path = "image.png"
+            result.save(png_path)
+            image = str(Path(png_path).resolve())
+        return image
+    return None
+
+def gen_fnseed(model_str, prompt, seed=1):
+    if model_str == 'NA':
+        return None
+    try:
+        loop = asyncio.new_event_loop()
+        result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
+    except (Exception, asyncio.CancelledError) as e:
+        print(e)
+        print(f"Task aborted: {model_str}")
+        result = None
+    finally:
+        loop.close()
+    return result
+
 def gen_fnsix(model_str, prompt):
     if model_str == 'NA':
         return None
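The heart of this commit is the `infer`/`gen_fnseed` pair above: the blocking model call runs on a worker thread via `asyncio.to_thread`, is raced against a deadline with `asyncio.wait_for`, and is driven from Gradio's synchronous callback through a short-lived event loop. A self-contained sketch of the same pattern (`slow_model` and the other names here are illustrative, not from the Space):

```python
import asyncio
import time

def slow_model(prompt: str, seed: int) -> str:
    # Stand-in for the blocking Hugging Face inference call.
    time.sleep(2)
    return f"image for {prompt!r} with seed {seed}"

async def infer_with_timeout(prompt: str, seed: int, timeout: float):
    # Push the blocking call onto a thread so the event loop stays free.
    task = asyncio.create_task(asyncio.to_thread(slow_model, prompt, seed))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:  # includes asyncio.TimeoutError
        print(e)
        if not task.done():
            task.cancel()  # the worker thread is abandoned, not killed
        return None

def gen_from_sync(prompt: str, seed: int, timeout: float = 600.0):
    # Mirrors gen_fnseed: bridge a sync Gradio callback into the coroutine.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer_with_timeout(prompt, seed, timeout))
    finally:
        loop.close()

if __name__ == "__main__":
    print(gen_from_sync("a toy robot", seed=42, timeout=5.0))
```

One caveat the sketch shares with the committed code: cancelling the task only abandons the worker thread, it does not stop the underlying inference, so the timeout caps how long the caller waits rather than how long the backend works.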
@@ -64,7 +110,7 @@ with gr.Blocks() as demo:
             gr.HTML(
                 """
                 <div>
-                <p> <center>For negative prompts,
+                <p> <center>For negative prompts, Width and Height, and other features visit John6666's <a href="https://huggingface.co/spaces/John6666/PrintingPress4">Printing Press 4</a>!</center>
                 </p></div>
                 """
             )
@@ -91,7 +137,41 @@ with gr.Blocks() as demo:
             gr.HTML(
                 """
                 <div class="footer">
-                <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier,
+                <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier, and <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!
+                </p>
+                """
+            )
+    with gr.Tab('Seed it!'):
+        model_choiceseed = gr.Dropdown(models, label = f'Choose a model from the {len(models)} available! Try clearing the box and typing on it to filter them!', value = models[0], filterable = True)
+        txt_inputseed = gr.Textbox(label = 'Your prompt:')
+        seed = gr.Slider(label="Use a seed to replicate the same image later", info="Max 3999999999", minimum=0, maximum=MAX_SEED, step=1, value=1)
+
+        max_imagesseed = 1
+        num_imagesseed = gr.Slider(1, max_imagesone, value = max_imagesone, step = 1, label = 'One, because more would make it produce identical images with the seed', visible = False)
+
+        gen_buttonseed = gr.Button('Generate an image using the seed')
+        #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
+        gen_button.click(lambda s: gr.update(interactive = True), None)
+
+        with gr.Row():
+            outputseed = [gr.Image(label = '') for _ in range(max_imagesseed)]
+
+        for i, o in enumerate(outputseed):
+            img_is = gr.Number(i, visible = False)
+            num_imagesseed.change(lambda i, n: gr.update(visible = (i < n)), [img_is, num_imagesseed], o, show_progress = False)
+            #gen_eventseed = gen_buttonseed.click(lambda i, n, m, t, n1: gen_fnseed(m, t, n1) if (i < n) else None, [img_is, num_imagesseed, model_choiceseed, txt_inputseed, useseed], o, concurrency_limit=None, queue=False)
+
+            gen_eventseed = gr.on(triggers=[gen_buttonseed.click, txt_inputseed.submit],
+                                  fn=lambda i, n, m, t, n1: gen_fnseed(m, t, n1) if (i < n) else None,
+                                  inputs=[img_is, num_imagesseed, model_choiceseed, txt_inputseed, seed], outputs=[o],
+                                  concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+
+        #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
+        with gr.Row():
+            gr.HTML(
+                """
+                <div class="footer">
+                <p> Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier, and <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!
                 </p>
                 """
             )
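In the new 'Seed it!' tab, `gr.on` registers a single handler for two triggers: the button click and the textbox's Enter submit. (Note that the committed slider still references `max_imagesone`, presumably defined in an earlier tab, rather than the freshly defined `max_imagesseed`.) A minimal standalone sketch of the same wiring, with an illustrative `echo` callback in place of `gen_fnseed`:

```python
import gradio as gr

def echo(prompt: str, seed: float) -> str:
    # Illustrative stand-in for gen_fnseed.
    return f"{prompt} (seed={int(seed)})"

with gr.Blocks() as demo:
    txt = gr.Textbox(label='Your prompt:')
    seed = gr.Slider(minimum=0, maximum=3999999999, step=1, value=1, label='Seed')
    btn = gr.Button('Generate')
    out = gr.Textbox(label='Result')

    # One handler, two triggers: clicking the button or pressing Enter in the textbox.
    gr.on(triggers=[btn.click, txt.submit], fn=echo, inputs=[txt, seed], outputs=[out])

demo.launch()
```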
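A final design note on `infer`: every request saves to the same `image.png`, which is why the global `RLock` serializes the save-and-resolve step. A hedged alternative (illustrative, not part of the commit) writes each result to its own temporary file, so concurrent saves no longer need the lock:

```python
import os
import tempfile
from pathlib import Path

def save_result(result) -> str:
    # Hypothetical variant: a unique file per request means concurrent saves
    # no longer contend for the shared image.png behind the RLock.
    handle, png_path = tempfile.mkstemp(suffix=".png")
    os.close(handle)        # only the unique path is needed
    result.save(png_path)   # PIL-style save, as in infer()
    return str(Path(png_path).resolve())
```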