Yntec committed on
Commit
9845003
·
verified ·
1 Parent(s): 71b3cf1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -140
app.py CHANGED
@@ -1,144 +1,135 @@
1
  import gradio as gr
2
- # import os
3
- # import sys
4
- # from pathlib import Path
5
- import time
6
-
7
- models =[
8
- "Yntec/elldrethSDreamMix",
9
- "Yntec/ResidentCNZCartoon3D",
10
- "Yntec/Based64",
11
- "Yntec/GoFish",
12
- "Yntec/IsThisArt",
13
- "Yntec/mixRealisticFantasy",
14
- "Yntec/iffyMix",
15
- "Yntec/GoldenEra",
16
- "Yntec/Hassanim",
17
- ]
18
-
19
-
20
- model_functions = {}
21
- model_idx = 1
22
- for model_path in models:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  try:
24
- model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
25
- except Exception as error:
26
- def the_fn(txt):
27
- return None
28
- model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
29
- model_idx+=1
30
-
31
-
32
- def send_it_idx(idx):
33
- def send_it_fn(prompt):
34
- output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt)
35
- return output
36
- return send_it_fn
37
-
38
- def get_prompts(prompt_text):
39
- return prompt_text
40
-
41
- def clear_it(val):
42
- if int(val) != 0:
43
- val = 0
44
- else:
45
- val = 0
46
- pass
47
- return val
48
-
49
- def all_task_end(cnt,t_stamp):
50
- to = t_stamp + 360
51
- et = time.time()
52
- if et > to and t_stamp != 0:
53
- d = gr.update(value=0)
54
- tog = gr.update(value=1)
55
- #print(f'to: {to} et: {et}')
56
- else:
57
- if cnt != 0:
58
- d = gr.update(value=et)
59
- else:
60
- d = gr.update(value=0)
61
- tog = gr.update(value=0)
62
- #print (f'passing: to: {to} et: {et}')
63
- pass
64
- return d, tog
65
-
66
- def all_task_start():
67
- print("\n\n\n\n\n\n\n")
68
- t = time.gmtime()
69
- t_stamp = time.time()
70
- current_time = time.strftime("%H:%M:%S", t)
71
- return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
72
-
73
- def clear_fn():
74
- nn = len(models)
75
- return tuple([None, *[None for _ in range(nn)]])
76
-
77
-
78
-
79
- with gr.Blocks(title="SD Models") as my_interface:
80
- with gr.Column(scale=12):
81
- # with gr.Row():
82
- # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""")
83
  with gr.Row():
84
- with gr.Row(scale=6):
85
- primary_prompt=gr.Textbox(label="Prompt (2024 is the year this space was released, not the amount of models)", value="")
86
- # real_prompt=gr.Textbox(label="Real prompt")
87
- with gr.Row(scale=6):
88
- # improve_prompts_btn=gr.Button("Improve")
89
- with gr.Row():
90
- run=gr.Button("Run",variant="primary")
91
- clear_btn=gr.Button("Clear")
 
 
 
 
92
  with gr.Row():
93
- sd_outputs = {}
94
- model_idx = 1
95
- for model_path in models:
96
- with gr.Column(scale=3, min_width=320):
97
- with gr.Box():
98
- sd_outputs[model_idx] = gr.Image(label=model_path)
99
- pass
100
- model_idx += 1
101
- pass
102
- pass
103
-
104
- with gr.Row(visible=False):
105
- start_box=gr.Number(interactive=False)
106
- end_box=gr.Number(interactive=False)
107
- tog_box=gr.Textbox(value=0,interactive=False)
108
-
109
- start_box.change(
110
- all_task_end,
111
- [start_box, end_box],
112
- [start_box, tog_box],
113
- every=1,
114
- show_progress=True)
115
-
116
- primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
117
- run.click(all_task_start, None, [start_box, end_box, tog_box])
118
- runs_dict = {}
119
- model_idx = 1
120
- for model_path in models:
121
- runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
122
- model_idx += 1
123
- pass
124
- pass
125
-
126
- # improve_prompts_btn_clicked=improve_prompts_btn.click(
127
- # get_prompts,
128
- # inputs=[primary_prompt],
129
- # outputs=[primary_prompt],
130
- # cancels=list(runs_dict.values()))
131
- clear_btn.click(
132
- clear_fn,
133
- None,
134
- [primary_prompt, *list(sd_outputs.values())],
135
- cancels=[*list(runs_dict.values())])
136
- tog_box.change(
137
- clear_it,
138
- tog_box,
139
- tog_box,
140
- cancels=[*list(runs_dict.values())])
141
-
142
- my_interface.queue(concurrency_count=600, status_update_rate=1)
143
- my_interface.launch(inline=True, show_api=False)
144
-
 
1
  import gradio as gr
2
+ from random import randint
3
+ from all_models import models
4
+
5
+ from externalmod import gr_Interface_load, randomize_seed
6
+
7
+ import asyncio
8
+ import os
9
+ from threading import RLock
10
# Serializes writes to the shared image.png output file across worker threads.
lock = RLock()
# HF token for private/gated models; unset or empty env var collapses to None.
# (Single env lookup — the original read the variable twice.)
HF_TOKEN = os.environ.get("HF_TOKEN") or None  # If private or gated models aren't used, ENV setting is unnecessary.
12
+
13
+
14
def load_fn(models):
    """Populate the global ``models_load`` dict, mapping each model id to a
    loaded gradio Interface, or to a no-op placeholder when loading fails.

    Parameters:
        models: iterable of model repo ids (e.g. "Yntec/GoFish").
    """
    global models_load
    models_load = {}

    for model in models:
        if model in models_load:  # skip duplicate entries in the model list
            continue
        try:
            m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
        except Exception as error:
            print(error)
            # Placeholder that tolerates any call signature (prompt/seed/token
            # kwargs) and yields no image, so downstream inference fails soft.
            # The original zero-arg lambda raised TypeError on every call.
            m = gr.Interface(lambda *args, **kwargs: None, ['text'], ['image'])
        models_load[model] = m
26
+
27
+
28
# Eagerly load every model interface at startup.
load_fn(models)


# Number of generator slots shown in the UI; selection is capped at this.
num_models = 9

# Initial selection: the first num_models entries of the imported model list.
default_models = models[:num_models]
# Per-model inference budget in seconds (passed to asyncio.wait_for in infer).
inference_timeout = 600
# Upper bound of the seed slider.
MAX_SEED=666666666
# Slider's initial value: a random seed from the themed 666666xxx range.
starting_seed = randint(666666000, 666666666)
37
+
38
def extend_choices(choices):
    """Normalize a model selection to exactly ``num_models`` entries.

    Truncates an over-long selection and pads a short one with 'NA'
    markers, which downstream widgets treat as hidden/unused slots.
    """
    selected = choices[:num_models]
    padding = ['NA'] * (num_models - len(selected))
    return selected + padding
40
+
41
+
42
def update_imgbox(choices):
    """Rebuild the output image widgets for the current model selection.

    Returns one cleared gr.Image per slot; slots padded out with 'NA'
    are created invisible.
    """
    padded = extend_choices(choices[:num_models])
    boxes = [gr.Image(None, label=name, visible=(name != 'NA')) for name in padded]
    return boxes
45
+
46
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    """Run one model's text-to-image inference in a worker thread.

    On success the image is saved to the shared ``image.png`` (guarded by
    ``lock``) and its absolute path is returned; on timeout or any other
    failure the pending task is cancelled and None is returned.

    Parameters:
        model_str: key into the global ``models_load`` dict.
        prompt: user prompt text.
        seed: generation seed forwarded to the model.
        timeout: seconds to wait before giving up.
    """
    from pathlib import Path
    kwargs = {}
    noise = ""
    kwargs["seed"] = seed
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                                                 prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)  # yield once so the task actually starts
    result = None
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        # Genuine timeout — the original printed "timed out" for *every*
        # exception type; the two paths are now reported distinctly.
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done():
            task.cancel()
    except Exception as e:
        print(e)
        print(f"Task failed: {model_str}")
        if not task.done():
            task.cancel()
    if task.done() and result is not None:
        # Single shared output file; the lock keeps concurrent saves from
        # clobbering each other mid-write.
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
68
+
69
+
70
+
71
+
72
def gen_fnseed(model_str, prompt, seed=1):
    """Synchronous wrapper around infer() for gradio event handlers.

    Returns the generated image path, a fallback error-image URL when
    inference raises, or None for an unused 'NA' slot.
    """
    if model_str == 'NA':
        return None
    # Create the loop *before* the try block so `loop` is always bound when
    # the finally clause runs — the original could hit NameError in `finally`
    # if new_event_loop() itself failed, masking the real error.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
        # Substitute a hosted error image so the UI shows something went wrong.
        with lock:
            image = "https://huggingface.co/spaces/Yntec/ToyWorld/resolve/main/error.png"
            result = image
    finally:
        loop.close()
    return result
88
+
89
# Top-level UI definition: one tab with prompt, seed controls, a grid of up
# to num_models image outputs, and a model-selection accordion.
with gr.Blocks(theme='Yntec/HaleyCH_Theme_Blue_Teal') as demo:
    with gr.Tab('🤗 2024 is the year this space was launched not the number of models! (they were brand new back then!) 🤗'):
        txt_input = gr.Textbox(label='Your prompt:', lines=4)
        gen_button = gr.Button('Generate up to 9 images in up to 3 minutes total')
        with gr.Row():
            seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 666666666)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
            seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
            seed_rand.click(randomize_seed, None, [seed], queue=False)
        #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)

        # Vestigial hook from the (commented-out) stop button. With inputs=None
        # gradio invokes the fn with zero arguments, so the original
        # `lambda s: ...` raised TypeError on every click — use a 0-arg lambda.
        gen_button.click(lambda: gr.update(interactive = True), None)
        gr.HTML(
            """
            <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
              <div>
              <body>
              <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
              </div>
              </body>
              </div>
            </div>
            """
        )
        with gr.Row():
            output = [gr.Image(label = m, min_width=480) for m in default_models]
            # Hidden textboxes carry each slot's current model id into gen_fnseed.
            current_models = [gr.Textbox(m, visible = False) for m in default_models]

        # Wire every slot: clicking Generate (or submitting the prompt) runs
        # gen_fnseed per model with the shared prompt and seed.
        for m, o in zip(current_models, output):
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
                              inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
            #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
        with gr.Accordion('Model selection'):
            model_choice = gr.CheckboxGroup(models, label = 'Untick the models you will not be using', value=default_models, interactive=True)
            #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
            # Keep the image grid and the hidden model-id textboxes in sync
            # with the checkbox selection.
            model_choice.change(update_imgbox, model_choice, output)
            model_choice.change(extend_choices, model_choice, current_models)
        with gr.Row():
            # Footer markup fixed: the stray closing </a> is removed and the
            # missing </div> added so the HTML is well-formed.
            gr.HTML(
            """
        <div class="footer">
        <p> For more than a hundred times more models (that's not a typo) check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!
        </p>
        </div>
        """
            )

demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(show_api=False, max_threads=400)