Upload 3 files
- app.py +17 -41
- model.py +28 -0
- multit2i.py +57 -18
app.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
+from model import models
 from multit2i import (
     load_models,
-    find_model_list,
     infer_multi,
     infer_multi_random,
     save_gallery_images,
@@ -14,37 +14,11 @@ from multit2i import (
     get_negative_suffix,
     get_recom_prompt_type,
     set_recom_prompt_preset,
+    get_tag_type,
 )
 
 
-models = [
-    'yodayo-ai/kivotos-xl-2.0',
-    'yodayo-ai/holodayo-xl-2.1',
-    'cagliostrolab/animagine-xl-3.1',
-    'votepurchase/ponyDiffusionV6XL',
-    'eienmojiki/Anything-XL',
-    'eienmojiki/Starry-XL-v5.2',
-    'digiplay/majicMIX_sombre_v2',
-    'digiplay/majicMIX_realistic_v7',
-    'votepurchase/counterfeitV30_v30',
-    'Meina/MeinaMix_V11',
-    'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
-    'kayfahaarukku/UrangDiffusion-1.1',
-    'Raelina/Rae-Diffusion-XL-V2',
-    'Raelina/Raemu-XL-V4',
-]
-
-
-# Examples:
-#models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
-#models = find_model_list("John6666", [], "", "last_modified", 20) # John6666's latest 20 models
-#models = find_model_list("John6666", ["anime"], "", "last_modified", 20) # John6666's latest 20 models with the 'anime' tag
-#models = find_model_list("John6666", [], "anime", "last_modified", 20) # John6666's latest 20 models without the 'anime' tag
-#models = find_model_list("", [], "", "last_modified", 20) # the latest 20 text-to-image models on Hugging Face
-#models = find_model_list("", [], "", "downloads", 20) # the 20 most downloaded text-to-image models on Hugging Face this month
-
-
-load_models(models, 10)
+load_models(models, 5)
 #load_models(models, 20) # Fetching 20 models at the same time. default: 5
 
 
@@ -54,18 +28,21 @@ css = """
 
 with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
     with gr.Column():
-        with gr.Accordion("Advanced settings", open=
-            with gr.Accordion("Recommended Prompt"):
+        with gr.Accordion("Advanced settings", open=True):
+            with gr.Accordion("Recommended Prompt", open=False):
                 recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
+                with gr.Row():
+                    positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
+                    positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
+                    negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[], visible=False)
+                    negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"], visible=False)
+            with gr.Accordion("Model", open=True):
+                model_name = gr.Dropdown(label="Select Model", show_label=False, choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0], allow_custom_value=True)
+                model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]), elem_id="model_info")
         with gr.Group():
-            neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="", visible=False)
+            clear_prompt = gr.Button(value="Clear Prompt 🗑️", size="sm", scale=1)
+            prompt = gr.Text(label="Prompt", lines=1, max_lines=8, placeholder="1girl, solo, ...", show_copy_button=True)
+            neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="", visible=False)
        with gr.Row():
             run_button = gr.Button("Generate Image", scale=6)
             random_button = gr.Button("Random Model 🎲", scale=3)
@@ -88,8 +65,6 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
         f"""This demo was created in reference to the following demos.
 - [Nymbo/Flood](https://huggingface.co/spaces/Nymbo/Flood).
 - [Yntec/ToyWorldXL](https://huggingface.co/spaces/Yntec/ToyWorldXL).
-<br>The first startup takes a mind-boggling amount of time, but not so much after the second.
-This is due to the time it takes for Gradio to generate an example image to cache.
 """
     )
     gr.DuplicateButton(value="Duplicate Space")
@@ -115,6 +90,7 @@ This is due to the time it takes for Gradio to generate an example image to cache.
         show_progress="full",
         show_api=True,
     ).success(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
+    clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)
     clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
     recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
                                [positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)
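Note on the wiring in the last hunk: `run_button.click(...).success(save_gallery_images, ...)` and the new `clear_prompt.click(lambda: None, ...)` both use Gradio's chained event listeners. Below is a minimal, self-contained sketch of the same pattern, assuming only stock Gradio; the components and handler are illustrative stand-ins, not the app's real ones.

import gradio as gr

def generate(prompt: str) -> str:
    return f"generated for: {prompt}"  # stand-in for the real inference call

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    clear_prompt = gr.Button("Clear Prompt 🗑️")
    result = gr.Text(label="Result")
    run_button = gr.Button("Generate Image")

    # .click() returns an event listener; .success() chains a follow-up step
    # that runs only if the preceding handler finished without raising.
    run_button.click(generate, [prompt], [result]
        ).success(lambda r: r.upper(), [result], [result], queue=False, show_api=False)
    # A handler returning None clears the target component.
    clear_prompt.click(lambda: None, None, [prompt], queue=False, show_api=False)

demo.launch()

This is why `save_gallery_images` only runs after a generation that actually succeeded, and `queue=False` keeps such housekeeping callbacks off the request queue.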
model.py
ADDED
@@ -0,0 +1,28 @@
+from multit2i import find_model_list
+
+
+models = [
+    'yodayo-ai/kivotos-xl-2.0',
+    'yodayo-ai/holodayo-xl-2.1',
+    'cagliostrolab/animagine-xl-3.1',
+    'votepurchase/ponyDiffusionV6XL',
+    'eienmojiki/Anything-XL',
+    'eienmojiki/Starry-XL-v5.2',
+    'digiplay/majicMIX_sombre_v2',
+    'digiplay/majicMIX_realistic_v7',
+    'votepurchase/counterfeitV30_v30',
+    'Meina/MeinaMix_V11',
+    'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
+    'kayfahaarukku/UrangDiffusion-1.1',
+    'Raelina/Rae-Diffusion-XL-V2',
+    'Raelina/Raemu-XL-V4',
+]
+
+
+# Examples:
+#models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
+#models = find_model_list("John6666", [], "", "last_modified", 20) # John6666's latest 20 models
+#models = find_model_list("John6666", ["anime"], "", "last_modified", 20) # John6666's latest 20 models with the 'anime' tag
+#models = find_model_list("John6666", [], "anime", "last_modified", 20) # John6666's latest 20 models without the 'anime' tag
+#models = find_model_list("", [], "", "last_modified", 20) # the latest 20 text-to-image models on Hugging Face
+#models = find_model_list("", [], "", "downloads", 20) # the 20 most downloaded text-to-image models on Hugging Face this month
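`find_model_list` is defined in multit2i.py, and its body is not part of this diff. As rough orientation only, here is a hypothetical sketch of what a helper with this signature might do with a recent `huggingface_hub`; the argument order (author, required tags, excluded tag, sort key, limit) is taken from the comments above, and everything else is assumed.

from huggingface_hub import HfApi

def find_model_list(author: str = "", tags: list = [], not_tag: str = "",
                    sort: str = "last_modified", limit: int = 30) -> list:
    api = HfApi()
    # over-fetch, then filter down to `limit` usable repos
    candidates = api.list_models(author=author or None, task="text-to-image",
                                 tags=tags or None, sort=sort, limit=limit * 5)
    found = []
    for model in candidates:
        if model.private or model.gated: continue               # skip unusable repos
        if not_tag and not_tag in (model.tags or []): continue  # exclusion filter
        found.append(model.id)
        if len(found) >= limit: break
    return found

#print(find_model_list("John6666", [], "", "downloads", 5))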
multit2i.py
CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 import asyncio
-from threading import RLock
+from threading import RLock
 from pathlib import Path
 
 
@@ -70,8 +70,7 @@ def get_t2i_model_info_dict(repo_id: str):
     elif 'diffusers:StableDiffusion3Pipeline' in tags: info["ver"] = "SD3"
     else: info["ver"] = "Other"
     info["url"] = f"https://huggingface.co/{repo_id}/"
-    if model.card_data and model.card_data.tags:
-        info["tags"] = model.card_data.tags
+    info["tags"] = model.card_data.tags if model.card_data and model.card_data.tags else []
     info["downloads"] = model.downloads
     info["likes"] = model.likes
     info["last_modified"] = model.last_modified.strftime("lastmod: %Y-%m-%d")
@@ -108,31 +107,61 @@ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
     return gr.update(value=output_images), gr.update(value=output_paths)
 
 
+def load_from_model(model_name: str, hf_token: str = None):
+    import httpx
+    import huggingface_hub
+    from gradio.exceptions import ModelNotFoundError
+    model_url = f"https://huggingface.co/{model_name}"
+    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
+    print(f"Fetching model from: {model_url}")
+
+    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
+    response = httpx.request("GET", api_url, headers=headers)
+    if response.status_code != 200:
+        raise ModelNotFoundError(
+            f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
+        )
+    headers["X-Wait-For-Model"] = "true"
+    client = huggingface_hub.InferenceClient(model=model_name, headers=headers, token=hf_token)
+    inputs = gr.components.Textbox(label="Input")
+    outputs = gr.components.Image(label="Output")
+    fn = client.text_to_image
+
+    def query_huggingface_inference_endpoints(*data):
+        return fn(*data)
+
+    interface_info = {
+        "fn": query_huggingface_inference_endpoints,
+        "inputs": inputs,
+        "outputs": outputs,
+        "title": model_name,
+    }
+    return gr.Interface(**interface_info)
+
+
 def load_model(model_name: str):
     global loaded_models
     global model_info_dict
     if model_name in loaded_models.keys(): return loaded_models[model_name]
     try:
-        loaded_models[model_name] = gr.load(f'models/{model_name}')
+        loaded_models[model_name] = load_from_model(model_name)
         print(f"Loaded: {model_name}")
     except Exception as e:
-        if model_name in loaded_models.keys(): del loaded_models[model_name]
+        if model_name in loaded_models.keys(): del loaded_models[model_name]
         print(f"Failed to load: {model_name}")
         print(e)
         return None
     try:
+        model_info_dict[model_name] = get_t2i_model_info_dict(model_name)
+        print(f"Assigned: {model_name}")
     except Exception as e:
+        if model_name in model_info_dict.keys(): del model_info_dict[model_name]
+        print(f"Failed to assign: {model_name}")
         print(e)
     return loaded_models[model_name]
 
 
-async def async_load_models(models: list, limit: int=5):
+async def async_load_models(models: list, limit: int=5):
     sem = asyncio.Semaphore(limit)
     async def async_load_model(model: str):
         async with sem:
@@ -247,6 +276,16 @@ def get_negative_suffix():
     return list(negative_suffix.keys())
 
 
+def get_tag_type(pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
+    tag_type = "danbooru"
+    words = pos_pre + pos_suf + neg_pre + neg_suf
+    for word in words:
+        if "Pony" in word:
+            tag_type = "e621"
+            break
+    return tag_type
+
+
 def get_model_info_md(model_name: str):
     if model_name in model_info_dict.keys(): return model_info_dict[model_name].get("md", "")
 
@@ -277,13 +316,13 @@ def infer(prompt: str, neg_prompt: str, model_name: str):
 
 async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float, model_name: str,
                       pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
+    from tqdm.asyncio import tqdm_asyncio
     image_num = int(image_num)
     images = results if results else []
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for i in range(image_num)]
-    results = await asyncio.gather(*tasks, return_exceptions=True)
+    #results = await asyncio.gather(*tasks, return_exceptions=True)
+    results = await tqdm_asyncio.gather(*tasks)
     if not results: results = []
     for result in results:
         with lock:
@@ -293,7 +332,7 @@ async def infer_multi(prompt: str, neg_prompt: str, results: list, image_num: float,
 
 async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_num: float,
                              pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], progress=gr.Progress(track_tqdm=True)):
+    from tqdm.asyncio import tqdm_asyncio
     import random
     image_num = int(image_num)
     images = results if results else []
@@ -301,8 +340,8 @@ async def infer_multi_random(prompt: str, neg_prompt: str, results: list, image_num: float,
     model_names = random.choices(list(loaded_models.keys()), k = image_num)
     prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
     tasks = [asyncio.to_thread(infer, prompt, neg_prompt, model_name) for model_name in model_names]
-    results = await asyncio.gather(*tasks, return_exceptions=True)
+    #results = await asyncio.gather(*tasks, return_exceptions=True)
+    results = await tqdm_asyncio.gather(*tasks)
     if not results: results = []
     for result in results:
         with lock:
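The heart of this commit is the new `load_from_model` above: it replaces `gr.load(f'models/{model_name}')` with a hand-rolled wrapper around `huggingface_hub.InferenceClient`, pins the interface to Textbox in, Image out, and sets the `X-Wait-For-Model: true` header so serverless Inference API calls block while a cold model loads rather than failing with a 503. A minimal usage sketch; the model name is just one entry from model.py:

demo = load_from_model('yodayo-ai/kivotos-xl-2.0')  # returns a gr.Interface
image = demo.fn('1girl, solo, looking at viewer')   # calls InferenceClient.text_to_image, returns a PIL image
#demo.launch()  # the wrapped model can also be served on its own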
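The new `get_tag_type` is a small vocabulary switch: if any selected prefix or suffix preset name contains "Pony", prompts are treated as e621-style tags instead of danbooru-style ones. Usage, with illustrative preset names (the real keys live in the preset dicts, which this diff does not show):

print(get_tag_type(pos_suf=["Common"]))          # -> 'danbooru'
print(get_tag_type(pos_suf=["Pony", "Common"]))  # -> 'e621'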
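Finally, `infer_multi` and `infer_multi_random` now await `tqdm_asyncio.gather` instead of `asyncio.gather`, which gives `gr.Progress(track_tqdm=True)` a tqdm bar to mirror. Note that the commented-out line also dropped `return_exceptions=True`, so a single failed task now propagates as an exception instead of appearing in the results list. A self-contained sketch of the pattern, with a stand-in worker in place of `infer`:

import asyncio
import time
from tqdm.asyncio import tqdm_asyncio

def worker(i: int) -> int:
    time.sleep(0.2)  # stands in for a blocking model call
    return i * i

async def main():
    # asyncio.to_thread runs each blocking call in the default thread pool
    tasks = [asyncio.to_thread(worker, i) for i in range(8)]
    # drop-in replacement for asyncio.gather that renders a tqdm progress bar
    results = await tqdm_asyncio.gather(*tasks)
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]

asyncio.run(main())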