John6666 committed on
Commit
9ee2570
0 Parent(s):

Super-squash branch 'main' using huggingface_hub

.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
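
Every pattern above routes matching files through Git LFS (`filter=lfs diff=lfs merge=lfs`) and marks them as binary (`-text`). As a rough illustration of how such glob patterns select files, here is a minimal Python sketch; the file names are made up, and `fnmatch` only approximates git's wildmatch semantics (e.g. for `**`):

```python
from fnmatch import fnmatch

# a subset of the LFS patterns declared above
lfs_patterns = ["*.safetensors", "*.ckpt", "*.tar.*", "*tfevents*"]

for path in ["model.safetensors", "README.md", "runs/tfevents.1700000000"]:  # hypothetical files
    is_lfs = any(fnmatch(path, pat) for pat in lfs_patterns)
    print(f"{path}: {'LFS pointer' if is_lfs else 'plain git object'}")
```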
README.md ADDED
@@ -0,0 +1,14 @@
1
+ ---
2
+ title: Votepurchase Multiple Model (SD1.5/SDXL Text-to-Image)
3
+ emoji: 🖼🖼️📦
4
+ colorFrom: purple
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 4.41.0
8
+ app_file: app.py
9
+ license: mit
10
+ short_description: Text-to-Image
11
+ pinned: true
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
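
The block between the `---` markers is the Spaces YAML front matter: `sdk`/`sdk_version` pin Gradio 4.41.0 and `app_file` names the entry point. A minimal sketch of pulling that header out of a README, assuming PyYAML is installed:

```python
import yaml  # pip install pyyaml

def read_front_matter(readme_text: str) -> dict:
    """Return the YAML header delimited by the leading '---' lines, or {}."""
    if readme_text.startswith("---"):
        _, header, _ = readme_text.split("---", 2)
        return yaml.safe_load(header)
    return {}

sample = "---\nsdk: gradio\nsdk_version: 4.41.0\napp_file: app.py\n---\nBody"
print(read_front_matter(sample))  # {'sdk': 'gradio', 'sdk_version': '4.41.0', 'app_file': 'app.py'}
```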
app.py ADDED
@@ -0,0 +1,449 @@
1
+ import spaces
2
+ import gradio as gr
3
+ import numpy as np
4
+
5
+ # DiffuseCraft
6
+ from dc import (
7
+ infer,
8
+ _infer,
9
+ pass_result,
10
+ get_diffusers_model_list,
11
+ get_samplers,
12
+ get_vaes,
13
+ enable_model_recom_prompt,
14
+ enable_diffusers_model_detail,
15
+ get_t2i_model_info,
16
+ get_all_lora_tupled_list,
17
+ update_loras,
18
+ apply_lora_prompt,
19
+ download_my_lora,
20
+ search_civitai_lora,
21
+ select_civitai_lora,
22
+ search_civitai_lora_json,
23
+ preset_quality,
24
+ preset_styles,
25
+ process_style_prompt,
26
+ )
27
+ # Translator
28
+ from llmdolphin import (
29
+ dolphin_respond_auto,
30
+ dolphin_parse_simple,
31
+ get_llm_formats,
32
+ get_dolphin_model_format,
33
+ get_dolphin_models,
34
+ get_dolphin_model_info,
35
+ select_dolphin_model,
36
+ select_dolphin_format,
37
+ get_dolphin_sysprompt,
38
+ )
39
+ # Tagger
40
+ from tagger.v2 import (
41
+ v2_upsampling_prompt,
42
+ V2_ALL_MODELS,
43
+ )
44
+ from tagger.utils import (
45
+ gradio_copy_text,
46
+ gradio_copy_prompt,
47
+ COPY_ACTION_JS,
48
+ V2_ASPECT_RATIO_OPTIONS,
49
+ V2_RATING_OPTIONS,
50
+ V2_LENGTH_OPTIONS,
51
+ V2_IDENTITY_OPTIONS
52
+ )
53
+ from tagger.tagger import (
54
+ predict_tags_wd,
55
+ convert_danbooru_to_e621_prompt,
56
+ remove_specific_prompt,
57
+ insert_recom_prompt,
58
+ compose_prompt_to_copy,
59
+ translate_prompt,
60
+ select_random_character,
61
+ )
62
+ from tagger.fl2sd3longcap import (
63
+ predict_tags_fl2_sd3,
64
+ )
65
+ def description_ui():
66
+ gr.Markdown(
67
+ """
68
+ ## Danbooru Tags Transformer V2 Demo with WD Tagger & SD3 Long Captioner
69
+ (Image =>) Prompt => Upsampled longer prompt
70
+ - Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
71
+ - Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)\
72
+ , gokaygokay's [Florence-2-SD3-Captioner](https://huggingface.co/gokaygokay/Florence-2-SD3-Captioner)
73
+ """
74
+ )
75
+
76
+
77
+ MAX_SEED = np.iinfo(np.int32).max
78
+ MAX_IMAGE_SIZE = 1216
79
+
80
+ css = """
81
+ #container {
82
+ margin: 0 auto;
83
+ }
84
+ #col-container {
85
+ margin: 0 auto;
86
+ max-width: 520px;
87
+ }
88
+ #model-info { text-align: center; }
89
+ """
90
+
91
+ with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
92
+ with gr.Tab("Image Generator"):
93
+ with gr.Column(elem_id="col-container"):
94
+
95
+ prompt = gr.Text(
96
+ label="Prompt",
97
+ show_label=False,
98
+ lines=1,
99
+ max_lines=8,
100
+ placeholder="Enter your prompt",
101
+ container=False,
102
+ )
103
+
104
+ with gr.Row():
105
+ run_button = gr.Button("Run")
106
+ run_translate_button = gr.Button("Translate")
107
+
108
+ result = gr.Image(label="Result", show_label=False, interactive=False, show_download_button=True, show_share_button=False, container=True)
109
+
110
+ with gr.Accordion("Advanced Settings", open=False):
111
+
112
+ negative_prompt = gr.Text(
113
+ label="Negative prompt",
114
+ lines=1,
115
+ max_lines=6,
116
+ placeholder="Enter a negative prompt",
117
+ value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly"
118
+ )
119
+
120
+ seed = gr.Slider(
121
+ label="Seed",
122
+ minimum=0,
123
+ maximum=MAX_SEED,
124
+ step=1,
125
+ value=0,
126
+ )
127
+
128
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
129
+
130
+ with gr.Row():
131
+ width = gr.Slider(
132
+ label="Width",
133
+ minimum=256,
134
+ maximum=MAX_IMAGE_SIZE,
135
+ step=32,
136
+ value=1024,#832,
137
+ )
138
+
139
+ height = gr.Slider(
140
+ label="Height",
141
+ minimum=256,
142
+ maximum=MAX_IMAGE_SIZE,
143
+ step=32,
144
+ value=1024,#1216,
145
+ )
146
+
147
+ with gr.Row():
148
+ guidance_scale = gr.Slider(
149
+ label="Guidance scale",
150
+ minimum=0.0,
151
+ maximum=30.0,
152
+ step=0.1,
153
+ value=7,
154
+ )
155
+
156
+ num_inference_steps = gr.Slider(
157
+ label="Number of inference steps",
158
+ minimum=1,
159
+ maximum=100,
160
+ step=1,
161
+ value=28,
162
+ )
163
+
164
+ with gr.Group():
165
+ model_name = gr.Dropdown(label="Model", info="You can enter a Hugging Face model repo_id you want to use.", choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0], allow_custom_value=True, interactive=True)
166
+ model_info = gr.Markdown(elem_id="model-info")
167
+ model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
168
+
169
+ sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")
170
+
171
+ chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False) # component for auto-translation
172
+
173
+ with gr.Accordion("LoRA", open=True, visible=True):
174
+ with gr.Group():
175
+ lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
176
+ lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
177
+ with gr.Row():
178
+ lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
179
+ lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
180
+ lora1_md = gr.Markdown(value="", visible=False)
181
+ with gr.Group():
182
+ lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
183
+ lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
184
+ with gr.Row():
185
+ lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
186
+ lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
187
+ lora2_md = gr.Markdown(value="", visible=False)
188
+ with gr.Group():
189
+ lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
190
+ lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
191
+ with gr.Row():
192
+ lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
193
+ lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
194
+ lora3_md = gr.Markdown(value="", visible=False)
195
+ with gr.Group():
196
+ lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
197
+ lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
198
+ with gr.Row():
199
+ lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
200
+ lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
201
+ lora4_md = gr.Markdown(value="", visible=False)
202
+ with gr.Group():
203
+ lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
204
+ lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
205
+ with gr.Row():
206
+ lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
207
+ lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
208
+ lora5_md = gr.Markdown(value="", visible=False)
209
+ with gr.Accordion("From URL", open=True, visible=True):
210
+ with gr.Row():
211
+ lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
212
+ lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0"], value=["Pony", "SDXL 1.0"])
213
+ lora_search_civitai_submit = gr.Button("Search on Civitai")
214
+ lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
215
+ lora_search_civitai_json = gr.JSON(value={}, visible=False)
216
+ lora_search_civitai_desc = gr.Markdown(value="", visible=False)
217
+ lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
218
+ lora_download = gr.Button("Get and set LoRA and apply to prompt")
219
+
220
+ vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
221
+ recom_prompt = gr.Checkbox(label="Recommended prompt", value=True)
222
+ quality_selector = gr.Dropdown(label="Quality Tags Presets", interactive=True, choices=list(preset_quality.keys()), value="None")
223
+ style_selector = gr.Dropdown(label="Style Preset", interactive=True, choices=list(preset_styles.keys()), value="None")
224
+
225
+ with gr.Accordion("Translation Settings", open=False):
226
+ chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
227
+ chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
228
+ chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
229
+ chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
230
+ chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
231
+ chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
232
+ chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
233
+ chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
234
+ chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
235
+
236
+ examples = gr.Examples(
237
+ examples = [
238
+ ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
239
+ ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
240
+ ["kafuu chino, 1girl, solo"],
241
+ ["1girl"],
242
+ ["beautiful sunset"],
243
+ ],
244
+ inputs=[prompt],
245
+ )
246
+
247
+ gr.on( #lambda x: None, inputs=None, outputs=result).then(
248
+ triggers=[run_button.click, prompt.submit],
249
+ fn=infer,
250
+ inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
251
+ guidance_scale, num_inference_steps, model_name,
252
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
253
+ sampler, vae_model],
254
+ outputs=[result],
255
+ queue=True,
256
+ show_progress="full",
257
+ show_api=True,
258
+ )
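
For readers unfamiliar with `gr.on`: it binds one handler to several triggers at once, so clicking Run and pressing Enter in the prompt box both start `infer`. A stripped-down sketch of the same pattern (components and handler are illustrative):

```python
import gradio as gr

def shout(text: str) -> str:
    return text.upper()

with gr.Blocks() as sketch:
    box = gr.Textbox(label="Prompt")
    btn = gr.Button("Run")
    out = gr.Textbox(label="Result")
    # one handler, two triggers: button click or textbox submit
    gr.on(triggers=[btn.click, box.submit], fn=shout, inputs=[box], outputs=[out])

# sketch.launch()  # uncomment to try it locally
```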
259
+
260
+ gr.on( #lambda x: None, inputs=None, outputs=result).then(
261
+ triggers=[run_translate_button.click],
262
+ fn=_infer, # dummy fn for api
263
+ inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
264
+ guidance_scale, num_inference_steps, model_name,
265
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
266
+ sampler, vae_model],
267
+ outputs=[result],
268
+ queue=False,
269
+ show_api=True,
270
+ api_name="infer_translate",
271
+ ).success(
272
+ fn=dolphin_respond_auto,
273
+ inputs=[prompt, chatbot],
274
+ outputs=[chatbot],
275
+ queue=True,
276
+ show_progress="full",
277
+ show_api=False,
278
+ ).success(
279
+ fn=dolphin_parse_simple,
280
+ inputs=[prompt, chatbot],
281
+ outputs=[prompt],
282
+ queue=False,
283
+ show_api=False,
284
+ ).success(
285
+ fn=infer,
286
+ inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
287
+ guidance_scale, num_inference_steps, model_name,
288
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
289
+ sampler, vae_model],
290
+ outputs=[result],
291
+ queue=True,
292
+ show_progress="full",
293
+ show_api=False,
294
+ ).success(lambda: None, None, chatbot, queue=False, show_api=False)\
295
+ .success(pass_result, [result], [result], queue=False, show_api=False) # dummy fn for api
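
This translate path is an event chain: each `.success()` step runs only if the previous one finished without raising, so the flow is API stub → auto-translate into the hidden chatbot → parse the reply back into the prompt → generate → clear the chatbot and republish the result. A minimal sketch of the chaining mechanism itself (handlers are placeholders):

```python
import gradio as gr

def translate(text: str) -> str:
    return text + " (translated)"   # stand-in for the LLM call

def generate(text: str) -> str:
    return f"image for: {text}"     # stand-in for the diffusion call

with gr.Blocks() as sketch:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    btn = gr.Button("Translate & Run")
    # generate fires only if translate completed successfully
    btn.click(translate, [prompt], [prompt]).success(generate, [prompt], [result])
```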
296
+
297
+ gr.on(
298
+ triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
299
+ lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
300
+ fn=update_loras,
301
+ inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
302
+ outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
303
+ lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
304
+ lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
305
+ queue=False,
306
+ trigger_mode="once",
307
+ show_api=False,
308
+ )
309
+ lora1_copy.click(apply_lora_prompt, [prompt, lora1_info], [prompt], queue=False, show_api=False)
310
+ lora2_copy.click(apply_lora_prompt, [prompt, lora2_info], [prompt], queue=False, show_api=False)
311
+ lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
312
+ lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
313
+ lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
314
+
315
+ gr.on(
316
+ triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
317
+ fn=search_civitai_lora,
318
+ inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
319
+ outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
320
+ scroll_to_output=True,
321
+ queue=True,
322
+ show_api=False,
323
+ )
324
+ lora_search_civitai_json.change(search_civitai_lora_json, [lora_search_civitai_query, lora_search_civitai_basemodel], [lora_search_civitai_json], queue=True, show_api=True) # fn for api
325
+ lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
326
+ gr.on(
327
+ triggers=[lora_download.click, lora_download_url.submit],
328
+ fn=download_my_lora,
329
+ inputs=[lora_download_url, lora1, lora2, lora3, lora4, lora5],
330
+ outputs=[lora1, lora2, lora3, lora4, lora5],
331
+ scroll_to_output=True,
332
+ queue=True,
333
+ show_api=False,
334
+ )
335
+
336
+ recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
337
+ gr.on(
338
+ triggers=[quality_selector.change, style_selector.change],
339
+ fn=process_style_prompt,
340
+ inputs=[prompt, negative_prompt, style_selector, quality_selector],
341
+ outputs=[prompt, negative_prompt],
342
+ queue=False,
343
+ trigger_mode="once",
344
+ )
345
+
346
+ model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
347
+ model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
348
+
349
+ chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
350
+ .success(lambda: None, None, chatbot, queue=False, show_api=False)
351
+ chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
352
+ .success(lambda: None, None, chatbot, queue=False, show_api=False)
353
+
354
+ # Tagger
355
+ with gr.Tab("Tags Transformer with Tagger"):
356
+ with gr.Column():
357
+ with gr.Group():
358
+ input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
359
+ with gr.Accordion(label="Advanced options", open=False):
360
+ general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
361
+ character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
362
+ input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
363
+ recom_prompt = gr.Radio(label="Insert recommended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
364
+ image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
365
+ keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
366
+ generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
367
+ with gr.Group():
368
+ with gr.Row():
369
+ input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
370
+ input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
371
+ random_character = gr.Button(value="Random character 🎲", size="sm")
372
+ input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
373
+ input_tags_to_copy = gr.Textbox(value="", visible=False)
374
+ with gr.Row():
375
+ copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
376
+ copy_prompt_btn_input = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
377
+ translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
378
+ tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
379
+ input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
380
+ with gr.Accordion(label="Advanced options", open=False):
381
+ input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
382
+ input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
383
+ input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify details of the subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
384
+ input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costume, ...", value="censored")
385
+ model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
386
+ dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
387
+ recom_animagine = gr.Textbox(label="Animagine recommended prompt", value="Animagine", visible=False)
388
+ recom_pony = gr.Textbox(label="Pony recommended prompt", value="Pony", visible=False)
389
+ generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
390
+ with gr.Row():
391
+ with gr.Group():
392
+ output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
393
+ with gr.Row():
394
+ copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
395
+ copy_prompt_btn = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
396
+ with gr.Group():
397
+ output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
398
+ with gr.Row():
399
+ copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
400
+ copy_prompt_btn_pony = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
401
+
402
+ random_character.click(select_random_character, [input_copyright, input_character], [input_copyright, input_character], queue=False, show_api=False)
403
+
404
+ translate_input_prompt_button.click(translate_prompt, [input_general], [input_general], queue=False, show_api=False)
405
+ translate_input_prompt_button.click(translate_prompt, [input_character], [input_character], queue=False, show_api=False)
406
+ translate_input_prompt_button.click(translate_prompt, [input_copyright], [input_copyright], queue=False, show_api=False)
407
+
408
+ generate_from_image_btn.click(
409
+ lambda: ("", "", ""), None, [input_copyright, input_character, input_general], queue=False, show_api=False,
410
+ ).success(
411
+ predict_tags_wd,
412
+ [input_image, input_general, image_algorithms, general_threshold, character_threshold],
413
+ [input_copyright, input_character, input_general, copy_input_btn],
414
+ show_api=False,
415
+ ).success(
416
+ predict_tags_fl2_sd3, [input_image, input_general, image_algorithms], [input_general], show_api=False,
417
+ ).success(
418
+ remove_specific_prompt, [input_general, keep_tags], [input_general], queue=False, show_api=False,
419
+ ).success(
420
+ convert_danbooru_to_e621_prompt, [input_general, input_tag_type], [input_general], queue=False, show_api=False,
421
+ ).success(
422
+ insert_recom_prompt, [input_general, dummy_np, recom_prompt], [input_general, dummy_np], queue=False, show_api=False,
423
+ ).success(lambda: gr.update(interactive=True), None, [copy_prompt_btn_input], queue=False, show_api=False)
424
+ copy_input_btn.click(compose_prompt_to_copy, [input_character, input_copyright, input_general], [input_tags_to_copy], show_api=False)\
425
+ .success(gradio_copy_text, [input_tags_to_copy], js=COPY_ACTION_JS, show_api=False)
426
+ copy_prompt_btn_input.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy], show_api=False)\
427
+ .success(gradio_copy_prompt, inputs=[input_tags_to_copy], outputs=[prompt], show_api=False)
428
+
429
+ generate_btn.click(
430
+ v2_upsampling_prompt,
431
+ [model_name, input_copyright, input_character, input_general,
432
+ input_rating, input_aspect_ratio, input_length, input_identity, input_ban_tags],
433
+ [output_text],
434
+ show_api=False,
435
+ ).success(
436
+ convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False, show_api=False,
437
+ ).success(
438
+ insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False, show_api=False,
439
+ ).success(
440
+ insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
441
+ ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
442
+ None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
443
+ copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
444
+ copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
445
+ copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
446
+ copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)
447
+
448
+ demo.queue()
449
+ demo.launch()
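
Because the infer events are registered with `show_api=True`, the Space can also be driven remotely with `gradio_client`. A hedged sketch follows; the Space id, endpoint name, and argument order are assumptions, and the real `/infer` signature also includes the LoRA, sampler, and VAE parameters, so consult the Space's API page or `client.view_api()` before relying on it:

```python
from gradio_client import Client

client = Client("username/space-name")  # hypothetical Space id
result = client.predict(
    "1girl, solo",          # prompt
    "lowres, bad anatomy",  # negative_prompt
    0,                      # seed
    True,                   # randomize_seed
    1024, 1024,             # width, height
    7.0, 28,                # guidance_scale, num_inference_steps
    api_name="/infer",      # assumed endpoint name; check client.view_api()
)
print(result)  # typically a file path to the generated image
```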
dc.py ADDED
@@ -0,0 +1,1328 @@
1
+ import spaces
2
+ import os
3
+ from stablepy import Model_Diffusers
4
+ from stablepy.diffusers_vanilla.model import scheduler_names
5
+ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
6
+ import torch
7
+ import re
8
+ import shutil
9
+ import random
10
+ from stablepy import (
11
+ CONTROLNET_MODEL_IDS,
12
+ VALID_TASKS,
13
+ T2I_PREPROCESSOR_NAME,
14
+ FLASH_LORA,
15
+ SCHEDULER_CONFIG_MAP,
16
+ scheduler_names,
17
+ IP_ADAPTER_MODELS,
18
+ IP_ADAPTERS_SD,
19
+ IP_ADAPTERS_SDXL,
20
+ REPO_IMAGE_ENCODER,
21
+ ALL_PROMPT_WEIGHT_OPTIONS,
22
+ SD15_TASKS,
23
+ SDXL_TASKS,
24
+ )
25
+ import urllib.parse
26
+ import gradio as gr
27
+ from PIL import Image
28
+ import IPython.display
29
+ import time, json
30
+ from IPython.utils import capture
31
+ import logging
32
+ logging.getLogger("diffusers").setLevel(logging.ERROR)
33
+ import diffusers
34
+ diffusers.utils.logging.set_verbosity(40)
35
+ import warnings
36
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
37
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
38
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
39
+ from stablepy import logger
40
+ logger.setLevel(logging.CRITICAL)
41
+
42
+ from env import (
43
+ hf_token,
44
+ hf_read_token, # to use only for private repos
45
+ CIVITAI_API_KEY,
46
+ HF_LORA_PRIVATE_REPOS1,
47
+ HF_LORA_PRIVATE_REPOS2,
48
+ HF_LORA_ESSENTIAL_PRIVATE_REPO,
49
+ HF_VAE_PRIVATE_REPO,
50
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO,
51
+ HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
52
+ directory_models,
53
+ directory_loras,
54
+ directory_vaes,
55
+ directory_embeds,
56
+ directory_embeds_sdxl,
57
+ directory_embeds_positive_sdxl,
58
+ load_diffusers_format_model,
59
+ download_model_list,
60
+ download_lora_list,
61
+ download_vae_list,
62
+ download_embeds,
63
+ )
64
+
65
+ preprocessor_controlnet = {
66
+ "openpose": [
67
+ "Openpose",
68
+ "None",
69
+ ],
70
+ "scribble": [
71
+ "HED",
72
+ "Pidinet",
73
+ "None",
74
+ ],
75
+ "softedge": [
76
+ "Pidinet",
77
+ "HED",
78
+ "HED safe",
79
+ "Pidinet safe",
80
+ "None",
81
+ ],
82
+ "segmentation": [
83
+ "UPerNet",
84
+ "None",
85
+ ],
86
+ "depth": [
87
+ "DPT",
88
+ "Midas",
89
+ "None",
90
+ ],
91
+ "normalbae": [
92
+ "NormalBae",
93
+ "None",
94
+ ],
95
+ "lineart": [
96
+ "Lineart",
97
+ "Lineart coarse",
98
+ "Lineart (anime)",
99
+ "None",
100
+ "None (anime)",
101
+ ],
102
+ "shuffle": [
103
+ "ContentShuffle",
104
+ "None",
105
+ ],
106
+ "canny": [
107
+ "Canny"
108
+ ],
109
+ "mlsd": [
110
+ "MLSD"
111
+ ],
112
+ "ip2p": [
113
+ "ip2p"
114
+ ],
115
+ }
116
+
117
+ task_stablepy = {
118
+ 'txt2img': 'txt2img',
119
+ 'img2img': 'img2img',
120
+ 'inpaint': 'inpaint',
121
+ # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
122
+ # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
123
+ # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
124
+ # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
125
+ # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
126
+ 'openpose ControlNet': 'openpose',
127
+ 'canny ControlNet': 'canny',
128
+ 'mlsd ControlNet': 'mlsd',
129
+ 'scribble ControlNet': 'scribble',
130
+ 'softedge ControlNet': 'softedge',
131
+ 'segmentation ControlNet': 'segmentation',
132
+ 'depth ControlNet': 'depth',
133
+ 'normalbae ControlNet': 'normalbae',
134
+ 'lineart ControlNet': 'lineart',
135
+ # 'lineart_anime ControlNet': 'lineart_anime',
136
+ 'shuffle ControlNet': 'shuffle',
137
+ 'ip2p ControlNet': 'ip2p',
138
+ 'optical pattern ControlNet': 'pattern',
139
+ 'tile realistic': 'sdxl_tile_realistic',
140
+ }
141
+
142
+ task_model_list = list(task_stablepy.keys())
143
+
144
+
145
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
146
+ url = url.strip()
147
+
148
+ if "drive.google.com" in url:
149
+ original_dir = os.getcwd()
150
+ os.chdir(directory)
151
+ os.system(f"gdown --fuzzy {url}")
152
+ os.chdir(original_dir)
153
+ elif "huggingface.co" in url:
154
+ url = url.replace("?download=true", "")
155
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
156
+ if "/blob/" in url:
157
+ url = url.replace("/blob/", "/resolve/")
158
+ user_header = f'"Authorization: Bearer {hf_token}"'
159
+ if hf_token:
160
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
161
+ else:
162
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
163
+ elif "civitai.com" in url:
164
+ if "?" in url:
165
+ url = url.split("?")[0]
166
+ if civitai_api_key:
167
+ url = url + f"?token={civitai_api_key}"
168
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
169
+ else:
170
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
171
+ else:
172
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
173
+
174
+
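
`download_things` dispatches on the URL's host: Google Drive links go through `gdown`, Hugging Face links are rewritten from `/blob/` to `/resolve/` and fetched with `aria2c` (adding a bearer token when one is supplied), and Civitai links get the API key appended as a query token. A usage sketch with placeholder values:

```python
# hypothetical call: fetch a checkpoint into ./models with 16 aria2c connections
download_things(
    directory="models",
    url="https://huggingface.co/someuser/somemodel/blob/main/model.safetensors",
    hf_token="hf_xxx",       # placeholder token; omit for public files
    civitai_api_key="",
)
# the /blob/ URL is rewritten to /resolve/ before aria2c runs
```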
175
+ def get_model_list(directory_path):
176
+ model_list = []
177
+ valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
178
+
179
+ for filename in os.listdir(directory_path):
180
+ if os.path.splitext(filename)[1] in valid_extensions:
181
+ name_without_extension = os.path.splitext(filename)[0]
182
+ file_path = os.path.join(directory_path, filename)
183
+ # model_list.append((name_without_extension, file_path))
184
+ model_list.append(file_path)
185
+ print('\033[34mFILE: ' + file_path + '\033[0m')
186
+ return model_list
187
+
188
+
189
+ def process_string(input_string):
190
+ parts = input_string.split('/')
191
+
192
+ if len(parts) == 2:
193
+ first_element = parts[1]
194
+ complete_string = input_string
195
+ result = (first_element, complete_string)
196
+ return result
197
+ else:
198
+ return None
199
+
200
+ ## BEGIN MOD
201
+ from modutils import (
202
+ to_list,
203
+ list_uniq,
204
+ list_sub,
205
+ get_model_id_list,
206
+ get_tupled_embed_list,
207
+ get_tupled_model_list,
208
+ get_lora_model_list,
209
+ download_private_repo,
210
+ )
211
+
212
+ # - **Download Models**
213
+ download_model = ", ".join(download_model_list)
214
+ # - **Download VAEs**
215
+ download_vae = ", ".join(download_vae_list)
216
+ # - **Download LoRAs**
217
+ download_lora = ", ".join(download_lora_list)
218
+
219
+ #download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, directory_loras, True)
220
+ download_private_repo(HF_VAE_PRIVATE_REPO, directory_vaes, False)
221
+
222
+ load_diffusers_format_model = list_uniq(load_diffusers_format_model + get_model_id_list())
223
+ ## END MOD
224
+
225
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
226
+ hf_token = os.environ.get("HF_TOKEN")
227
+
228
+ # Download stuffs
229
+ for url in [url.strip() for url in download_model.split(',')]:
230
+ if not os.path.exists(f"./models/{url.split('/')[-1]}"):
231
+ download_things(directory_models, url, hf_token, CIVITAI_API_KEY)
232
+ for url in [url.strip() for url in download_vae.split(',')]:
233
+ if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
234
+ download_things(directory_vaes, url, hf_token, CIVITAI_API_KEY)
235
+ for url in [url.strip() for url in download_lora.split(',')]:
236
+ if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
237
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
238
+
239
+ # Download Embeddings
240
+ for url_embed in download_embeds:
241
+ if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
242
+ download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)
243
+
244
+ # Build list models
245
+ embed_list = get_model_list(directory_embeds)
246
+ model_list = get_model_list(directory_models)
247
+ model_list = load_diffusers_format_model + model_list
248
+ ## BEGIN MOD
249
+ lora_model_list = get_lora_model_list()
250
+ vae_model_list = get_model_list(directory_vaes)
251
+ vae_model_list.insert(0, "None")
252
+
253
+ #download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, directory_embeds_sdxl, False)
254
+ #download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, directory_embeds_positive_sdxl, False)
255
+ embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directory_embeds_positive_sdxl)
256
+
257
+ def get_embed_list(pipeline_name):
258
+ return get_tupled_embed_list(embed_sdxl_list if pipeline_name == "StableDiffusionXLPipeline" else embed_list)
259
+
260
+
261
+ ## END MOD
262
+
263
+ print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
264
+
265
+ upscaler_dict_gui = {
266
+ None : None,
267
+ "Lanczos" : "Lanczos",
268
+ "Nearest" : "Nearest",
269
+ "RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
270
+ "RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
271
+ "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
272
+ "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
273
+ "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
274
+ "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
275
+ "realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
276
+ "4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
277
+ "4x_foolhardy_Remacri" : "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
278
+ "Remacri4xExtraSmoother" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
279
+ "AnimeSharp4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
280
+ "lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
281
+ "RealisticRescaler4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
282
+ "NickelbackFS4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
283
+ }
284
+
285
+
286
+ def extract_parameters(input_string):
287
+ parameters = {}
288
+ input_string = input_string.replace("\n", "")
289
+
290
+ if not "Negative prompt:" in input_string:
291
+ print("Negative prompt not detected")
292
+ parameters["prompt"] = input_string
293
+ return parameters
294
+
295
+ parm = input_string.split("Negative prompt:")
296
+ parameters["prompt"] = parm[0]
297
+ if not "Steps:" in parm[1]:
298
+ print("Steps not detected")
299
+ parameters["neg_prompt"] = parm[1]
300
+ return parameters
301
+ parm = parm[1].split("Steps:")
302
+ parameters["neg_prompt"] = parm[0]
303
+ input_string = "Steps:" + parm[1]
304
+
305
+ # Extracting Steps
306
+ steps_match = re.search(r'Steps: (\d+)', input_string)
307
+ if steps_match:
308
+ parameters['Steps'] = int(steps_match.group(1))
309
+
310
+ # Extracting Size
311
+ size_match = re.search(r'Size: (\d+x\d+)', input_string)
312
+ if size_match:
313
+ parameters['Size'] = size_match.group(1)
314
+ width, height = map(int, parameters['Size'].split('x'))
315
+ parameters['width'] = width
316
+ parameters['height'] = height
317
+
318
+ # Extracting other parameters
319
+ other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
320
+ for param in other_parameters:
321
+ parameters[param[0]] = param[1].strip('"')
322
+
323
+ return parameters
324
+
325
+
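
`extract_parameters` parses an A1111-style metadata string: it splits on the `Negative prompt:` and `Steps:` markers, then regex-extracts `Steps`, `Size` (expanded into integer `width`/`height`), and the remaining comma-separated `key: value` pairs. For example:

```python
meta = ("1girl, masterpiece Negative prompt: lowres, bad hands "
        "Steps: 28, Sampler: Euler a, Seed: 42, Size: 832x1216")
params = extract_parameters(meta)
print(params["prompt"])                   # '1girl, masterpiece '
print(params["neg_prompt"])               # ' lowres, bad hands '
print(params["width"], params["height"])  # 832 1216
print(params["Sampler"])                  # 'Euler a'
```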
326
+ ## BEGIN MOD
327
+ class GuiSD:
328
+ def __init__(self):
329
+ self.model = None
330
+
331
+ print("Loading model...")
332
+ self.model = Model_Diffusers(
333
+ base_model_id="cagliostrolab/animagine-xl-3.1",
334
+ task_name="txt2img",
335
+ vae_model=None,
336
+ type_model_precision=torch.float16,
337
+ retain_task_model_in_cache=False,
338
+ )
339
+
340
+ def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
341
+ progress(0, desc="Start inference...")
342
+ images, image_list = model(**pipe_params)
343
+ progress(1, desc="Inference completed.")
344
+ if not isinstance(images, list): images = [images]
345
+ img = []
346
+ for image in images:
347
+ img.append((image, None))
348
+ return img
349
+
350
+ def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
351
+
352
+ yield f"Loading model: {model_name}"
353
+
354
+ vae_model = vae_model if vae_model != "None" else None
355
+
356
+ if model_name in model_list:
357
+ model_is_xl = "xl" in model_name.lower()
358
+ sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
359
+ model_type = "SDXL" if model_is_xl else "SD 1.5"
360
+ incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
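+ # incompatible when an SDXL model is paired with a non-SDXL VAE, or an SD 1.5 model with an SDXL VAE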
361
+
362
+ if incompatible_vae:
363
+ vae_model = None
364
+
365
+
366
+ self.model.load_pipe(
367
+ model_name,
368
+ task_name=task_stablepy[task],
369
+ vae_model=vae_model if vae_model != "None" else None,
370
+ type_model_precision=torch.float16,
371
+ retain_task_model_in_cache=False,
372
+ )
373
+ yield f"Model loaded: {model_name}"
374
+
375
+ @spaces.GPU
376
+ def generate_pipeline(
377
+ self,
378
+ prompt,
379
+ neg_prompt,
380
+ num_images,
381
+ steps,
382
+ cfg,
383
+ clip_skip,
384
+ seed,
385
+ lora1,
386
+ lora_scale1,
387
+ lora2,
388
+ lora_scale2,
389
+ lora3,
390
+ lora_scale3,
391
+ lora4,
392
+ lora_scale4,
393
+ lora5,
394
+ lora_scale5,
395
+ sampler,
396
+ img_height,
397
+ img_width,
398
+ model_name,
399
+ vae_model,
400
+ task,
401
+ image_control,
402
+ preprocessor_name,
403
+ preprocess_resolution,
404
+ image_resolution,
405
+ style_prompt, # list []
406
+ style_json_file,
407
+ image_mask,
408
+ strength,
409
+ low_threshold,
410
+ high_threshold,
411
+ value_threshold,
412
+ distance_threshold,
413
+ controlnet_output_scaling_in_unet,
414
+ controlnet_start_threshold,
415
+ controlnet_stop_threshold,
416
+ textual_inversion,
417
+ syntax_weights,
418
+ upscaler_model_path,
419
+ upscaler_increases_size,
420
+ esrgan_tile,
421
+ esrgan_tile_overlap,
422
+ hires_steps,
423
+ hires_denoising_strength,
424
+ hires_sampler,
425
+ hires_prompt,
426
+ hires_negative_prompt,
427
+ hires_before_adetailer,
428
+ hires_after_adetailer,
429
+ loop_generation,
430
+ leave_progress_bar,
431
+ disable_progress_bar,
432
+ image_previews,
433
+ display_images,
434
+ save_generated_images,
435
+ image_storage_location,
436
+ retain_compel_previous_load,
437
+ retain_detailfix_model_previous_load,
438
+ retain_hires_model_previous_load,
439
+ t2i_adapter_preprocessor,
440
+ t2i_adapter_conditioning_scale,
441
+ t2i_adapter_conditioning_factor,
442
+ xformers_memory_efficient_attention,
443
+ freeu,
444
+ generator_in_cpu,
445
+ adetailer_inpaint_only,
446
+ adetailer_verbose,
447
+ adetailer_sampler,
448
+ adetailer_active_a,
449
+ prompt_ad_a,
450
+ negative_prompt_ad_a,
451
+ strength_ad_a,
452
+ face_detector_ad_a,
453
+ person_detector_ad_a,
454
+ hand_detector_ad_a,
455
+ mask_dilation_a,
456
+ mask_blur_a,
457
+ mask_padding_a,
458
+ adetailer_active_b,
459
+ prompt_ad_b,
460
+ negative_prompt_ad_b,
461
+ strength_ad_b,
462
+ face_detector_ad_b,
463
+ person_detector_ad_b,
464
+ hand_detector_ad_b,
465
+ mask_dilation_b,
466
+ mask_blur_b,
467
+ mask_padding_b,
468
+ retain_task_cache_gui,
469
+ image_ip1,
470
+ mask_ip1,
471
+ model_ip1,
472
+ mode_ip1,
473
+ scale_ip1,
474
+ image_ip2,
475
+ mask_ip2,
476
+ model_ip2,
477
+ mode_ip2,
478
+ scale_ip2,
479
+ progress=gr.Progress(track_tqdm=True),
480
+ ):
481
+ progress(0, desc="Preparing inference...")
482
+
483
+ vae_model = vae_model if vae_model != "None" else None
484
+ loras_list = [lora1, lora2, lora3, lora4, lora5]
485
+ vae_msg = f"VAE: {vae_model}" if vae_model else ""
486
+ msg_lora = []
487
+
488
+ ## BEGIN MOD
489
+ prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
490
+ global lora_model_list
491
+ lora_model_list = get_lora_model_list()
492
+ ## END MOD
493
+
494
+ if model_name in model_list:
495
+ model_is_xl = "xl" in model_name.lower()
496
+ sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
497
+ model_type = "SDXL" if model_is_xl else "SD 1.5"
498
+ incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
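+ # incompatible when an SDXL model is paired with a non-SDXL VAE, or an SD 1.5 model with an SDXL VAE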
499
+
500
+ if incompatible_vae:
501
+ msg_inc_vae = (
502
+ f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
503
+ f" are using a { model_type } model. The default VAE "
504
+ "will be used."
505
+ )
506
+ gr.Info(msg_inc_vae)
507
+ vae_msg = msg_inc_vae
508
+ vae_model = None
509
+
510
+ for la in loras_list:
511
+ if la is not None and la != "None" and la in lora_model_list:
512
+ print(la)
513
+ lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
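+ # heuristic: these name fragments identify known SD 1.5 LoRAs; anything else is assumed to be SDXL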
514
+ if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
515
+ msg_inc_lora = f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }."
516
+ gr.Info(msg_inc_lora)
517
+ msg_lora.append(msg_inc_lora)
518
+
519
+ task = task_stablepy[task]
520
+
521
+ params_ip_img = []
522
+ params_ip_msk = []
523
+ params_ip_model = []
524
+ params_ip_mode = []
525
+ params_ip_scale = []
526
+
527
+ all_adapters = [
528
+ (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
529
+ (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
530
+ ]
531
+
532
+ for imgip, mskip, modelip, modeip, scaleip in all_adapters:
533
+ if imgip:
534
+ params_ip_img.append(imgip)
535
+ if mskip:
536
+ params_ip_msk.append(mskip)
537
+ params_ip_model.append(modelip)
538
+ params_ip_mode.append(modeip)
539
+ params_ip_scale.append(scaleip)
540
+
541
+ # First load
542
+ model_precision = torch.float16
543
+ if not self.model:
544
+ from stablepy import Model_Diffusers
545
+
546
+ print("Loading model...")
547
+ self.model = Model_Diffusers(
548
+ base_model_id=model_name,
549
+ task_name=task,
550
+ vae_model=vae_model if vae_model != "None" else None,
551
+ type_model_precision=model_precision,
552
+ retain_task_model_in_cache=retain_task_cache_gui,
553
+ )
554
+
555
+ if task != "txt2img" and not image_control:
556
+ raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
557
+
558
+ if task == "inpaint" and not image_mask:
559
+ raise ValueError("No mask image found: Specify one in 'Image Mask'")
560
+
561
+ if upscaler_model_path in [None, "Lanczos", "Nearest"]:
562
+ upscaler_model = upscaler_model_path
563
+ else:
564
+ directory_upscalers = 'upscalers'
565
+ os.makedirs(directory_upscalers, exist_ok=True)
566
+
567
+ url_upscaler = upscaler_dict_gui[upscaler_model_path]
568
+
569
+ if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
570
+ download_things(directory_upscalers, url_upscaler, hf_token)
571
+
572
+ upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
573
+
574
+ logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
575
+
576
+ print("Config model:", model_name, vae_model, loras_list)
577
+
578
+ self.model.load_pipe(
579
+ model_name,
580
+ task_name=task,
581
+ vae_model=vae_model if vae_model != "None" else None,
582
+ type_model_precision=model_precision,
583
+ retain_task_model_in_cache=retain_task_cache_gui,
584
+ )
585
+
586
+ ## BEGIN MOD
587
+ # if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
588
+ # print("No Textual inversion for SDXL")
589
+ ## END MOD
590
+
591
+ adetailer_params_A = {
592
+ "face_detector_ad" : face_detector_ad_a,
593
+ "person_detector_ad" : person_detector_ad_a,
594
+ "hand_detector_ad" : hand_detector_ad_a,
595
+ "prompt": prompt_ad_a,
596
+ "negative_prompt" : negative_prompt_ad_a,
597
+ "strength" : strength_ad_a,
598
+ # "image_list_task" : None,
599
+ "mask_dilation" : mask_dilation_a,
600
+ "mask_blur" : mask_blur_a,
601
+ "mask_padding" : mask_padding_a,
602
+ "inpaint_only" : adetailer_inpaint_only,
603
+ "sampler" : adetailer_sampler,
604
+ }
605
+
606
+ adetailer_params_B = {
607
+ "face_detector_ad" : face_detector_ad_b,
608
+ "person_detector_ad" : person_detector_ad_b,
609
+ "hand_detector_ad" : hand_detector_ad_b,
610
+ "prompt": prompt_ad_b,
611
+ "negative_prompt" : negative_prompt_ad_b,
612
+ "strength" : strength_ad_b,
613
+ # "image_list_task" : None,
614
+ "mask_dilation" : mask_dilation_b,
615
+ "mask_blur" : mask_blur_b,
616
+ "mask_padding" : mask_padding_b,
617
+ }
618
+ pipe_params = {
619
+ "prompt": prompt,
620
+ "negative_prompt": neg_prompt,
621
+ "img_height": img_height,
622
+ "img_width": img_width,
623
+ "num_images": num_images,
624
+ "num_steps": steps,
625
+ "guidance_scale": cfg,
626
+ "clip_skip": clip_skip,
627
+ "seed": seed,
628
+ "image": image_control,
629
+ "preprocessor_name": preprocessor_name,
630
+ "preprocess_resolution": preprocess_resolution,
631
+ "image_resolution": image_resolution,
632
+ "style_prompt": style_prompt if style_prompt else "",
633
+ "style_json_file": "",
634
+ "image_mask": image_mask, # only for Inpaint
635
+ "strength": strength, # only for Inpaint or ...
636
+ "low_threshold": low_threshold,
637
+ "high_threshold": high_threshold,
638
+ "value_threshold": value_threshold,
639
+ "distance_threshold": distance_threshold,
640
+ "lora_A": lora1 if lora1 != "None" else None,
641
+ "lora_scale_A": lora_scale1,
642
+ "lora_B": lora2 if lora2 != "None" else None,
643
+ "lora_scale_B": lora_scale2,
644
+ "lora_C": lora3 if lora3 != "None" else None,
645
+ "lora_scale_C": lora_scale3,
646
+ "lora_D": lora4 if lora4 != "None" else None,
647
+ "lora_scale_D": lora_scale4,
648
+ "lora_E": lora5 if lora5 != "None" else None,
649
+ "lora_scale_E": lora_scale5,
650
+ ## BEGIN MOD
651
+ "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
652
+ ## END MOD
653
+ "syntax_weights": syntax_weights, # "Classic"
654
+ "sampler": sampler,
655
+ "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
656
+ "gui_active": True,
657
+ "loop_generation": loop_generation,
658
+ "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
659
+ "control_guidance_start": float(controlnet_start_threshold),
660
+ "control_guidance_end": float(controlnet_stop_threshold),
661
+ "generator_in_cpu": generator_in_cpu,
662
+ "FreeU": freeu,
663
+ "adetailer_A": adetailer_active_a,
664
+ "adetailer_A_params": adetailer_params_A,
665
+ "adetailer_B": adetailer_active_b,
666
+ "adetailer_B_params": adetailer_params_B,
667
+ "leave_progress_bar": leave_progress_bar,
668
+ "disable_progress_bar": disable_progress_bar,
669
+ "image_previews": image_previews,
670
+ "display_images": display_images,
671
+ "save_generated_images": save_generated_images,
672
+ "image_storage_location": image_storage_location,
673
+ "retain_compel_previous_load": retain_compel_previous_load,
674
+ "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
675
+ "retain_hires_model_previous_load": retain_hires_model_previous_load,
676
+ "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
677
+ "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
678
+ "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
679
+ "upscaler_model_path": upscaler_model,
680
+ "upscaler_increases_size": upscaler_increases_size,
681
+ "esrgan_tile": esrgan_tile,
682
+ "esrgan_tile_overlap": esrgan_tile_overlap,
683
+ "hires_steps": hires_steps,
684
+ "hires_denoising_strength": hires_denoising_strength,
685
+ "hires_prompt": hires_prompt,
686
+ "hires_negative_prompt": hires_negative_prompt,
687
+ "hires_sampler": hires_sampler,
688
+ "hires_before_adetailer": hires_before_adetailer,
689
+ "hires_after_adetailer": hires_after_adetailer,
690
+ "ip_adapter_image": params_ip_img,
691
+ "ip_adapter_mask": params_ip_msk,
692
+ "ip_adapter_model": params_ip_model,
693
+ "ip_adapter_mode": params_ip_mode,
694
+ "ip_adapter_scale": params_ip_scale,
695
+ }
696
+
697
+ # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!'
698
+ self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
699
+
700
+ progress(1, desc="Inference preparation completed. Starting inference...")
701
+
702
+ info_state = f"PROCESSING "
703
+ info_state += ">"
704
+ info_state = f"COMPLETED. Seeds: {str(seed)}"
705
+ if vae_msg:
706
+ info_state = info_state + "<br>" + vae_msg
707
+ if msg_lora:
708
+ info_state = info_state + "<br>" + "<br>".join(msg_lora)
709
+ return self.infer_short(self.model, pipe_params), info_state
710
+ ## END MOD
711
+
712
+
713
+ from pathlib import Path
714
+ from modutils import (
715
+ safe_float,
716
+ escape_lora_basename,
717
+ to_lora_key,
718
+ to_lora_path,
719
+ get_local_model_list,
720
+ get_private_lora_model_lists,
721
+ get_valid_lora_name,
722
+ get_valid_lora_path,
723
+ get_valid_lora_wt,
724
+ get_lora_info,
725
+ normalize_prompt_list,
726
+ get_civitai_info,
727
+ search_lora_on_civitai,
728
+ )
729
+
730
+ sd_gen = GuiSD()
731
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
732
+ model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
733
+ lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
734
+ sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
735
+ import PIL
736
+ import numpy as np
737
+ MAX_SEED = np.iinfo(np.int32).max
738
+
739
+ images: list[tuple[PIL.Image.Image, str | None]] = []
740
+ info: str = ""
741
+ progress(0, desc="Preparing...")
742
+
743
+ if randomize_seed:
744
+ seed = random.randint(0, MAX_SEED)
745
+
746
+ generator = torch.Generator().manual_seed(seed).initial_seed() # keep the chosen seed; .seed() would re-seed randomly and ignore it
747
+
748
+ prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
749
+ progress(0.5, desc="Preparing...")
750
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
751
+ set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
752
+ lora1 = get_valid_lora_path(lora1)
753
+ lora2 = get_valid_lora_path(lora2)
754
+ lora3 = get_valid_lora_path(lora3)
755
+ lora4 = get_valid_lora_path(lora4)
756
+ lora5 = get_valid_lora_path(lora5)
757
+ progress(1, desc="Preparation completed. Starting inference preparation...")
758
+
759
+ sd_gen.load_new_model(model_name, vae, task_model_list[0])
760
+ images, info = sd_gen.generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
761
+ guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
762
+ lora4, lora4_wt, lora5, lora5_wt, sampler,
763
+ height, width, model_name, vae, task_model_list[0], None, "Canny", 512, 1024,
764
+ None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
765
+ 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
766
+ False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
767
+ False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
768
+ False, "", "", 0.35, True, True, False, 4, 4, 32,
769
+ True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7
770
+ )
771
+
772
+ progress(1, desc="Inference completed.")
773
+ output_image = images[0][0] if images else None
774
+
775
+ return output_image
776
+
777
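Since the wrapper above builds its generator seed by hand, here is a minimal standalone sketch (not part of the app) of how `torch.Generator` seeding behaves; the tensor shape is arbitrary.

```python
import torch

g = torch.Generator().manual_seed(42)   # manual_seed returns the Generator itself
print(g.initial_seed())                 # -> 42; .seed() would instead reseed from entropy
print(torch.randint(0, 10, (3,), generator=g))  # reproducible draw
```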
+
778
+ def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
779
+ model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
780
+ lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
781
+ sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
782
+ return gr.update(visible=True)
783
+
784
+
785
+ def pass_result(result):
786
+ return result
787
+
788
+
789
+ def get_samplers():
790
+ return scheduler_names
791
+
792
+
793
+ def get_vaes():
794
+ return vae_model_list
795
+
796
+
797
+ show_diffusers_model_list_detail = False
798
+ cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
799
+ def get_diffusers_model_list():
800
+ if show_diffusers_model_list_detail:
801
+ return cached_diffusers_model_tupled_list
802
+ else:
803
+ return load_diffusers_format_model
804
+
805
+
806
+ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = ""):
807
+ global show_diffusers_model_list_detail
808
+ show_diffusers_model_list_detail = is_enable
809
+ new_value = model_name
810
+ index = 0
811
+ if model_name in set(load_diffusers_format_model):
812
+ index = load_diffusers_format_model.index(model_name)
813
+ if is_enable:
814
+ new_value = cached_diffusers_model_tupled_list[index][1]
815
+ else:
816
+ new_value = load_diffusers_format_model[index]
817
+ return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list())
818
+
819
+
820
+ def get_t2i_model_info(repo_id: str):
821
+ from huggingface_hub import HfApi
822
+ api = HfApi()
823
+ try:
824
+ if " " in repo_id or not api.repo_exists(repo_id): return ""
825
+ model = api.model_info(repo_id=repo_id)
826
+ except Exception as e:
827
+ print(f"Error: Failed to get {repo_id}'s info. ")
828
+ return ""
829
+ if model.private or model.gated: return ""
830
+ tags = model.tags
831
+ info = []
832
+ url = f"https://huggingface.co/{repo_id}/"
833
+ if 'diffusers' not in tags: return ""
834
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
835
+ info.append("SDXL")
836
+ elif 'diffusers:StableDiffusionPipeline' in tags:
837
+ info.append("SD1.5")
838
+ if model.card_data and model.card_data.tags:
839
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
840
+ info.append(f"DLs: {model.downloads}")
841
+ info.append(f"likes: {model.likes}")
842
+ info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
843
+ md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
844
+ return gr.update(value=md)
845
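For reference, a minimal sketch of the `huggingface_hub` calls that `get_t2i_model_info` relies on; the repo id here is just an example.

```python
from huggingface_hub import HfApi

api = HfApi()
if api.repo_exists("stabilityai/stable-diffusion-xl-base-1.0"):
    m = api.model_info(repo_id="stabilityai/stable-diffusion-xl-base-1.0")
    print(m.tags)                     # includes 'diffusers:StableDiffusionXLPipeline'
    print(m.downloads, m.likes, m.last_modified)
```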
+
846
+
847
+ def load_model_prompt_dict():
848
+ import json
849
+ d = {}  # renamed from `dict` to avoid shadowing the built-in
850
+ try:
851
+ with open('model_dict.json', encoding='utf-8') as f:
852
+ d = json.load(f)
853
+ except Exception:
854
+ pass
855
+ return d
856
+
857
+
858
+ model_prompt_dict = load_model_prompt_dict()
859
+
860
+
861
+ model_recom_prompt_enabled = True
862
+ animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
863
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
864
+ pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
865
+ pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
866
+ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
867
+ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
868
+ default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
869
+ default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
870
+ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
871
+ if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
872
+ prompts = to_list(prompt)
873
+ neg_prompts = to_list(neg_prompt)
874
+ prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
875
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
876
+ last_empty_p = [""] if not prompts and type != "None" else []
877
+ last_empty_np = [""] if not neg_prompts and type != "None" else []
878
+ ps = []
879
+ nps = []
880
+ if model_name in model_prompt_dict.keys():
881
+ ps = to_list(model_prompt_dict[model_name]["prompt"])
882
+ nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
883
+ else:
884
+ ps = default_ps
885
+ nps = default_nps
886
+ prompts = prompts + ps
887
+ neg_prompts = neg_prompts + nps
888
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
889
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
890
+ return prompt, neg_prompt
891
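The helpers `to_list`, `list_sub`, and `list_uniq` come from modutils; a self-contained sketch of the merge behavior above, assuming they work as written here:

```python
def to_list(s):
    return [x.strip() for x in s.split(",") if x.strip()]

def list_sub(a, b):
    return [e for e in a if e not in b]

def list_uniq(l):
    return sorted(set(l), key=l.index)  # order-preserving dedup

user = to_list("1girl, masterpiece, smile")
preset = to_list("masterpiece, best quality, very aesthetic")
merged = list_uniq(list_sub(user, preset) + preset)
print(", ".join(merged))  # -> 1girl, smile, masterpiece, best quality, very aesthetic
```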
+
892
+
893
+ def enable_model_recom_prompt(is_enable: bool = True):
894
+ global model_recom_prompt_enabled
895
+ model_recom_prompt_enabled = is_enable
896
+ return is_enable
897
+
898
+
899
+ import json  # assumed not yet imported at module level in this excerpt; harmless if it is
+ private_lora_dict = {}
900
+ try:
901
+ with open('lora_dict.json', encoding='utf-8') as f:
902
+ d = json.load(f)
903
+ for k, v in d.items():
904
+ private_lora_dict[escape_lora_basename(k)] = v
905
+ except Exception:
906
+ pass
907
+
908
+
909
+ private_lora_model_list = get_private_lora_model_lists()
910
+ loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
911
+ loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
912
+ civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
913
+ all_lora_list = []
914
+
915
+
916
+ def get_all_lora_list():
917
+ global all_lora_list
918
+ loras = get_lora_model_list()
919
+ all_lora_list = loras.copy()
920
+ return loras
921
+
922
+
923
+ def get_all_lora_tupled_list():
924
+ global loras_dict
925
+ models = get_all_lora_list()
926
+ if not models: return []
927
+ tupled_list = []
928
+ for model in models:
929
+ #if not model: continue # to avoid GUI-related bug
930
+ basename = Path(model).stem
931
+ key = to_lora_key(model)
932
+ items = None
933
+ if key in loras_dict.keys():
934
+ items = loras_dict.get(key, None)
935
+ else:
936
+ items = get_civitai_info(model)
937
+ if items is not None:
938
+ loras_dict[key] = items
939
+ name = basename
940
+ value = model
941
+ if items and items[2] != "":
942
+ if items[1] == "Pony":
943
+ name = f"{basename} (for {items[1]}🐴, {items[2]})"
944
+ else:
945
+ name = f"{basename} (for {items[1]}, {items[2]})"
946
+ tupled_list.append((name, value))
947
+ return tupled_list
948
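The (label, value) tuples built above feed a Gradio dropdown, which displays the label but returns the value; a minimal sketch with a made-up LoRA entry:

```python
import gradio as gr

choices = [("detail_tweaker (for SDXL, trigger words)", "loras/detail_tweaker.safetensors")]
with gr.Blocks() as demo:
    dd = gr.Dropdown(choices=choices, value=choices[0][1], label="LoRA")
# demo.launch() would show the label while handlers receive the file path
```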
+
949
+
950
+ def update_lora_dict(path: str):
951
+ global loras_dict
952
+ key = to_lora_key(path)
953
+ if key in loras_dict.keys(): return
954
+ items = get_civitai_info(path)
955
+ if items is None: return
956
+ loras_dict[key] = items
957
+
958
+
959
+ def download_lora(dl_urls: str):
960
+ global loras_url_to_path_dict
961
+ dl_path = ""
962
+ before = get_local_model_list(directory_loras)
963
+ urls = []
964
+ for url in [url.strip() for url in dl_urls.split(',')]:
965
+ local_path = f"{directory_loras}/{url.split('/')[-1]}"
966
+ if not Path(local_path).exists():
967
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
968
+ urls.append(url)
969
+ after = get_local_model_list(directory_loras)
970
+ new_files = list_sub(after, before)
971
+ # zip() replaces the manual index bookkeeping; as in the original loop, this
+ # assumes newly downloaded files and their URLs line up one-to-one.
+ for url, file in zip(urls, new_files):
+ path = Path(file)
+ if path.exists():
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+ path.resolve().rename(new_path.resolve())
+ loras_url_to_path_dict[url] = str(new_path)
+ update_lora_dict(str(new_path))
+ dl_path = str(new_path)
981
+ return dl_path
982
+
983
+
984
+ def copy_lora(path: str, new_path: str):
985
+ import shutil
986
+ if path == new_path: return new_path
987
+ cpath = Path(path)
988
+ npath = Path(new_path)
989
+ if cpath.exists():
990
+ try:
991
+ shutil.copy(str(cpath.resolve()), str(npath.resolve()))
992
+ except Exception:
993
+ return None
994
+ update_lora_dict(str(npath))
995
+ return new_path
996
+ else:
997
+ return None
998
+
999
+
1000
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
1001
+ path = download_lora(dl_urls)
1002
+ if path:
1003
+ if not lora1 or lora1 == "None":
1004
+ lora1 = path
1005
+ elif not lora2 or lora2 == "None":
1006
+ lora2 = path
1007
+ elif not lora3 or lora3 == "None":
1008
+ lora3 = path
1009
+ elif not lora4 or lora4 == "None":
1010
+ lora4 = path
1011
+ elif not lora5 or lora5 == "None":
1012
+ lora5 = path
1013
+ choices = get_all_lora_tupled_list()
1014
+ return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
1015
+ gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
1016
+
1017
+
1018
+ def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
1019
+ import re
1020
+ lora1 = get_valid_lora_name(lora1)
1021
+ lora2 = get_valid_lora_name(lora2)
1022
+ lora3 = get_valid_lora_name(lora3)
1023
+ lora4 = get_valid_lora_name(lora4)
1024
+ lora5 = get_valid_lora_name(lora5)
1025
+ if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1026
+ lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
1027
+ lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
1028
+ lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
1029
+ lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
1030
+ lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
1031
+ on1, label1, tag1, md1 = get_lora_info(lora1)
1032
+ on2, label2, tag2, md2 = get_lora_info(lora2)
1033
+ on3, label3, tag3, md3 = get_lora_info(lora3)
1034
+ on4, label4, tag4, md4 = get_lora_info(lora4)
1035
+ on5, label5, tag5, md5 = get_lora_info(lora5)
1036
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1037
+ prompts = prompt.split(",") if prompt else []
1038
+ for p in prompts:
1039
+ p = str(p).strip()
1040
+ if "<lora" in p:
1041
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
1042
+ if not result: continue
1043
+ key = result[0][0]
1044
+ wt = result[0][1]
1045
+ path = to_lora_path(key)
1046
+ if key not in loras_dict.keys() or not path:
1047
+ path = get_valid_lora_name(path)
1048
+ if not path or path == "None": continue
1049
+ if path in lora_paths:
1050
+ continue
1051
+ elif not on1:
1052
+ lora1 = path
1053
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1054
+ lora1_wt = safe_float(wt)
1055
+ on1 = True
1056
+ elif not on2:
1057
+ lora2 = path
1058
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1059
+ lora2_wt = safe_float(wt)
1060
+ on2 = True
1061
+ elif not on3:
1062
+ lora3 = path
1063
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1064
+ lora3_wt = safe_float(wt)
1065
+ on3 = True
1066
+ elif not on4:
1067
+ lora4 = path
1068
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1069
+ lora4_wt = safe_float(wt)
1070
+ on4 = True  # was re-calling get_lora_info(lora4); the sibling branches just set the flag
1071
+ elif not on5:
1072
+ lora5 = path
1073
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1074
+ lora5_wt = safe_float(wt)
1075
+ on5 = True
1076
+ return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1077
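A hedged sketch of how `<lora:name:weight>` tags are pulled out of a prompt with the same regex used by `set_prompt_loras` (and `update_loras` below); the tag names are made up.

```python
import re

prompt = "1girl, <lora:detail_tweaker:0.8>, smile, <lora:style_anime:1.0>"
for p in prompt.split(","):
    m = re.findall(r'<lora:(.+?):(.+?)>', p.strip())
    if m:
        key, wt = m[0]
        print(key, float(wt))
# detail_tweaker 0.8
# style_anime 1.0
```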
+
1078
+
1079
+ def apply_lora_prompt(prompt: str, lora_info: str):
1080
+ if lora_info == "None": return gr.update(value=prompt)
1081
+ tags = prompt.split(",") if prompt else []
1082
+ prompts = normalize_prompt_list(tags)
1083
+ lora_tag = lora_info.replace("/",",")
1084
+ lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
1085
+ lora_prompts = normalize_prompt_list(lora_tags)
1086
+ empty = [""]
1087
+ prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
1088
+ return gr.update(value=prompt)
1089
+
1090
+
1091
+ def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
1092
+ import re
1093
+ on1, label1, tag1, md1 = get_lora_info(lora1)
1094
+ on2, label2, tag2, md2 = get_lora_info(lora2)
1095
+ on3, label3, tag3, md3 = get_lora_info(lora3)
1096
+ on4, label4, tag4, md4 = get_lora_info(lora4)
1097
+ on5, label5, tag5, md5 = get_lora_info(lora5)
1098
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1099
+ prompts = prompt.split(",") if prompt else []
1100
+ output_prompts = []
1101
+ for p in prompts:
1102
+ p = str(p).strip()
1103
+ if "<lora" in p:
1104
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
1105
+ if not result: continue
1106
+ key = result[0][0]
1107
+ wt = result[0][1]
1108
+ path = to_lora_path(key)
1109
+ if key not in loras_dict.keys() or not path: continue
1110
+ if path in lora_paths:
1111
+ output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
1112
+ elif p:
1113
+ output_prompts.append(p)
1114
+ lora_prompts = []
1115
+ if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
1116
+ if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
1117
+ if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
1118
+ if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
1119
+ if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
1120
+ output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
1121
+ choices = get_all_lora_tupled_list()
1122
+ return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
1123
+ gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
1124
+ gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
1125
+ gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
1126
+ gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
1127
+ gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
1128
+ gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
1129
+ gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
1130
+ gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
1131
+ gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
1132
+
1133
+
1134
+ def search_civitai_lora(query, base_model):
1135
+ global civitai_lora_last_results
1136
+ items = search_lora_on_civitai(query, base_model)
1137
+ if not items: return gr.update(choices=[("", "")], value="", visible=False),\
1138
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
1139
+ civitai_lora_last_results = {}
1140
+ choices = []
1141
+ for item in items:
1142
+ base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
1143
+ name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
1144
+ value = item['dl_url']
1145
+ choices.append((name, value))
1146
+ civitai_lora_last_results[value] = item
1147
+ if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
1148
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
1149
+ result = civitai_lora_last_results.get(choices[0][1], None)  # default None, not the string "None"
1150
+ md = result['md'] if result else ""
1151
+ return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
1152
+ gr.update(visible=True), gr.update(visible=True)
1153
+
1154
+
1155
+ def select_civitai_lora(search_result):
1156
+ if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
1157
+ result = civitai_lora_last_results.get(search_result, "None")
1158
+ md = result['md'] if result else ""
1159
+ return gr.update(value=search_result), gr.update(value=md, visible=True)
1160
+
1161
+
1162
+ def search_civitai_lora_json(query, base_model):
1163
+ results = {}
1164
+ items = search_lora_on_civitai(query, base_model)
1165
+ if not items: return gr.update(value=results)
1166
+ for item in items:
1167
+ results[item['dl_url']] = item
1168
+ return gr.update(value=results)
1169
+
1170
+
1171
+ quality_prompt_list = [
1172
+ {
1173
+ "name": "None",
1174
+ "prompt": "",
1175
+ "negative_prompt": "lowres",
1176
+ },
1177
+ {
1178
+ "name": "Animagine Common",
1179
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
1180
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
1181
+ },
1182
+ {
1183
+ "name": "Pony Anime Common",
1184
+ "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
1185
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
1186
+ },
1187
+ {
1188
+ "name": "Pony Common",
1189
+ "prompt": "source_anime, score_9, score_8_up, score_7_up",
1190
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
1191
+ },
1192
+ {
1193
+ "name": "Animagine Standard v3.0",
1194
+ "prompt": "masterpiece, best quality",
1195
+ "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
1196
+ },
1197
+ {
1198
+ "name": "Animagine Standard v3.1",
1199
+ "prompt": "masterpiece, best quality, very aesthetic, absurdres",
1200
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
1201
+ },
1202
+ {
1203
+ "name": "Animagine Light v3.1",
1204
+ "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
1205
+ "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
1206
+ },
1207
+ {
1208
+ "name": "Animagine Heavy v3.1",
1209
+ "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
1210
+ "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
1211
+ },
1212
+ ]
1213
+
1214
+
1215
+ style_list = [
1216
+ {
1217
+ "name": "None",
1218
+ "prompt": "",
1219
+ "negative_prompt": "",
1220
+ },
1221
+ {
1222
+ "name": "Cinematic",
1223
+ "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
1224
+ "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
1225
+ },
1226
+ {
1227
+ "name": "Photographic",
1228
+ "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
1229
+ "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
1230
+ },
1231
+ {
1232
+ "name": "Anime",
1233
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
1234
+ "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
1235
+ },
1236
+ {
1237
+ "name": "Manga",
1238
+ "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
1239
+ "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
1240
+ },
1241
+ {
1242
+ "name": "Digital Art",
1243
+ "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
1244
+ "negative_prompt": "photo, photorealistic, realism, ugly",
1245
+ },
1246
+ {
1247
+ "name": "Pixel art",
1248
+ "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
1249
+ "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
1250
+ },
1251
+ {
1252
+ "name": "Fantasy art",
1253
+ "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
1254
+ "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
1255
+ },
1256
+ {
1257
+ "name": "Neonpunk",
1258
+ "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
1259
+ "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
1260
+ },
1261
+ {
1262
+ "name": "3D Model",
1263
+ "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
1264
+ "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
1265
+ },
1266
+ ]
1267
+
1268
+
1269
+ preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
1270
+ preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
1271
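A quick sketch of what the two comprehensions above produce: each preset list collapses into a name -> (prompt, negative_prompt) mapping; the shortened entries here are just illustrative.

```python
style_list = [
    {"name": "None", "prompt": "", "negative_prompt": ""},
    {"name": "Anime", "prompt": "anime artwork, anime style", "negative_prompt": "photo, deformed"},
]
preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
print(preset_styles["Anime"])  # ('anime artwork, anime style', 'photo, deformed')
```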
+
1272
+
1273
+ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None"):
1274
+ def to_list(s):
1275
+ return [x.strip() for x in s.split(",") if not s == ""]
1276
+
1277
+ def list_sub(a, b):
1278
+ return [e for e in a if e not in b]
1279
+
1280
+ def list_uniq(l):
1281
+ return sorted(set(l), key=l.index)
1282
+
1283
+ animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
1284
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
1285
+ pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
1286
+ pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
1287
+ prompts = to_list(prompt)
1288
+ neg_prompts = to_list(neg_prompt)
1289
+
1290
+ all_styles_ps = []
1291
+ all_styles_nps = []
1292
+ for d in style_list:
1293
+ all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
1294
+ all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1295
+
1296
+ all_quality_ps = []
1297
+ all_quality_nps = []
1298
+ for d in quality_prompt_list:
1299
+ all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
1300
+ all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1301
+
1302
+ quality_ps = to_list(preset_quality[quality_key][0])
1303
+ quality_nps = to_list(preset_quality[quality_key][1])
1304
+ styles_ps = to_list(preset_styles[styles_key][0])
1305
+ styles_nps = to_list(preset_styles[styles_key][1])
1306
+
1307
+ prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
1308
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
1309
+
1310
+ last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1311
+ last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1312
+
1313
+ if type == "Animagine":
1314
+ prompts = prompts + animagine_ps
1315
+ neg_prompts = neg_prompts + animagine_nps
1316
+ elif type == "Pony":
1317
+ prompts = prompts + pony_ps
1318
+ neg_prompts = neg_prompts + pony_nps
1319
+
1320
+ prompts = prompts + styles_ps + quality_ps
1321
+ neg_prompts = neg_prompts + styles_nps + quality_nps
1322
+
1323
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
1324
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
1325
+
1326
+ return gr.update(value=prompt), gr.update(value=neg_prompt)
1327
+
1328
+
env.py ADDED
@@ -0,0 +1,129 @@
1
+ import os
2
+
3
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
4
+ hf_token = os.environ.get("HF_TOKEN")
5
+ hf_read_token = os.environ.get('HF_READ_TOKEN') # only use for private repo
6
+
7
+ # - **List Models**
8
+ load_diffusers_format_model = [
9
+ 'votepurchase/animagine-xl-3.1',
10
+ 'votepurchase/NSFW-GEN-ANIME-v2',
11
+ 'votepurchase/kivotos-xl-2.0',
12
+ 'votepurchase/holodayo-xl-2.1',
13
+ 'votepurchase/ponyDiffusionV6XL',
14
+ 'votepurchase/AnythingXL_xl',
15
+ 'votepurchase/7thAnimeXLPonyA_v10',
16
+ 'votepurchase/ChilloutMix',
17
+ 'votepurchase/NovelAIRemix',
18
+ 'votepurchase/NSFW-gen-v2',
19
+ 'votepurchase/PerfectDeliberate-Anime_v2',
20
+ 'votepurchase/realpony-xl',
21
+ 'votepurchase/artiwaifu-diffusion-1.0',
22
+ 'votepurchase/Starry-XL-v5.2',
23
+ 'votepurchase/Yaki-Dofu-Mix',
24
+ 'votepurchase/ebara-pony-v1-sdxl',
25
+ 'votepurchase/waiANIMIXPONYXL_v10',
26
+ 'votepurchase/counterfeitV30_v30',
27
+ 'votepurchase/ebara-pony',
28
+ 'votepurchase/Realistic_Vision_V1.4',
29
+ 'votepurchase/pony',
30
+ 'votepurchase/ponymatureSDXL_ponyeclipse10',
31
+ 'votepurchase/waiREALMIX_v70',
32
+ 'votepurchase/waiREALCN_v10',
33
+ 'votepurchase/PVCStyleModelMovable_pony151',
34
+ 'votepurchase/PVCStyleModelMovable_beta27Realistic',
35
+ 'votepurchase/PVCStyleModelFantasy_beta12',
36
+ 'votepurchase/pvcxl-v1-lora',
37
+ 'votepurchase/Realistic_Vision_V2.0',
38
+ 'votepurchase/RealVisXL_V4.0',
39
+ 'votepurchase/juggernautXL_hyper_8step_sfw',
40
+ 'votepurchase/ponyRealism_v21MainVAE',
41
+ 'stabilityai/stable-diffusion-xl-base-1.0',
42
+ 'cagliostrolab/animagine-xl-3.1',
43
+ 'misri/epicrealismXL_v7FinalDestination',
44
+ 'misri/juggernautXL_juggernautX',
45
+ 'misri/zavychromaxl_v80',
46
+ 'SG161222/RealVisXL_V4.0',
47
+ 'misri/newrealityxlAllInOne_Newreality40',
48
+ 'eienmojiki/Anything-XL',
49
+ 'eienmojiki/Starry-XL-v5.2',
50
+ 'gsdf/CounterfeitXL',
51
+ 'kitty7779/ponyDiffusionV6XL',
52
+ 'yodayo-ai/clandestine-xl-1.0',
53
+ 'yodayo-ai/kivotos-xl-2.0',
54
+ 'yodayo-ai/holodayo-xl-2.1',
55
+ 'digiplay/majicMIX_sombre_v2',
56
+ 'digiplay/majicMIX_realistic_v6',
57
+ 'digiplay/majicMIX_realistic_v7',
58
+ 'digiplay/DreamShaper_8',
59
+ 'digiplay/BeautifulArt_v1',
60
+ 'digiplay/DarkSushi2.5D_v1',
61
+ 'digiplay/darkphoenix3D_v1.1',
62
+ 'digiplay/BeenYouLiteL11_diffusers',
63
+ 'rubbrband/revAnimated_v2Rebirth',
64
+ 'youknownothing/cyberrealistic_v50',
65
+ 'votepurchase/counterfeitV30_v30',
66
+ 'Meina/MeinaMix_V11',
67
+ 'Meina/MeinaUnreal_V5',
68
+ 'Meina/MeinaPastel_V7',
69
+ 'rubbrband/realcartoon3d_v16',
70
+ 'rubbrband/realcartoonRealistic_v14',
71
+ 'KBlueLeaf/Kohaku-XL-Epsilon-rev2',
72
+ 'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
73
+ 'KBlueLeaf/Kohaku-XL-Zeta',
74
+ 'kayfahaarukku/UrangDiffusion-1.2',
75
+ 'Eugeoter/artiwaifu-diffusion-1.0',
76
+ 'Raelina/Rae-Diffusion-XL-V2',
77
+ 'Raelina/Raemu-XL-V4',
78
+ ]
79
+
80
+ # List all Models for specified user
81
+ HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
82
+ HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule
83
+
84
+
85
+ # - **Download Models**
86
+ download_model_list = [
87
+ ]
88
+
89
+ # - **Download VAEs**
90
+ download_vae_list = [
91
+ 'https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true',
92
+ 'https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true',
93
+ "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
94
+ "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
95
+ "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
96
+ ]
97
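The app fetches these URLs via its own `download_things` helper; purely for illustration, a hedged standalone sketch (assuming `requests` is available) that streams one of the VAE URLs above into the `vaes` directory:

```python
import os
import requests  # assumption: not part of this repo's helper code

url = "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors"
os.makedirs("vaes", exist_ok=True)
dest = os.path.join("vaes", url.split("/")[-1].split("?")[0])
with requests.get(url, stream=True, timeout=60) as r:
    r.raise_for_status()
    with open(dest, "wb") as f:
        for chunk in r.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            f.write(chunk)
```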
+
98
+ # - **Download LoRAs**
99
+ download_lora_list = [
100
+ ]
101
+
102
+ # Download Embeddings
103
+ download_embeds = [
104
+ 'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
105
+ 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
106
+ 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
107
+ ]
108
+
109
+ directory_models = 'models'
110
+ os.makedirs(directory_models, exist_ok=True)
111
+ directory_loras = 'loras'
112
+ os.makedirs(directory_loras, exist_ok=True)
113
+ directory_vaes = 'vaes'
114
+ os.makedirs(directory_vaes, exist_ok=True)
115
+ directory_embeds = 'embedings'
116
+ os.makedirs(directory_embeds, exist_ok=True)
117
+
118
+ directory_embeds_sdxl = 'embedings_xl'
119
+ os.makedirs(directory_embeds_sdxl, exist_ok=True)
120
+ directory_embeds_positive_sdxl = 'embedings_xl/positive'
121
+ os.makedirs(directory_embeds_positive_sdxl, exist_ok=True)
122
+
123
+ HF_LORA_PRIVATE_REPOS1 = ['John6666/loratest1', 'John6666/loratest3', 'John6666/loratest4', 'John6666/loratest6']
124
+ HF_LORA_PRIVATE_REPOS2 = ['John6666/loratest10', 'John6666/loratest11','John6666/loratest'] # to be sorted as 1 repo
125
+ HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
126
+ HF_LORA_ESSENTIAL_PRIVATE_REPO = 'John6666/loratest1' # to be downloaded on run app
127
+ HF_VAE_PRIVATE_REPO = 'John6666/vaetest'
128
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO = 'John6666/embeddingstest'
129
+ HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO = 'John6666/embeddingspositivetest'
ja_to_danbooru/character_series_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/danbooru_tagtype_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_danbooru_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_to_danbooru.py ADDED
@@ -0,0 +1,87 @@
1
+ import argparse
2
+ import re
3
+ from pathlib import Path
4
+
5
+
6
+ def load_json_dict(path: str):
7
+ import json
8
+ data = {}  # renamed from `dict` to avoid shadowing the built-in; Path is already imported at module level
+ if not Path(path).exists(): return data
+ try:
+ with open(path, encoding='utf-8') as f:
+ data = json.load(f)
+ except Exception:
+ print(f"Failed to open dictionary file: {path}")
+ return data
+ return data
18
+
19
+
20
+ ja_danbooru_dict = load_json_dict('ja_danbooru_dict.json')
21
+ char_series_dict = load_json_dict('character_series_dict.json')
22
+ tagtype_dict = load_json_dict('danbooru_tagtype_dict.json')
23
+
24
+
25
+ def jatags_to_danbooru_tags(jatags: list[str]):
26
+ from rapidfuzz.process import extractOne
27
+ from rapidfuzz.utils import default_process
28
+ keys = list(ja_danbooru_dict.keys())
29
+ ckeys = list(char_series_dict.keys())
30
+ tags = []
31
+ for jatag in jatags:
32
+ jatag = str(jatag).strip()
33
+ s = default_process(str(jatag))
34
+ e1 = extractOne(s, keys, processor=default_process, score_cutoff=90.0)
35
+ if e1:
36
+ tag = str(ja_danbooru_dict[e1[0]])
37
+ tags.append(tag)
38
+ if tag in tagtype_dict.keys() and tagtype_dict[tag] == "character":
39
+ cs = default_process(tag)
40
+ ce1 = extractOne(cs, ckeys, processor=default_process, score_cutoff=95.0)
41
+ if ce1:
42
+ series = str(char_series_dict[ce1[0]])
43
+ tags.append(series)
44
+ return tags
45
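A minimal sketch of the rapidfuzz lookup this function builds on; the two-entry dictionary stands in for the real `ja_danbooru_dict`.

```python
from rapidfuzz.process import extractOne
from rapidfuzz.utils import default_process

ja_dict = {"女の子": "1girl", "笑顔": "smile"}  # stand-in for ja_danbooru_dict
keys = list(ja_dict.keys())
hit = extractOne(default_process("女の子"), keys,
                 processor=default_process, score_cutoff=90.0)
if hit:  # extractOne returns (choice, score, index) or None below the cutoff
    print(ja_dict[hit[0]])  # -> 1girl
```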
+
46
+
47
+ def jatags_to_danbooru(input_tag, input_file, output_file, is_append):
48
+ if input_file and Path(input_file).exists():
49
+ try:
50
+ with open(input_file, 'r', encoding='utf-8') as f:
51
+ input_tag = f.read()
52
+ except Exception:
53
+ print(f"Failed to open input file: {input_file}")
54
+ ja_tags = [tag.strip() for tag in input_tag.split(",")] if input_tag else []
55
+ tags = jatags_to_danbooru_tags(ja_tags)
56
+ output_tags = ja_tags + tags if is_append else tags
57
+ output_tag = ", ".join(output_tags)
58
+ if output_file:
59
+ try:
60
+ with open(output_file, mode='w', encoding="utf-8") as f:
61
+ f.write(output_tag)
62
+ except Exception:
63
+ print(f"Failed to write output file: {output_file}")
64
+ else:
65
+ print(output_tag)
66
+ return output_tag
67
+
68
+
69
+ if __name__ == "__main__":
70
+ parser = argparse.ArgumentParser()
71
+ parser.add_argument("--tags", default=None, type=str, required=False, help="Input tags.")
72
+ parser.add_argument("--file", default=None, type=str, required=False, help="Input tags from a text file.")
73
+ parser.add_argument("--out", default=None, type=str, help="Output to text file.")
74
+ parser.add_argument("--append", default=False, type=bool, help="Whether the output contains the input tags or not.")
75
+
76
+ args = parser.parse_args()
77
+ assert (args.tags, args.file) != (None, None), "Must provide --tags or --file!"
78
+
79
+ jatags_to_danbooru(args.tags, args.file, args.out, args.append)
80
+
81
+
82
+ # Usage:
83
+ # python ja_to_danbooru.py --tags "女の子, 大室櫻子"
84
+ # python ja_to_danbooru.py --file inputtag.txt
85
+ # python ja_to_danbooru.py --file inputtag.txt --append
86
+ # Datasets: https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715
87
+ # Datasets: https://github.com/ponapon280/danbooru-e621-converter
llmdolphin.py ADDED
@@ -0,0 +1,884 @@
1
+ import gradio as gr
2
+ import spaces
3
+ from llama_cpp import Llama
4
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
5
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
6
+ from llama_cpp_agent.chat_history import BasicChatHistory
7
+ from llama_cpp_agent.chat_history.messages import Roles
8
+ from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
9
+ import wrapt_timeout_decorator
10
+
11
+
12
+ llm_models_dir = "./llm_models"
13
+ llm_models = {
14
+ #"": ["", MessagesFormatterType.LLAMA_3],
15
+ #"": ["", MessagesFormatterType.MISTRAL],
16
+ #"": ["", MessagesFormatterType.ALPACA],
17
+ #"": ["", MessagesFormatterType.OPEN_CHAT],
18
+ #"": ["", MessagesFormatterType.CHATML],
19
+ #"": ["", MessagesFormatterType.PHI_3],
20
+ "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
21
+ "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
22
+ "Llama-3.1-8B-EZO-1.1-it.Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-EZO-1.1-it-GGUF", MessagesFormatterType.MISTRAL],
23
+ "MN-12B-Starcannon-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
24
+ "MN-12B-Starcannon-v2.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v2-i1-GGUF", MessagesFormatterType.CHATML],
25
+ "MN-12B-Starcannon-v3.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v3-i1-GGUF", MessagesFormatterType.CHATML],
26
+ "MN-12B-Starcannon-v4-unofficial.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v4-unofficial-i1-GGUF", MessagesFormatterType.MISTRAL],
27
+ "MN-12B-Starsong-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starsong-v1-i1-GGUF", MessagesFormatterType.CHATML],
28
+ "Lumimaid-Magnum-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-Magnum-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
29
+ "Nemo-12B-Marlin-v1.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
30
+ "Nemo-12B-Marlin-v2.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
31
+ "Nemo-12B-Marlin-v3.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v3-GGUF", MessagesFormatterType.MISTRAL],
32
+ "Nemo-12B-Marlin-v4.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v4-i1-GGUF", MessagesFormatterType.MISTRAL],
33
+ "Nemo-12B-Marlin-v5-Q4_K_M.gguf": ["starble-dev/Nemo-12B-Marlin-v5-GGUF", MessagesFormatterType.CHATML],
34
+ "Nemo-12B-Marlin-v7.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v7-GGUF", MessagesFormatterType.MISTRAL],
35
+ "Nemo-12B-Marlin-v8.Q4_K_S.gguf": ["mradermacher/Nemo-12B-Marlin-v8-GGUF", MessagesFormatterType.MISTRAL],
36
+ "NemoDori-v0.2-Upscaled.1-14B.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-Upscaled.1-14B-GGUF", MessagesFormatterType.MISTRAL],
37
+ "Fireball-12B-v1.0.i1-Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
38
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.2a.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.2a-GGUF", MessagesFormatterType.MISTRAL],
39
+ "T-III-12B.Q4_K_M.gguf": ["mradermacher/T-III-12B-GGUF", MessagesFormatterType.CHATML],
40
+ "T-IIIa-12B.Q4_K_S.gguf": ["mradermacher/T-IIIa-12B-GGUF", MessagesFormatterType.MISTRAL],
41
+ "StorieCreative.i1-Q4_K_S.gguf": ["mradermacher/StorieCreative-i1-GGUF", MessagesFormatterType.MISTRAL],
42
+ "Deutscher-Pantheon-12B.Q4_K_M.gguf": ["mradermacher/Deutscher-Pantheon-12B-GGUF", MessagesFormatterType.MISTRAL],
43
+ "guns-and-roses-r1-Q4_K_L-imat.gguf": ["Reiterate3680/guns-and-roses-r1-GGUF", MessagesFormatterType.MISTRAL],
44
+ "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
45
+ "nemo-12b-hiwaifu-Q4_K_L-imat.gguf": ["Reiterate3680/nemo-12b-hiwaifu-GGUF", MessagesFormatterType.MISTRAL],
46
+ "Soliloquy-7B-v3-Q4_K_L-imat.gguf": ["Reiterate3680/Soliloquy-7B-v3-GGUF", MessagesFormatterType.OPEN_CHAT],
47
+ "Lyra-Gutenberg-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Lyra-Gutenberg-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
48
+ "Gutensuppe-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Gutensuppe-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
49
+ "IceTea21EnergyDrinkRPV13-dpo240-Q8_0.gguf": ["icefog72/IceTea21EnergyDrinkRPV13-dpo240-gguf", MessagesFormatterType.MISTRAL],
50
+ "Instant-RP-Noodles-12B.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-GGUF", MessagesFormatterType.MISTRAL],
51
+ "Violet_Twilight-v0.1_q4_K_M.gguf": ["Epiculous/Violet_Twilight-v0.1-GGUF", MessagesFormatterType.MISTRAL],
52
+ "Llama3.1-vodka.Q4_K_S.gguf": ["mradermacher/Llama3.1-vodka-GGUF", MessagesFormatterType.MISTRAL],
53
+ "L3.1-Pyro-Mantus-v0.1c-8B.q5_k_m.gguf": ["kromquant/L3.1-Pyro-Mantus-v0.1c-8B-GGUFs", MessagesFormatterType.MISTRAL],
54
+ "Llama-3.1-8B-ArliAI-RPMax-v1.1-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF", MessagesFormatterType.MISTRAL],
55
+ "l3-notcrazy-8b-q4_k_m.gguf": ["bunnycore/L3-NotCrazy-8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
56
+ "Maverick-8B.Q5_K_M.gguf": ["RichardErkhov/bunnycore_-_Maverick-8B-gguf", MessagesFormatterType.LLAMA_3],
57
+ "Fireball-12B-v1.01a.Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.01a-GGUF", MessagesFormatterType.CHATML],
58
+ "Loki-v5.2.Q5_K_M.gguf": ["mradermacher/Loki-v5.2-GGUF", MessagesFormatterType.MISTRAL],
59
+ "Loki-v5.1.Q5_K_M.gguf": ["mradermacher/Loki-v5.1-GGUF", MessagesFormatterType.MISTRAL],
60
+ "GracieRP-freefallenLora-Gemma2-Inst-9B.i1-Q4_K_M.gguf": ["mradermacher/GracieRP-freefallenLora-Gemma2-Inst-9B-i1-GGUF", MessagesFormatterType.ALPACA],
61
+ "mistral-nemo-gutenberg-12B-v4.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v4-GGUF", MessagesFormatterType.MISTRAL],
62
+ "FunkyMerge-12b-0.1.Q4_K_M.gguf": ["mradermacher/FunkyMerge-12b-0.1-GGUF", MessagesFormatterType.MISTRAL],
63
+ "NemoMix-Unleashed-12B-Q4_K_M.gguf": ["bartowski/NemoMix-Unleashed-12B-GGUF", MessagesFormatterType.MISTRAL],
64
+ "IceTea21EnergyDrinkRPV13.Q4_K_S.gguf": ["mradermacher/IceTea21EnergyDrinkRPV13-GGUF", MessagesFormatterType.MISTRAL],
65
+ "MegaBeam-Mistral-7B-512k-Q5_K_M.gguf": ["bartowski/MegaBeam-Mistral-7B-512k-GGUF", MessagesFormatterType.MISTRAL],
66
+ "azur-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Azur-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
67
+ "Chronos-Gold-12B-1.0-Q4_K_M.gguf": ["bartowski/Chronos-Gold-12B-1.0-GGUF", MessagesFormatterType.MISTRAL],
68
+ "L3.1-Romes-Ninomos-Maxxing.Q5_K_M.gguf": ["mradermacher/L3.1-Romes-Ninomos-Maxxing-GGUF", MessagesFormatterType.LLAMA_3],
69
+ "mistral-nemo-minitron-8b-base-q4_k_m.gguf": ["Daemontatox/Mistral-NeMo-Minitron-8B-Base-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
70
+ "Nokstella_coder-8B-model_stock.i1-Q4_K_S.gguf": ["mradermacher/Nokstella_coder-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
71
+ "vtion_model_v1.Q5_K_M.gguf": ["mradermacher/vtion_model_v1-GGUF", MessagesFormatterType.LLAMA_3],
72
+ "storiecreative-q5_k_m.gguf": ["ClaudioItaly/StorieCreative-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
73
+ "L3.1-gramamax.Q5_K_M.gguf": ["mradermacher/L3.1-gramamax-GGUF", MessagesFormatterType.MISTRAL],
74
+ "Evolutionstory128.Q5_K_M.gguf": ["mradermacher/Evolutionstory128-GGUF", MessagesFormatterType.CHATML],
75
+ "sellen-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Sellen-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
76
+ "nokstella_coder-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Nokstella_coder-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
77
+ "Ultra-Instruct-12B-Q4_K_M.gguf": ["bartowski/Ultra-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
78
+ "L3.1-Sithamo-v0.4-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.4-8B-GGUFs", MessagesFormatterType.MISTRAL],
79
+ "Berry-Spark-7B-Fix.Q5_K_M.gguf": ["mradermacher/Berry-Spark-7B-Fix-GGUF", MessagesFormatterType.OPEN_CHAT],
80
+ "llama3.1-gutenberg-8B.Q4_K_S.gguf": ["mradermacher/llama3.1-gutenberg-8B-GGUF", MessagesFormatterType.LLAMA_3],
81
+ "L3.1-Romes-Ninomos.Q4_K_S.gguf": ["mradermacher/L3.1-Romes-Ninomos-GGUF", MessagesFormatterType.LLAMA_3],
82
+ "nemo-12b-summarizer-de-v3.Q4_K_M.gguf": ["mradermacher/nemo-12b-summarizer-de-v3-GGUF", MessagesFormatterType.MISTRAL],
83
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q5_K_M.gguf": ["darkshapes/suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
84
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.1.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.1-GGUF", MessagesFormatterType.MISTRAL],
85
+ "gemma-2-9B-it-advanced-v2.1-Q5_K_M.gguf": ["jsgreenawalt/gemma-2-9B-it-advanced-v2.1-GGUF", MessagesFormatterType.ALPACA],
86
+ "mistral-12b-neptune-6k-instruct.Q4_K_M.gguf": ["mradermacher/mistral-12b-neptune-6k-instruct-GGUF", MessagesFormatterType.MISTRAL],
87
+ "evolutionstory-q5_k_m.gguf": ["ClaudioItaly/Evolutionstory-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
88
+ "AuraFinal12B-Q4_K_L-imat.gguf": ["Reiterate3680/AuraFinal12B-GGUF", MessagesFormatterType.MISTRAL],
89
+ "Hollow-Tail-V1-12B-Q5_K_M.gguf": ["starble-dev/Hollow-Tail-V1-12B-GGUF", MessagesFormatterType.MISTRAL],
90
+ "IceSakeRPTrainingTestV1-7b.Q5_K_M.gguf": ["mradermacher/IceSakeRPTrainingTestV1-7b-GGUF", MessagesFormatterType.MISTRAL],
91
+ "IceTea21EnergyDrinkRPV10.Q5_K_M.gguf": ["mradermacher/IceTea21EnergyDrinkRPV10-GGUF", MessagesFormatterType.MISTRAL],
92
+ "MN-LooseCannon-12B-v2-Q4_K_L-imat.gguf": ["Reiterate3680/MN-LooseCannon-12B-v2-GGUF", MessagesFormatterType.CHATML],
93
+ "MN-MT3-m4-12B-Q4_K_L-imat.gguf": ["Reiterate3680/MN-MT3-m4-12B-GGUF", MessagesFormatterType.CHATML],
94
+ "Mahou-Gutenberg-Nemo-12B.Q4_K_M.gguf": ["mradermacher/Mahou-Gutenberg-Nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
95
+ "Mahou-1.3-llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Mahou-1.3-llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
96
+ "gemma-advanced-v1.Q4_K_M.gguf": ["QuantFactory/gemma-advanced-v1-GGUF", MessagesFormatterType.ALPACA],
97
+ "flammen21X-mistral-7B-Q5_K_M.gguf": ["duyntnet/flammen21X-mistral-7B-imatrix-GGUF", MessagesFormatterType.MISTRAL],
98
+ "Magnum-Instruct-DPO-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-DPO-12B-GGUF", MessagesFormatterType.MISTRAL],
99
+ "Carasique-v0.3b.Q4_K_S.gguf": ["mradermacher/Carasique-v0.3b-GGUF", MessagesFormatterType.MISTRAL],
100
+ "MN-12b-Sunrose-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12b-Sunrose-GGUF", MessagesFormatterType.MISTRAL],
101
+ "OpenChat-3.5-7B-SOLAR-v2.0.i1-Q4_K_M.gguf": ["mradermacher/OpenChat-3.5-7B-SOLAR-v2.0-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
102
+ "Carasique-v0.3.Q4_K_M.gguf": ["mradermacher/Carasique-v0.3-GGUF", MessagesFormatterType.MISTRAL],
103
+ "Crimson_Dawn-V0.1.Q4_K_M.gguf": ["mradermacher/Crimson_Dawn-V0.1-GGUF", MessagesFormatterType.MISTRAL],
104
+ "Samantha-hermes3-8b-model-fixed.i1-Q5_K_M.gguf": ["mradermacher/Samantha-hermes3-8b-model-fixed-i1-GGUF", MessagesFormatterType.MISTRAL],
105
+ "Hermes-3-Llama-3.1-8B-lorablated-Q5_K_M.gguf": ["bartowski/Hermes-3-Llama-3.1-8B-lorablated-GGUF", MessagesFormatterType.LLAMA_3],
106
+ "stratagem-instruct-12b.i1-Q4_K_M.gguf": ["mradermacher/stratagem-instruct-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
107
+ "omed-llama3.1-8b.Q5_K_M.gguf": ["mradermacher/omed-llama3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
108
+ "omed-gemma2-9b.i1-Q4_K_M.gguf": ["mradermacher/omed-gemma2-9b-i1-GGUF", MessagesFormatterType.ALPACA],
109
+ "L3.1-Siithamo-v0.3-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.3-8B-GGUFs", MessagesFormatterType.LLAMA_3],
110
+ "mistral-nemo-gutenberg-12B-v3.i1-Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v3-i1-GGUF", MessagesFormatterType.MISTRAL],
111
+ "MN-12B-Tarsus-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12B-Tarsus-GGUF", MessagesFormatterType.MISTRAL],
112
+ "Magnum-Instruct-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
113
+ "Rocinante-12B-v1.i1-Q4_K_M.gguf": ["mradermacher/Rocinante-12B-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
114
+ "Llama-3.1-Storm-8B-Q5_K_M.gguf": ["bartowski/Llama-3.1-Storm-8B-GGUF", MessagesFormatterType.MISTRAL],
115
+ "Tess-3-Mistral-Nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Tess-3-Mistral-Nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
116
+ "Hermes-3-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Hermes-3-Llama-3.1-8B-GGUF", MessagesFormatterType.MISTRAL],
117
+ "Roleplay-Hermes-3-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Roleplay-Hermes-3-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
118
+ "Dusk_Rainbow_Ep03-Q5_K_M.gguf": ["SicariusSicariiStuff/Dusk_Rainbow_GGUFs", MessagesFormatterType.LLAMA_3],
119
+ "NemoReRemix-12B-Q4_K_M.gguf": ["bartowski/NemoReRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
120
+ "Aura-NeMo-12B-Q4_K_L-imat.gguf": ["Reiterate3680/Aura-NeMo-12B-GGUF", MessagesFormatterType.MISTRAL],
121
+ "TypeII-12B.Q4_K_S.gguf": ["mradermacher/TypeII-12B-GGUF", MessagesFormatterType.MISTRAL],
122
+ "TypeII-A-12B.Q4_K_M.gguf": ["mradermacher/TypeII-A-12B-GGUF", MessagesFormatterType.CHATML],
123
+ "yuna-ai-v3-atomic-q_4_k_m.gguf": ["yukiarimo/yuna-ai-v3-atomic", MessagesFormatterType.CHATML],
124
+ "Peach-9B-8k-Roleplay-Q4_K_M.gguf": ["bartowski/Peach-9B-8k-Roleplay-GGUF", MessagesFormatterType.LLAMA_3],
125
+ "heartstolen_model-stock_8b-q4_k_m.gguf": ["DreadPoor/HeartStolen_model-stock_8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
126
+ "Llama-3.1-8B-ArliAI-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
127
+ "ArliAI-Llama-3-8B-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/ArliAI-Llama-3-8B-Formax-v1.0-GGUF", MessagesFormatterType.LLAMA_3],
128
+ "Llama-3.1-8B-ArliAI-RPMax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
129
+ "badger-writer-llama-3-8b-q4_k_m.gguf": ["A2va/badger-writer-llama-3-8b-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
130
+ "magnum-12b-v2.5-kto-Q4_K_L-imat.gguf": ["Reiterate3680/magnum-12b-v2.5-kto-GGUF", MessagesFormatterType.CHATML],
131
+ "CeleMo-Instruct-128k.Q4_K_S.gguf": ["mradermacher/CeleMo-Instruct-128k-GGUF", MessagesFormatterType.CHATML],
132
+ "KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge.q3_k_l.gguf": ["AlekseiPravdin/KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge-gguf", MessagesFormatterType.MISTRAL],
133
+ "HolyNemo-12B.Q4_K_M.gguf": ["mradermacher/HolyNemo-12B-GGUF", MessagesFormatterType.MISTRAL],
134
+ "mistral-nemo-gutenberg-12B-v2.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v2-GGUF", MessagesFormatterType.MISTRAL],
135
+ "KukulStanta-InfinityRP-7B-slerp.Q5_K_M.gguf": ["mradermacher/KukulStanta-InfinityRP-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
136
+ "Rocinante-12B-v1a-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v1a-GGUF", MessagesFormatterType.MISTRAL],
137
+ "gemma-2-9b-it-WPO-HB.Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-WPO-HB-GGUF", MessagesFormatterType.ALPACA],
138
+ "mistral-nemo-bophades-12B.Q4_K_M.gguf": ["mradermacher/mistral-nemo-bophades-12B-GGUF", MessagesFormatterType.MISTRAL],
139
+ "Stella-mistral-nemo-12B.Q4_K_S.gguf": ["mradermacher/Stella-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
140
+ "Gemma-2-Ataraxy-9B.Q4_K_M.gguf": ["mradermacher/Gemma-2-Ataraxy-9B-GGUF", MessagesFormatterType.ALPACA],
141
+ "NemoRemix-Magnum_V2_Base-12B.Q4_K_S.gguf": ["mradermacher/NemoRemix-Magnum_V2_Base-12B-GGUF", MessagesFormatterType.MISTRAL],
142
+ "Synatra-7B-v0.3-dpo.Q5_K_M.gguf": ["mradermacher/Synatra-7B-v0.3-dpo-GGUF", MessagesFormatterType.MISTRAL],
143
+ "OpenCrystal-12B-Instruct.Q4_K_M.gguf": ["mradermacher/OpenCrystal-12B-Instruct-GGUF", MessagesFormatterType.MISTRAL],
144
+ "dolphinmaid_l3-1_01sl-q5ks.gguf": ["Dunjeon/DolphinMaid_L3.1_8B-01_GGUF", MessagesFormatterType.LLAMA_3],
145
+ "TypeI-12B.Q4_K_S.gguf": ["mradermacher/TypeI-12B-GGUF", MessagesFormatterType.CHATML],
146
+ "lyralin-12b-v1-q5_k_m.gguf": ["NGalrion/Lyralin-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
147
+ "margnum-12b-v1-q5_k_m.gguf": ["NGalrion/Margnum-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
148
+ "L3-Boshima-a.Q5_K_M.gguf": ["mradermacher/L3-Boshima-a-GGUF", MessagesFormatterType.LLAMA_3],
149
+ "canidori-12b-v1-q5_k_m.gguf": ["NGalrion/Canidori-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
150
+ "MN-12B-Estrella-v1.Q4_K_S.gguf": ["mradermacher/MN-12B-Estrella-v1-GGUF", MessagesFormatterType.CHATML],
151
+ "gemmaomni2-2b-q5_k_m.gguf": ["bunnycore/GemmaOmni2-2B-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
152
+ "MN-LooseCannon-12B-v1.Q4_K_M.gguf": ["mradermacher/MN-LooseCannon-12B-v1-GGUF", MessagesFormatterType.CHATML],
153
+ "Pleiades-12B-v1.Q4_K_M.gguf": ["mradermacher/Pleiades-12B-v1-GGUF", MessagesFormatterType.CHATML],
154
+ "mistral-nemo-gutenberg-12B.Q4_K_S.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-GGUF", MessagesFormatterType.MISTRAL],
155
+ "gemma2-gutenberg-9B.Q4_K_M.gguf": ["mradermacher/gemma2-gutenberg-9B-GGUF", MessagesFormatterType.ALPACA],
156
+ "NemoDori-v0.5-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.5-12B-MN-BT-i1-GGUF", MessagesFormatterType.MISTRAL],
157
+ "NemoDori-v0.2.1-12B-MN-BT.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.1-12B-MN-BT-GGUF", MessagesFormatterType.MISTRAL],
158
+ "NemoDori-v0.2.2-12B-MN-ties.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.2-12B-MN-ties-GGUF", MessagesFormatterType.MISTRAL],
159
+ "Mini-Magnum-Unboxed-12B-Q4_K_M.gguf": ["concedo/Mini-Magnum-Unboxed-12B-GGUF", MessagesFormatterType.ALPACA],
160
+ "L3.1-Siithamo-v0.1-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
161
+ "L3.1-Siithamo-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
162
+ "Kitsunebi-v1-Gemma2-8k-9B.Q5_K_M.gguf": ["grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF", MessagesFormatterType.ALPACA],
163
+ "Llama-3-8B-Stroganoff-3.0.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-3.0-i1-GGUF", MessagesFormatterType.LLAMA_3],
164
+ "NemoDori-v0.2-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-12B-MN-BT-i1-GGUF", MessagesFormatterType.CHATML],
165
+ "NemoDori-v0.1-12B-MS.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.1-12B-MS-GGUF", MessagesFormatterType.CHATML],
166
+ "magnum-12b-v2.i1-Q4_K_M.gguf": ["mradermacher/magnum-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
167
+ "Alpaca-Llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Alpaca-Llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
168
+ "Orthrus-12b-v0.8.Q4_K_M.gguf": ["mradermacher/Orthrus-12b-v0.8-GGUF", MessagesFormatterType.CHATML],
169
+ "LongWriter-llama3.1-8b-Q5_K_M.gguf": ["bartowski/LongWriter-llama3.1-8b-GGUF", MessagesFormatterType.MISTRAL],
170
+ "L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc.Q5_K_M.gguf": ["mradermacher/L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc-GGUF", MessagesFormatterType.LLAMA_3],
171
+ "YetAnotherMerge-v0.5.Q4_K_M.gguf": ["mradermacher/YetAnotherMerge-v0.5-GGUF", MessagesFormatterType.CHATML],
172
+ "open-hermes-sd-finetune-erot-story.Q5_K_M.gguf": ["mradermacher/open-hermes-sd-finetune-erot-story-GGUF", MessagesFormatterType.CHATML],
173
+ "OntologyHermes-2.5-Mistral-7B.Q6_K.gguf": ["mradermacher/OntologyHermes-2.5-Mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
174
+ "cosmic-2.i1-Q5_K_M.gguf": ["mradermacher/cosmic-2-i1-GGUF", MessagesFormatterType.MISTRAL],
175
+ "L3-Horizon-Anteros-Ara-v0.1-9B.i1-Q4_K_M.gguf": ["mradermacher/L3-Horizon-Anteros-Ara-v0.1-9B-i1-GGUF", MessagesFormatterType.LLAMA_3],
176
+ "Mistral-Nemo-Instruct-2407.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Instruct-2407-i1-GGUF", MessagesFormatterType.MISTRAL],
177
+ "Ellaria-9B.i1-Q4_K_M.gguf": ["mradermacher/Ellaria-9B-i1-GGUF", MessagesFormatterType.ALPACA],
178
+ "Apollo-0.4-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Apollo-0.4-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
179
+ "NemoRemix-12B.Q4_K_M.gguf": ["mradermacher/NemoRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
180
+ "32K_Selfbot.i1-Q5_K_M.gguf": ["mradermacher/32K_Selfbot-i1-GGUF", MessagesFormatterType.MISTRAL],
181
+ "Viviana_V3.i1-Q5_K_M.gguf": ["mradermacher/Viviana_V3-i1-GGUF", MessagesFormatterType.MISTRAL],
182
+ "dolphin-2.9.4-llama3.1-8b.i1-Q5_K_M.gguf": ["mradermacher/dolphin-2.9.4-llama3.1-8b-i1-GGUF", MessagesFormatterType.CHATML],
183
+ "L3-SAO-MIX-8B-V1.i1-Q5_K_M.gguf": ["mradermacher/L3-SAO-MIX-8B-V1-i1-GGUF", MessagesFormatterType.LLAMA_3],
184
+ "bestofllama3-8b-stock-q5_k_m.gguf": ["bunnycore/BestofLLama3-8B-stock-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
185
+ "L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M.gguf": ["bartowski/L3-Umbral-Mind-RP-v3.0-8B-GGUF", MessagesFormatterType.LLAMA_3],
186
+ "Tess-3-Mistral-Nemo-Q4_K_M.gguf": ["bartowski/Tess-3-Mistral-Nemo-GGUF", MessagesFormatterType.MISTRAL],
187
+ "Llama-3-8B-Stroganoff-2.0.Q5_K_M.gguf": ["RichardErkhov/HiroseKoichi_-_Llama-3-8B-Stroganoff-2.0-gguf", MessagesFormatterType.LLAMA_3],
188
+ "L3-8B-Helium3.Q5_K_M.gguf": ["mradermacher/L3-8B-Helium3-GGUF", MessagesFormatterType.LLAMA_3],
189
+ "MN-12B-Lyra-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Lyra-v1-i1-GGUF", MessagesFormatterType.CHATML],
190
+ "mahou-1.3-mistral-nemo-12b-q5_k_m.gguf": ["sh1njuku/Mahou-1.3-mistral-nemo-12B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
191
+ "Humanish-Roleplay-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Humanish-Roleplay-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
192
+ "Llama-3-Luminurse-v0.1-OAS-8B.Q5_K_M.gguf": ["grimjim/Llama-3-Luminurse-v0.1-OAS-8B-GGUF", MessagesFormatterType.LLAMA_3],
193
+ "L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf": ["L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf", MessagesFormatterType.MISTRAL],
194
+ "Evolved-Llama3-8B.i1-Q5_K_M.gguf": ["mradermacher/Evolved-Llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
195
+ "Pantheon-RP-1.5-12b-Nemo.i1-Q4_K_M.gguf": ["mradermacher/Pantheon-RP-1.5-12b-Nemo-i1-GGUF", MessagesFormatterType.CHATML],
196
+ "DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-Q5_K_M.gguf": ["bartowski/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
197
+ "Llama-3-Swallow-8B-Instruct-v0.1.Q5_K_M.gguf": ["YukiTomita-CC/Llama-3-Swallow-8B-Instruct-v0.1-IMat-GGUF_dolly-15k-ja-prompt", MessagesFormatterType.ALPACA],
198
+ "natsumura-storytelling-rp-1.0-llama-3.1-8B.Q5_K_M.gguf": ["tohur/natsumura-storytelling-rp-1.0-llama-3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
199
+ "mini-magnum-12b-v1.1.i1-Q4_K_M.gguf": ["mradermacher/mini-magnum-12b-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
200
+ "MN-12B-Celeste-V1.9-Q4_K_M.gguf": ["bartowski/MN-12B-Celeste-V1.9-GGUF", MessagesFormatterType.CHATML],
201
+ "Llama-3.1-Techne-RP-8b-v1.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Techne-RP-8b-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
202
+ "L3-Rhaenys-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Rhaenys-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
203
+ "Llama-3.1-8b-Uncensored-Dare.i1-Q4_K_M.gguf": ["mradermacher/Llama-3.1-8b-Uncensored-Dare-i1-GGUF", MessagesFormatterType.LLAMA_3],
204
+ "Eros_Scribe-10.7b-v3.Q4_K_M.gguf": ["mradermacher/Eros_Scribe-10.7b-v3-GGUF", MessagesFormatterType.MISTRAL],
205
+ "Gemma2-Nephilim-v3-9B.i1-Q5_K_M.gguf": ["mradermacher/Gemma2-Nephilim-v3-9B-i1-GGUF", MessagesFormatterType.ALPACA],
206
+ "Nemomix-v4.0-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v4.0-12B-GGUF", MessagesFormatterType.CHATML],
207
+ "Nemomix-v0.1-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v0.1-12B-GGUF", MessagesFormatterType.CHATML],
208
+ "Loki-v2.1.i1-Q5_K_M.gguf": ["mradermacher/Loki-v2.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
209
+ "llama3-8B-Special-Dark-RP2.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-Special-Dark-RP2-i1-GGUF", MessagesFormatterType.LLAMA_3],
210
+ "L3-8B-Celeste-v1-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-v1-GGUF", MessagesFormatterType.LLAMA_3],
211
+ "L3-8B-Celeste-V1.2-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-V1.2-GGUF", MessagesFormatterType.LLAMA_3],
212
+ "L3.1-8B-Celeste-V1.5.i1-Q5_K_M.gguf": ["mradermacher/L3.1-8B-Celeste-V1.5-i1-GGUF", MessagesFormatterType.MISTRAL],
213
+ "Celeste-12B-V1.6-Q4_K_M.gguf": ["bartowski/Celeste-12B-V1.6-GGUF", MessagesFormatterType.MISTRAL],
214
+ "L3-SthenoMaidBlackroot-8B-V1-exp5-11-Q4_K_M.gguf": ["DavidAU/L3-SthenoMaidBlackroot-8.9B-V1-BRAINSTORM-5x-GGUF", MessagesFormatterType.LLAMA_3],
215
+ "Llama-3.1-8B-Instruct-Fei-v1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-Instruct-Fei-v1-Uncensored-i1-GGUF", MessagesFormatterType.MISTRAL],
216
+ "IceCoffeeRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceCoffeeRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
217
+ "lumi-nemo-e2.0.Q4_K_M.gguf": ["mradermacher/lumi-nemo-e2.0-GGUF", MessagesFormatterType.MISTRAL],
218
+ "Lumimaid-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/Lumimaid-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
219
+ "Lumimaid-v0.2-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-v0.2-12B-i1-GGUF", MessagesFormatterType.LLAMA_3],
220
+ "Llama-3.1-8B-Instruct-abliterated_via_adapter.Q5_K_M.gguf": ["grimjim/Llama-3.1-8B-Instruct-abliterated_via_adapter-GGUF", MessagesFormatterType.LLAMA_3],
221
+ "Llama-Nephilim-Metamorphosis-v1-8B.Q5_K_M.gguf": ["grimjim/Llama-Nephilim-Metamorphosis-v1-8B-GGUF", MessagesFormatterType.LLAMA_3],
222
+ "Meta-Llama-3.1-8B-Instruct-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Meta-Llama-3.1-8B-Instruct-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
223
+ "pstella-16b.Q5_K_M.gguf": ["mradermacher/pstella-16b-GGUF", MessagesFormatterType.LLAMA_3],
224
+ "DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored-i1-GGUF", MessagesFormatterType.LLAMA_3],
225
+ "Mistral-Nemo-Instruct-2407-Q4_K_M.gguf": ["bartowski/Mistral-Nemo-Instruct-2407-GGUF", MessagesFormatterType.MISTRAL],
226
+ "ghost-8b-beta.q5_k.gguf": ["ZeroWw/ghost-8b-beta-GGUF", MessagesFormatterType.MISTRAL],
227
+ "Honey-Yuzu-13B.Q4_K_M.gguf": ["backyardai/Honey-Yuzu-13B-GGUF", MessagesFormatterType.MISTRAL],
228
+ "llama3-8B-DarkIdol-2.3-Uncensored-32K.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-DarkIdol-2.3-Uncensored-32K-i1-GGUF", MessagesFormatterType.LLAMA_3],
229
+ "LLaMa-3-Instruct-SmallPrefMix-ORPO-8B.i1-Q5_K_M.gguf": ["mradermacher/LLaMa-3-Instruct-SmallPrefMix-ORPO-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
230
+ "NeuralLemon.Q5_K_M.gguf": ["backyardai/NeuralLemon-GGUF", MessagesFormatterType.MISTRAL],
231
+ "Llama-3-Intermix.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-Intermix-i1-GGUF", MessagesFormatterType.LLAMA_3],
232
+ "C3TR-Adapter-Q4_k_m.gguf": ["webbigdata/C3TR-Adapter_gguf", MessagesFormatterType.ALPACA],
233
+ "Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3.Q5_K_M.gguf": ["mradermacher/Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3-GGUF", MessagesFormatterType.LLAMA_3],
234
+ "Tiger-Gemma-9B-v2.Q4_K_M.gguf": ["QuantFactory/Tiger-Gemma-9B-v2-GGUF", MessagesFormatterType.ALPACA],
235
+ "gemma-2-9b-it-SimPO.i1-Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-SimPO-i1-GGUF", MessagesFormatterType.ALPACA],
236
+ "Gemma-2-9B-It-SPPO-Iter3.Q4_K_M.iMatrix.gguf": ["MCZK/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.ALPACA],
237
+ "Llama-3-NeuralPaca-8b.Q4_K_M.gguf": ["RichardErkhov/NeuralNovel_-_Llama-3-NeuralPaca-8b-gguf", MessagesFormatterType.ALPACA],
238
+ "SaoRPM-2x8B.i1-Q4_K_M.gguf": ["mradermacher/SaoRPM-2x8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
239
+ "L3-Hecate-8B-v1.2.Q4_K_M.gguf": ["mradermacher/L3-Hecate-8B-v1.2-GGUF", MessagesFormatterType.LLAMA_3],
240
+ "Mahou-1.3b-llama3-8B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.3b-llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
241
+ "SwallowMaid-8B-L3-SPPO-abliterated.i1-Q5_K_M.gguf": ["mradermacher/SwallowMaid-8B-L3-SPPO-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
242
+ "L3-8B-Lunar-Stheno.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Lunar-Stheno-i1-GGUF", MessagesFormatterType.LLAMA_3],
243
+ "llama3_Loradent.Q4_K_M.gguf": ["mradermacher/llama3_Loradent-GGUF", MessagesFormatterType.LLAMA_3],
244
+ "Llama-3-8B-Stroganoff.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-i1-GGUF", MessagesFormatterType.LLAMA_3],
245
+ "L3-8B-EnchantedForest-v0.5.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-EnchantedForest-v0.5-i1-GGUF", MessagesFormatterType.LLAMA_3],
246
+ "gemma-radiation-rp-9b-q5_k_m.gguf": ["pegasus912/Gemma-Radiation-RP-9B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
247
+ "Magic-Dolphin-7b.Q4_K_M.gguf": ["mradermacher/Magic-Dolphin-7b-GGUF", MessagesFormatterType.MISTRAL],
248
+ "mathstral-7B-v0.1-Q5_K_M.gguf": ["bartowski/mathstral-7B-v0.1-GGUF", MessagesFormatterType.MISTRAL],
249
+ "Gemma2-9B-it-Boku-v1.Q5_K_M.gguf": ["mradermacher/Gemma2-9B-it-Boku-v1-GGUF", MessagesFormatterType.MISTRAL],
250
+ "Gemma-2-9B-It-SPPO-Iter3-Q5_K_M.gguf": ["grapevine-AI/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.MISTRAL],
251
+ "L3-8B-Niitama-v1.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-Niitama-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
252
+ "Maidphin-Kunoichi-7B.Q5_K_M.gguf": ["RichardErkhov/nbeerbower_-_Maidphin-Kunoichi-7B-gguf", MessagesFormatterType.MISTRAL],
253
+ "L3-15B-EtherealMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-EtherealMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
254
+ "L3-15B-MythicalMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-MythicalMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
255
+ "llama-3-Nephilim-v3-8B.Q5_K_M.gguf": ["grimjim/llama-3-Nephilim-v3-8B-GGUF", MessagesFormatterType.LLAMA_3],
256
+ "NarutoDolphin-10B.Q5_K_M.gguf": ["RichardErkhov/FelixChao_-_NarutoDolphin-10B-gguf", MessagesFormatterType.MISTRAL],
257
+ "l3-8b-tamamo-v1-q8_0.gguf": ["Ransss/L3-8B-Tamamo-v1-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
258
+ "Tiger-Gemma-9B-v1-Q4_K_M.gguf": ["bartowski/Tiger-Gemma-9B-v1-GGUF", MessagesFormatterType.LLAMA_3],
259
+ "TooManyMixRolePlay-7B-Story_V3.5.Q4_K_M.gguf": ["mradermacher/TooManyMixRolePlay-7B-Story_V3.5-GGUF", MessagesFormatterType.LLAMA_3],
260
+ "natsumura-llama3-v1.1-8b.Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
261
+ "natsumura-llama3-v1-8b.i1-Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
262
+ "nephra_v1.0.Q5_K_M.gguf": ["PrunaAI/yodayo-ai-nephra_v1.0-GGUF-smashed", MessagesFormatterType.LLAMA_3],
263
+ "DPO-ONLY-Zephyr-7B.Q6_K.gguf": ["mradermacher/DPO-ONLY-Zephyr-7B-GGUF", MessagesFormatterType.LLAMA_3],
264
+ "L3-Deluxe-Scrambled-Eggs-On-Toast-8B.Q8_0.gguf": ["mradermacher/L3-Deluxe-Scrambled-Eggs-On-Toast-8B-GGUF", MessagesFormatterType.LLAMA_3],
265
+ "L3-Scrambled-Eggs-On-Toast-8B.i1-Q6_K.gguf": ["mradermacher/L3-Scrambled-Eggs-On-Toast-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
266
+ "Llama-3-uncensored-Dare-1.Q4_K_M.gguf": ["mradermacher/Llama-3-uncensored-Dare-1-GGUF", MessagesFormatterType.LLAMA_3],
267
+ "llama3-8B-DarkIdol-2.2-Uncensored-1048K.i1-Q6_K.gguf": ["mradermacher/llama3-8B-DarkIdol-2.2-Uncensored-1048K-i1-GGUF", MessagesFormatterType.LLAMA_3],
268
+ "dolphin-2.9.3-mistral-7b-32k-q4_k_m.gguf": ["huggingkot/dolphin-2.9.3-mistral-7B-32k-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
269
+ "dolphin-2.9.3-mistral-7B-32k-Q5_K_M.gguf": ["bartowski/dolphin-2.9.3-mistral-7B-32k-GGUF", MessagesFormatterType.MISTRAL],
270
+ "Lexi-Llama-3-8B-Uncensored_Q5_K_M.gguf": ["Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
271
+ "Llama3-Sophie.Q8_0.gguf": ["mradermacher/Llama3-Sophie-GGUF", MessagesFormatterType.LLAMA_3],
272
+ "Aura-Uncensored-OAS-8B-L3.i1-Q4_K_M.gguf": ["mradermacher/Aura-Uncensored-OAS-8B-L3-i1-GGUF", MessagesFormatterType.LLAMA_3],
273
+ "L3-Uncen-Merger-Omelette-RP-v0.2-8B-Q5_K_S-imat.gguf": ["LWDCLS/L3-Uncen-Merger-Omelette-RP-v0.2-8B-GGUF-IQ-Imatrix-Request", MessagesFormatterType.LLAMA_3],
274
+ "qwen2-diffusion-prompter-v01-q6_k.gguf": ["trollek/Qwen2-0.5B-DiffusionPrompter-v0.1-GGUF", MessagesFormatterType.LLAMA_3],
275
+ "Smegmma-Deluxe-9B-v1-Q6_K.gguf": ["bartowski/Smegmma-Deluxe-9B-v1-GGUF", MessagesFormatterType.MISTRAL],
276
+ "Mahou-1.3c-mistral-7B.i1-Q6_K.gguf": ["mradermacher/Mahou-1.3c-mistral-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
277
+ "Silicon-Maid-7B-Q8_0_X.gguf": ["duyntnet/Silicon-Maid-7B-imatrix-GGUF", MessagesFormatterType.ALPACA],
278
+ "l3-umbral-mind-rp-v3.0-8b-q5_k_m-imat.gguf": ["Casual-Autopsy/L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
279
+ "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
280
+ "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
281
+ "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
282
+ "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
283
+ "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
284
+ "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
285
+ "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
286
+ "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
287
+ "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
288
+ "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],
289
+ "Llama-3-Nymeria-ELYZA-8B.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-Nymeria-ELYZA-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
290
+ "suzume-llama-3-8B-japanese.Q4_K_M.gguf": ["PrunaAI/lightblue-suzume-llama-3-8B-japanese-GGUF-smashed", MessagesFormatterType.LLAMA_3],
291
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q4_K_M.gguf": ["RichardErkhov/lightblue_-_suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
292
+ "Bungo-L3-8B.Q5_K_M.gguf": ["backyardai/Bungo-L3-8B-GGUF", MessagesFormatterType.LLAMA_3],
293
+ "ezo-common-t2-2b-gemma-2-it.Q6_K.gguf": ["keitokei1994/EZO-Common-T2-2B-gemma-2-it-GGUF", MessagesFormatterType.ALPACA],
294
+ "Llama-3-EZO-8b-Common-it.Q5_K_M.iMatrix.gguf": ["MCZK/Llama-3-EZO-8b-Common-it-GGUF", MessagesFormatterType.MISTRAL],
295
+ "EZO-Common-9B-gemma-2-it.i1-Q4_K_M.gguf": ["mradermacher/EZO-Common-9B-gemma-2-it-i1-GGUF", MessagesFormatterType.MISTRAL],
296
+ }
297
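+ # UI label -> llama-cpp-agent prompt template. Note that "Gemma 2" and
+ # "Qwen2" have no dedicated formatter in this list and reuse ALPACA and
+ # OPEN_CHAT respectively as approximations.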
+ llm_formats = {
298
+ "MISTRAL": MessagesFormatterType.MISTRAL,
299
+ "CHATML": MessagesFormatterType.CHATML,
300
+ "VICUNA": MessagesFormatterType.VICUNA,
301
+ "LLAMA 2": MessagesFormatterType.LLAMA_2,
302
+ "SYNTHIA": MessagesFormatterType.SYNTHIA,
303
+ "NEURAL CHAT": MessagesFormatterType.NEURAL_CHAT,
304
+ "SOLAR": MessagesFormatterType.SOLAR,
305
+ "OPEN CHAT": MessagesFormatterType.OPEN_CHAT,
306
+ "ALPACA": MessagesFormatterType.ALPACA,
307
+ "CODE DS": MessagesFormatterType.CODE_DS,
308
+ "B22": MessagesFormatterType.B22,
309
+ "LLAMA 3": MessagesFormatterType.LLAMA_3,
310
+ "PHI 3": MessagesFormatterType.PHI_3,
311
+ "Autocoder": MessagesFormatterType.AUTOCODER,
312
+ "DeepSeek Coder v2": MessagesFormatterType.DEEP_SEEK_CODER_2,
313
+ "Gemma 2": MessagesFormatterType.ALPACA,
314
+ "Qwen2": MessagesFormatterType.OPEN_CHAT,
315
+ }
316
+ # https://github.com/Maximilian-Winter/llama-cpp-agent
317
+ llm_languages = ["English", "Japanese", "Chinese"]
318
+ llm_models_tupled_list = []
319
+ default_llm_model_filename = list(llm_models.keys())[0]
320
+ override_llm_format = None
321
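+ # Usage sketch (names from this file): each registry entry unpacks as
+ #   repo_id, fmt = llm_models[default_llm_model_filename]
+ # where fmt is the MessagesFormatterType handed to LlamaCppAgent below.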
+
322
+
323
+ def to_list(s):
324
+ return [x.strip() for x in s.split(",") if not s == ""]
325
+
326
+
327
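+ # Order-preserving dedup: sorting the unique items by their first index
+ # in the original list keeps the original ordering.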
+ def list_uniq(l):
328
+ return sorted(set(l), key=l.index)
329
+
330
+
331
+ @wrapt_timeout_decorator.timeout(dec_timeout=3.5)
332
+ def to_list_ja(s):
333
+ import re
334
+ s = re.sub(r'[、。]', ',', s)
335
+ return [x.strip() for x in s.split(",") if not s == ""]
336
+
337
+
338
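+ # Heuristic language check: the string counts as Japanese if any
+ # character's Unicode name contains CJK UNIFIED, HIRAGANA, or KATAKANA.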
+ def is_japanese(s):
339
+ import unicodedata
340
+ for ch in s:
341
+ name = unicodedata.name(ch, "")
342
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
343
+ return True
344
+ return False
345
+
346
+
347
+ def update_llm_model_tupled_list():
348
+ from pathlib import Path
349
+ global llm_models_tupled_list
350
+ llm_models_tupled_list = []
351
+ for k, v in llm_models.items():
352
+ name = k
353
+ value = k
354
+ llm_models_tupled_list.append((name, value))
355
+ model_files = Path(llm_models_dir).glob('*.gguf')
356
+ for path in model_files:
357
+ name = path.name
358
+ value = path.name
359
+ llm_models_tupled_list.append((name, value))
360
+ llm_models_tupled_list = list_uniq(llm_models_tupled_list)
361
+ return llm_models_tupled_list
362
+
363
+
364
+ def download_llm_models():
365
+ from huggingface_hub import hf_hub_download
366
+ global llm_models_tupled_list
367
+ llm_models_tupled_list = []
368
+ for k, v in llm_models.items():
369
+ try:
370
+ hf_hub_download(repo_id = v[0], filename = k, local_dir = llm_models_dir)
371
+ except Exception:
372
+ continue
373
+ name = k
374
+ value = k
375
+ llm_models_tupled_list.append((name, value))
376
+
377
+
378
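+ # Fetches a single GGUF from its registry repo into llm_models_dir and
+ # refreshes the cached model list; falls back to the default model name
+ # if the file is unknown or the download fails.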
+ def download_llm_model(filename):
379
+ from huggingface_hub import hf_hub_download
380
+ if filename not in llm_models.keys(): return default_llm_model_filename
381
+ try:
382
+ hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
383
+ except Exception:
384
+ return default_llm_model_filename
385
+ update_llm_model_tupled_list()
386
+ return filename
387
+
388
+
389
+ def get_dolphin_model_info(filename):
390
+ md = "None"
391
+ items = llm_models.get(filename, None)
392
+ if items:
393
+ md = f'Repo: [{items[0]}](https://huggingface.co/{items[0]})'
394
+ return md
395
+
396
+
397
+ def select_dolphin_model(filename, progress=gr.Progress(track_tqdm=True)):
398
+ global override_llm_format
399
+ override_llm_format = None
400
+ progress(0, desc="Loading model...")
401
+ value = download_llm_model(filename)
402
+ progress(1, desc="Model loaded.")
403
+ md = get_dolphin_model_info(filename)
404
+ return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md)
405
+
406
+
407
+ def select_dolphin_format(format_name):
408
+ global override_llm_format
409
+ override_llm_format = llm_formats[format_name]
410
+ return gr.update(value=format_name)
411
+
412
+
413
+ #download_llm_models()
414
+ download_llm_model(default_llm_model_filename)
415
+
416
+
417
+ def get_dolphin_models():
418
+ return update_llm_model_tupled_list()
419
+
420
+
421
+ def get_llm_formats():
422
+ return list(llm_formats.keys())
423
+
424
+
425
+ def get_key_from_value(d, val):
426
+ keys = [k for k, v in d.items() if v == val]
427
+ if keys:
428
+ return keys[0]
429
+ return None
430
+
431
+
432
+ def get_dolphin_model_format(filename):
433
+ if filename not in llm_models.keys(): filename = default_llm_model_filename
434
+ format = llm_models[filename][1]
435
+ format_name = get_key_from_value(llm_formats, format)
436
+ return format_name
437
+
438
+
439
+ def add_dolphin_models(query, format_name):
440
+ import re
441
+ from huggingface_hub import HfApi
442
+ global llm_models
443
+ api = HfApi()
444
+ add_models = {}
445
+ format = llm_formats[format_name]
446
+ filename = ""
447
+ repo = ""
448
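+ # The regex below accepts either "user/repo" or a full URL such as
+ # https://huggingface.co/user/repo/blob/main/model.gguf, yielding
+ # [repo_id] or [repo_id, filename].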
+ try:
449
+ s = list(re.findall(r'^(?:https?://huggingface\.co/)?(.+?/.+?)(?:/.*/(.+?\.gguf).*?)?$', query)[0])
450
+ if s and "" in s: s.remove("")
451
+ if len(s) == 1:
452
+ repo = s[0]
453
+ if not api.repo_exists(repo_id = repo): return gr.update(visible=True)
454
+ files = api.list_repo_files(repo_id = repo)
455
+ for file in files:
456
+ if str(file).endswith(".gguf"): add_models[filename] = [repo, format]
457
+ elif len(s) >= 2:
458
+ repo = s[0]
459
+ filename = s[1]
460
+ if not api.repo_exists(repo_id = repo) or not api.file_exists(repo_id = repo, filename = filename): return gr.update(visible=True)
461
+ add_models[filename] = [repo, format]
462
+ else: return gr.update(visible=True)
463
+ except Exception:
464
+ return gr.update(visible=True)
465
+ print(add_models)
466
+ llm_models = (llm_models | add_models).copy()
467
+ return gr.update(choices=get_dolphin_models())
468
+
469
+
470
+ dolphin_output_language = "English"
471
+ dolphin_sysprompt_mode = "Default"
472
+ dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
473
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
474
+ ```
475
+ [Tags]
476
+ - Words to describe full names of characters and names of series in which they appear.
477
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
478
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
479
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
480
+ - Words to describe their stance from head to toe in detail.
481
+ - Words to describe their actions, especially sexual activity, in detail.
482
+ - Words to describe their surroundings in detail.
483
+ - Words to describe background details, such as inside room, forest, starry sky.
484
+ [Rules]
485
+ - Any output should be plain text in English, without line breaks.
486
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
487
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
488
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
489
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
490
+ - Convert any nicknames to full names first.
491
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
492
+ - Assemble a short story internally, developed from the themes provided, then internally describe a scene in detailed English sentences based on the central character.
493
+ - Split sentences into short phrases or words, and then convert them to Tags.
494
+ - Use associated Danbooru tags, E621 tags.
495
+ - Each Tag should be used only once per output.
496
+ - Anyway, keep processing until you've finished outputting a message.
497
+ ```
498
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
499
+ "Strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
500
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
501
+ ```
502
+ [Tags]
503
+ - Words to describe full names of characters and names of series in which they appear.
504
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
505
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
506
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
507
+ - Words to describe their stance from head to toe in detail.
508
+ - Words to describe their actions, especially sexual activity, in detail.
509
+ - Words to describe their surroundings in detail.
510
+ - Words to describe background details, such as inside room, forest, starry sky.
511
+ [Rules]
512
+ - Any output should be plain text in English, without line breaks.
513
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
514
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
515
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
516
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
517
+ - Convert any nicknames to full names first.
518
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
519
+ - Rewrite the given themes in plain English without changing the main idea.
520
+ - Split sentences into short phrases or words, and then convert them to Tags.
521
+ - Use associated Danbooru tags, E621 tags.
522
+ - Each Tag should be used only once per output.
523
+ - Anyway, keep processing until you've finished outputting a message.
524
+ ```
525
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
526
+ "With description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
527
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
528
+ ```
529
+ [Tags]
530
+ - Words to describe full names of characters and names of series in which they appear.
531
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
532
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
533
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
534
+ - Words to describe their stance from head to toe in detail.
535
+ - Words to describe their actions, especially sexual activity, in detail.
536
+ - Words to describe their surroundings in detail.
537
+ - Words to describe background details, such as inside room, forest, starry sky.
538
+ [Rules]
539
+ - All Tags should be plain text in English, without line breaks.
540
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
541
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
542
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
543
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
544
+ - Convert any nicknames to full names first.
545
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
546
+ - Assemble a short story internally, developed from the themes provided, then internally describe a scene in detailed English sentences based on the central character.
547
+ - Split sentences into short phrases or words, and then convert them to Tags.
548
+ - Use associated Danbooru tags, E621 tags.
549
+ - Each Tag should be used only once per output.
550
+ - Anyway, keep processing until you've finished outputting a message.
551
+ ```
552
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
553
+ then describe the message you've generated in short, in <LANGUAGE>.:''',
554
+ "With dialogue and description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
555
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
556
+ ```
557
+ [Tags]
558
+ - Words to describe full names of characters and names of series in which they appear.
559
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
560
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
561
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
562
+ - Words to describe their stance from head to toe in detail.
563
+ - Words to describe their actions, especially sexual activity, in detail.
564
+ - Words to describe their surroundings in detail.
565
+ - Words to describe background details, such as inside room, forest, starry sky.
566
+ [Rules]
567
+ - All Tags should be plain text in English, without line breaks.
568
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
569
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
570
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
571
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
572
+ - Convert any nicknames to full names first.
573
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
574
+ - Assemble a short story internally, developed from the themes provided, then internally describe a scene in detailed English sentences based on the central character.
575
+ - Split sentences into short phrases or words, and then convert them to Tags.
576
+ - Use associated Danbooru tags, E621 tags.
577
+ - Each Tag should be used only once per output.
578
+ - Anyway, keep processing until you've finished outputting a message.
579
+ ```
580
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
581
+ then write the character's long spoken lines, composed of their voice, moaning, and inner thoughts, based on the story you have assembled, in <LANGUAGE>,
582
+ enclosed in //VOICEBEGIN//:// and //://VOICEEND//, then briefly describe the message you've generated, in <LANGUAGE>.:''',
583
+ "Longer prompt": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
584
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
585
+ ```
586
+ [Tags]
587
+ - Words to describe full names of characters and names of series in which they appear.
588
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
589
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
590
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
591
+ - Words to describe their stance from head to toe in detail.
592
+ - Words to describe their actions, especially sexual activity, in detail.
593
+ - Words to describe their surroundings in detail.
594
+ - Words to describe background details.
595
+ [Rules]
596
+ - All Tags should be plain text in English, without line breaks.
597
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
598
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
599
+ - Convert any nicknames to full names first.
600
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
601
+ - Assemble a short story internally, developed from the themes provided, then internally describe a scene in detailed English text based on the central character.
602
+ - Tags can be in the form of sentences.
603
+ - You can also use Danbooru tags, E621 tags as Tags.
604
+ - Anyway, keep processing until you've finished outputting a message.
605
+ ```
606
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
607
+ "Longer prompt strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
608
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
609
+ ```
610
+ [Tags]
611
+ - Words to describe full names of characters and names of series in which they appear.
612
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
613
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
614
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
615
+ - Words to describe their stance from head to toe in detail.
616
+ - Words to describe their actions, especially sexual activity, in detail.
617
+ - Words to describe their surroundings in detail.
618
+ - Words to describe background details.
619
+ [Rules]
620
+ - All Tags should be plain text in English, without line breaks.
621
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
622
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
623
+ - Convert any nicknames to full names first.
624
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
625
+ - Rewrite the given themes in plain English without changing the main idea.
626
+ - Tags can be in the form of sentences.
627
+ - You can also use Danbooru tags, E621 tags as Tags.
628
+ - Anyway, keep processing until you've finished outputting a message.
629
+ ```
630
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
631
+ "Longer prompt with description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
632
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
633
+ ```
634
+ [Tags]
635
+ - Words to describe full names of characters and names of series in which they appear.
636
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
637
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
638
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
639
+ - Words to describe their stance from head to toe in detail.
640
+ - Words to describe their actions, especially sexual activity, in detail.
641
+ - Words to describe their surroundings in detail.
642
+ - Words to describe background details.
643
+ [Rules]
644
+ - All Tags should be plain text in English, without line breaks.
645
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
646
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
647
+ - Convert any nicknames to full names first.
648
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
649
+ - Assemble a short story internally, developed from the themes provided, then internally describe a scene in detailed English text based on the central character.
650
+ - Tags can be in the form of sentences.
651
+ - You can also use Danbooru tags, E621 tags as Tags.
652
+ - Anyway, keep processing until you've finished outputting a message.
653
+ ```
654
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
655
+ then describe the message you've generated in short, in <LANGUAGE>.:''',
656
+ "Japanese to Danbooru Dictionary": r"""You are a helpful AI assistant.
657
+ Extract Japanese words from the following sentences and output them separated by commas. Convert words to their original forms.
658
+ Output should be enclosed in //GENBEGIN//:// and //://GENEND//. The text to be given is as follows:""",
659
+ "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
660
+
661
+
662
+ def get_dolphin_sysprompt():
663
+ import re
664
+ prompt = re.sub('<LANGUAGE>', dolphin_output_language, dolphin_system_prompt.get(dolphin_sysprompt_mode, ""))
665
+ return prompt
666
+
667
+
668
+ def get_dolphin_sysprompt_mode():
669
+ return list(dolphin_system_prompt.keys())
670
+
671
+
672
+ def select_dolphin_sysprompt(key: str):
673
+ global dolphin_sysprompt_mode
674
+ if key not in dolphin_system_prompt.keys():
675
+ dolphin_sysprompt_mode = "Default"
676
+ else:
677
+ dolphin_sysprompt_mode = key
678
+ return gr.update(value=get_dolphin_sysprompt())
679
+
680
+
681
+ def get_dolphin_languages():
682
+ return llm_languages
683
+
684
+
685
+ def select_dolphin_language(lang: str):
686
+ global dolphin_output_language
687
+ dolphin_output_language = lang
688
+ return gr.update(value=get_dolphin_sysprompt())
689
+
690
+
691
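+ # Extracts the text between the //GENBEGIN// and //GENEND// markers from
+ # an LLM reply and strips characters that would break a one-line prompt.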
+ @wrapt_timeout_decorator.timeout(dec_timeout=5.0)
692
+ def get_raw_prompt(msg: str):
693
+ import re
694
+ m = re.findall(r'/GENBEGIN/(.+?)/GENEND/', msg, re.DOTALL)
695
+ return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
696
+
697
+
698
+ @spaces.GPU(duration=60)
699
+ def dolphin_respond(
700
+ message: str,
701
+ history: list[tuple[str, str]],
702
+ model: str = default_llm_model_filename,
703
+ system_message: str = get_dolphin_sysprompt(),
704
+ max_tokens: int = 1024,
705
+ temperature: float = 0.7,
706
+ top_p: float = 0.95,
707
+ top_k: int = 40,
708
+ repeat_penalty: float = 1.1,
709
+ progress=gr.Progress(track_tqdm=True),
710
+ ):
711
+ from pathlib import Path
712
+ progress(0, desc="Processing...")
713
+
714
+ if override_llm_format:
715
+ chat_template = override_llm_format
716
+ else:
717
+ chat_template = llm_models[model][1]
718
+
719
+ llm = Llama(
720
+ model_path=str(Path(f"{llm_models_dir}/{model}")),
721
+ flash_attn=True,
722
+ n_gpu_layers=81, # effectively offload all layers to the GPU
723
+ n_batch=1024,
724
+ n_ctx=8192, # context window size
725
+ )
726
+ provider = LlamaCppPythonProvider(llm)
727
+
728
+ agent = LlamaCppAgent(
729
+ provider,
730
+ system_prompt=f"{system_message}",
731
+ predefined_messages_formatter_type=chat_template,
732
+ debug_output=False
733
+ )
734
+
735
+ settings = provider.get_provider_default_settings()
736
+ settings.temperature = temperature
737
+ settings.top_k = top_k
738
+ settings.top_p = top_p
739
+ settings.max_tokens = max_tokens
740
+ settings.repeat_penalty = repeat_penalty
741
+ settings.stream = True
742
+
743
+ messages = BasicChatHistory()
744
+
745
+ for msn in history:
746
+ user = {
747
+ 'role': Roles.user,
748
+ 'content': msn[0]
749
+ }
750
+ assistant = {
751
+ 'role': Roles.assistant,
752
+ 'content': msn[1]
753
+ }
754
+ messages.add_message(user)
755
+ messages.add_message(assistant)
756
+
757
+ stream = agent.get_chat_response(
758
+ message,
759
+ llm_sampling_settings=settings,
760
+ chat_history=messages,
761
+ returns_streaming_generator=True,
762
+ print_output=False
763
+ )
764
+
765
+ progress(0.5, desc="Processing...")
766
+
767
+ outputs = ""
768
+ for output in stream:
769
+ outputs += output
770
+ yield [(outputs, None)]
771
+
772
+
773
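+ # Converts the last chat reply into a comma-separated tag prompt for the
+ # image model, always appending the "nsfw, explicit" tags.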
+ def dolphin_parse(
774
+ history: list[tuple[str, str]],
775
+ ):
776
+ if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1:
777
+ return "", gr.update(visible=True), gr.update(visible=True)
778
+ try:
779
+ msg = history[-1][0]
780
+ raw_prompt = get_raw_prompt(msg)
781
+ except Exception:
782
+ return "", gr.update(visible=True), gr.update(visible=True)
783
+ prompts = []
784
+ if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
785
+ prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit"])
786
+ else:
787
+ prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit"])
788
+ return ", ".join(prompts), gr.update(interactive=True), gr.update(interactive=True)
789
+
790
+
791
+ @spaces.GPU(duration=60)
792
+ def dolphin_respond_auto(
793
+ message: str,
794
+ history: list[tuple[str, str]],
795
+ model: str = default_llm_model_filename,
796
+ system_message: str = get_dolphin_sysprompt(),
797
+ max_tokens: int = 1024,
798
+ temperature: float = 0.7,
799
+ top_p: float = 0.95,
800
+ top_k: int = 40,
801
+ repeat_penalty: float = 1.1,
802
+ progress=gr.Progress(track_tqdm=True),
803
+ ):
804
+ #if not is_japanese(message): return [(None, None)]
805
+ from pathlib import Path
806
+ progress(0, desc="Processing...")
807
+
808
+ if override_llm_format:
809
+ chat_template = override_llm_format
810
+ else:
811
+ chat_template = llm_models[model][1]
812
+
813
+ llm = Llama(
814
+ model_path=str(Path(f"{llm_models_dir}/{model}")),
815
+ flash_attn=True,
816
+ n_gpu_layers=81, # effectively offload all layers to the GPU
817
+ n_batch=1024,
818
+ n_ctx=8192, # context window size
819
+ )
820
+ provider = LlamaCppPythonProvider(llm)
821
+
822
+ agent = LlamaCppAgent(
823
+ provider,
824
+ system_prompt=f"{system_message}",
825
+ predefined_messages_formatter_type=chat_template,
826
+ debug_output=False
827
+ )
828
+
829
+ settings = provider.get_provider_default_settings()
830
+ settings.temperature = temperature
831
+ settings.top_k = top_k
832
+ settings.top_p = top_p
833
+ settings.max_tokens = max_tokens
834
+ settings.repeat_penalty = repeat_penalty
835
+ settings.stream = True
836
+
837
+ messages = BasicChatHistory()
838
+
839
+ for msn in history:
840
+ user = {
841
+ 'role': Roles.user,
842
+ 'content': msn[0]
843
+ }
844
+ assistant = {
845
+ 'role': Roles.assistant,
846
+ 'content': msn[1]
847
+ }
848
+ messages.add_message(user)
849
+ messages.add_message(assistant)
850
+
851
+ progress(0, desc="Translating...")
852
+ stream = agent.get_chat_response(
853
+ message,
854
+ llm_sampling_settings=settings,
855
+ chat_history=messages,
856
+ returns_streaming_generator=True,
857
+ print_output=False
858
+ )
859
+
860
+ progress(0.5, desc="Processing...")
861
+
862
+ outputs = ""
863
+ for output in stream:
864
+ outputs += output
865
+ yield [(outputs, None)]
866
+
867
+
868
+ def dolphin_parse_simple(
869
+ message: str,
870
+ history: list[tuple[str, str]],
871
+ ):
872
+ #if not is_japanese(message): return message
873
+ if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
874
+ try:
875
+ msg = history[-1][0]
876
+ raw_prompt = get_raw_prompt(msg)
877
+ except Exception:
878
+ return ""
879
+ prompts = []
880
+ if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
881
+ prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit", "rating_explicit"])
882
+ else:
883
+ prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
884
+ return ", ".join(prompts)
lora_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
model_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
modutils.py ADDED
@@ -0,0 +1,1224 @@
1
+ import json
2
+ import gradio as gr
3
+ from huggingface_hub import HfApi
4
+ import os
5
+ from pathlib import Path
6
+
7
+ from env import (
8
+ HF_LORA_PRIVATE_REPOS1,
9
+ HF_LORA_PRIVATE_REPOS2,
10
+ HF_MODEL_USER_EX,
11
+ HF_MODEL_USER_LIKES,
12
+ directory_loras,
13
+ hf_read_token,
14
+ hf_token,
15
+ CIVITAI_API_KEY,
16
+ )
17
+
18
+
19
+ def get_user_agent():
20
+ return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
21
+
22
+
23
+ def to_list(s):
24
+ return [x.strip() for x in s.split(",") if not s == ""]
25
+
26
+
27
+ def list_uniq(l):
28
+ return sorted(set(l), key=l.index)
29
+
30
+
31
+ def list_sub(a, b):
32
+ return [e for e in a if e not in b]
33
+
34
+
35
+ def get_local_model_list(dir_path):
36
+ model_list = []
37
+ valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
38
+ for file in Path(dir_path).glob("*"):
39
+ if file.suffix in valid_extensions:
40
+ file_path = str(Path(f"{dir_path}/{file.name}"))
41
+ model_list.append(file_path)
42
+ return model_list
43
+
44
+
45
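+ # Dispatches a download by URL type: gdown for Google Drive, aria2c with
+ # an Authorization header for Hugging Face, and aria2c with a token query
+ # parameter for Civitai.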
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
46
+ url = url.strip()
47
+
48
+ if "drive.google.com" in url:
49
+ original_dir = os.getcwd()
50
+ os.chdir(directory)
51
+ os.system(f"gdown --fuzzy {url}")
52
+ os.chdir(original_dir)
53
+ elif "huggingface.co" in url:
54
+ url = url.replace("?download=true", "")
55
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
56
+ if "/blob/" in url:
57
+ url = url.replace("/blob/", "/resolve/")
58
+ user_header = f'"Authorization: Bearer {hf_token}"'
59
+ if hf_token:
60
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
61
+ else:
62
+ os.system (f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
63
+ elif "civitai.com" in url:
64
+ if "?" in url:
65
+ url = url.split("?")[0]
66
+ if civitai_api_key:
67
+ url = url + f"?token={civitai_api_key}"
68
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
69
+ else:
70
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
71
+ else:
72
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
73
+
74
+
75
+ def escape_lora_basename(basename: str):
76
+ return basename.replace(".", "_").replace(" ", "_").replace(",", "")
77
+
78
+
79
+ def to_lora_key(path: str):
80
+ return escape_lora_basename(Path(path).stem)
81
+
82
+
83
+ def to_lora_path(key: str):
84
+ if Path(key).is_file(): return key
85
+ path = Path(f"{directory_loras}/{escape_lora_basename(key)}.safetensors")
86
+ return str(path)
87
+
88
+
89
+ def safe_float(value):
90
+ output = 1.0
91
+ try:
92
+ output = float(value)
93
+ except Exception:
94
+ output = 1.0
95
+ return output
96
+
97
+
98
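+ # Renames each generated image to a JST (UTC+9) timestamped filename and
+ # returns the updated gallery, the list of file paths, and a visibility flag.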
+ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
99
+ from datetime import datetime, timezone, timedelta
100
+ progress(0, desc="Updating gallery...")
101
+ dt_now = datetime.now(timezone(timedelta(hours=9)))
102
+ basename = dt_now.strftime('%Y%m%d_%H%M%S_')
103
+ i = 1
104
+ if not images: return images
105
+ output_images = []
106
+ output_paths = []
107
+ for image in images:
108
+ filename = basename + str(i) + ".png"
109
+ i += 1
110
+ oldpath = Path(image[0])
111
+ newpath = oldpath
112
+ try:
113
+ if oldpath.exists():
114
+ newpath = oldpath.resolve().rename(Path(filename).resolve())
115
+ except Exception:
116
+ pass
117
+ finally:
118
+ output_paths.append(str(newpath))
119
+ output_images.append((str(newpath), str(filename)))
120
+ progress(1, desc="Gallery updated.")
121
+ return gr.update(value=output_images), gr.update(value=output_paths), gr.update(visible=True)
122
+
123
+
124
+ def download_private_repo(repo_id, dir_path, is_replace):
125
+ from huggingface_hub import snapshot_download
126
+ if not hf_read_token: return
127
+ try:
128
+ snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
129
+ except Exception as e:
130
+ print(f"Error: Failed to download {repo_id}. ")
131
+ return
132
+ if is_replace:
133
+ for file in Path(dir_path).glob("*"):
134
+ if file.exists() and "." in file.stem or " " in file.stem and file.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
135
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
136
+ file.resolve().rename(newpath.resolve())
137
+
138
+
139
+ private_model_path_repo_dict = {} # {"local filepath": "huggingface repo_id", ...}
140
+
141
+
142
+ def get_private_model_list(repo_id, dir_path):
143
+ global private_model_path_repo_dict
144
+ api = HfApi()
145
+ if not hf_read_token: return []
146
+ try:
147
+ files = api.list_repo_files(repo_id, token=hf_read_token)
148
+ except Exception as e:
149
+ print(f"Error: Failed to list {repo_id}. ")
150
+ return []
151
+ model_list = []
152
+ for file in files:
153
+ path = Path(f"{dir_path}/{file}")
154
+ if path.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
155
+ model_list.append(str(path))
156
+ for model in model_list:
157
+ private_model_path_repo_dict[model] = repo_id
158
+ return model_list
159
+
160
+
161
+ def download_private_file(repo_id, path, is_replace):
162
+ from huggingface_hub import hf_hub_download
163
+ file = Path(path)
164
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}') if is_replace else file
165
+ if not hf_read_token or newpath.exists(): return
166
+ filename = file.name
167
+ dirname = file.parent.name
168
+ try:
169
+ hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
170
+ except Exception as e:
171
+ print(f"Error: Failed to download {filename}. ")
172
+ return
173
+ if is_replace:
174
+ file.resolve().rename(newpath.resolve())
175
+
176
+
177
+ def download_private_file_from_somewhere(path, is_replace):
178
+ if path not in private_model_path_repo_dict.keys(): return
179
+ repo_id = private_model_path_repo_dict.get(path, None)
180
+ download_private_file(repo_id, path, is_replace)
181
+
182
+
183
+ model_id_list = []
184
+ def get_model_id_list():
185
+ global model_id_list
186
+ if len(model_id_list) != 0: return model_id_list
187
+ api = HfApi()
188
+ model_ids = []
189
+ try:
190
+ models_likes = []
191
+ for author in HF_MODEL_USER_LIKES:
192
+ models_likes.extend(api.list_models(author=author, cardData=True, sort="likes"))
193
+ models_ex = []
194
+ for author in HF_MODEL_USER_EX:
195
+ models_ex = api.list_models(author=author, cardData=True, sort="last_modified")
196
+ except Exception as e:
197
+ print(f"Error: Failed to list {author}'s models. ")
198
+ return model_ids
199
+ for model in models_likes:
200
+ model_ids.append(model.id) if not model.private else ""
201
+ anime_models = []
202
+ real_models = []
203
+ for model in models_ex:
204
+ if not model.private:
205
+ if 'anime' in model.tags: anime_models.append(model.id)
+ else: real_models.append(model.id)
206
+ model_ids.extend(anime_models)
207
+ model_ids.extend(real_models)
208
+ model_id_list = model_ids.copy()
209
+ return model_ids
210
+
211
+
212
+ model_id_list = get_model_id_list()
213
+
214
+
215
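+ # Builds a one-line Markdown summary (base model, extra tags, downloads,
+ # likes, last-modified date) for a public diffusers text-to-image repo.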
+ def get_t2i_model_info(repo_id: str):
216
+ api = HfApi()
217
+ try:
218
+ if " " in repo_id or not api.repo_exists(repo_id): return ""
219
+ model = api.model_info(repo_id=repo_id)
220
+ except Exception as e:
221
+ print(f"Error: Failed to get {repo_id}'s info. ")
222
+ return ""
223
+ if model.private or model.gated: return ""
224
+ tags = model.tags
225
+ info = []
226
+ url = f"https://huggingface.co/{repo_id}/"
227
+ if not 'diffusers' in tags: return ""
228
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
229
+ info.append("SDXL")
230
+ elif 'diffusers:StableDiffusionPipeline' in tags:
231
+ info.append("SD1.5")
232
+ if model.card_data and model.card_data.tags:
233
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
234
+ info.append(f"DLs: {model.downloads}")
235
+ info.append(f"likes: {model.likes}")
236
+ info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
237
+ md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
238
+ return gr.update(value=md)
239
+
240
+
241
+ def get_tupled_model_list(model_list):
242
+ if not model_list: return []
243
+ tupled_list = []
244
+ for repo_id in model_list:
245
+ api = HfApi()
246
+ try:
247
+ if not api.repo_exists(repo_id): continue
248
+ model = api.model_info(repo_id=repo_id)
249
+ except Exception as e:
250
+ continue
251
+ if model.private or model.gated: continue
252
+ tags = model.tags
253
+ info = []
254
+ if 'diffusers' not in tags: continue
255
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
256
+ info.append("SDXL")
257
+ elif 'diffusers:StableDiffusionPipeline' in tags:
258
+ info.append("SD1.5")
259
+ if model.card_data and model.card_data.tags:
260
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
261
+ if "pony" in info:
262
+ info.remove("pony")
263
+ name = f"{repo_id} (Pony🐴, {', '.join(info)})"
264
+ else:
265
+ name = f"{repo_id} ({', '.join(info)})"
266
+ tupled_list.append((name, repo_id))
267
+ return tupled_list
268
+
269
+
270
+ private_lora_dict = {}
271
+ try:
272
+ with open('lora_dict.json', encoding='utf-8') as f:
273
+ d = json.load(f)
274
+ for k, v in d.items():
275
+ private_lora_dict[escape_lora_basename(k)] = v
276
+ except Exception:
277
+ pass
278
+ loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
279
+ civitai_not_exists_list = []
280
+ loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
281
+ civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
282
+ all_lora_list = []
283
+
284
+
285
+ private_lora_model_list = []
286
+ def get_private_lora_model_lists():
287
+ global private_lora_model_list
288
+ if len(private_lora_model_list) != 0: return private_lora_model_list
289
+ models1 = []
290
+ models2 = []
291
+ for repo in HF_LORA_PRIVATE_REPOS1:
292
+ models1.extend(get_private_model_list(repo, directory_loras))
293
+ for repo in HF_LORA_PRIVATE_REPOS2:
294
+ models2.extend(get_private_model_list(repo, directory_loras))
295
+ models = list_uniq(models1 + sorted(models2))
296
+ private_lora_model_list = models.copy()
297
+ return models
298
+
299
+
300
+ private_lora_model_list = get_private_lora_model_lists()
301
+
302
+
303
+ def get_civitai_info(path):
304
+ global civitai_not_exists_list
305
+ import requests
306
+ from urllib3.util import Retry
307
+ from requests.adapters import HTTPAdapter
308
+ if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
309
+ if not Path(path).exists(): return None
310
+ user_agent = get_user_agent()
311
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
312
+ base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
313
+ params = {}
314
+ session = requests.Session()
315
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
316
+ session.mount("https://", HTTPAdapter(max_retries=retries))
317
+ import hashlib
318
+ with open(path, 'rb') as file:
319
+ file_data = file.read()
320
+ hash_sha256 = hashlib.sha256(file_data).hexdigest()
321
+ url = base_url + hash_sha256
322
+ try:
323
+ r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
324
+ except Exception as e:
325
+ return ["", "", "", "", ""]
326
+ if not r.ok: return None
327
+ json = r.json()
328
+ if not 'baseModel' in json:
329
+ civitai_not_exists_list.append(path)
330
+ return ["", "", "", "", ""]
331
+ items = []
332
+ items.append(" / ".join(json['trainedWords']))
333
+ items.append(json['baseModel'])
334
+ items.append(json['model']['name'])
335
+ items.append(f"https://civitai.com/models/{json['modelId']}")
336
+ items.append(json['images'][0]['url'])
337
+ return items
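+ # note: this definition is superseded by the richer get_civitai_info defined later in this file (the later def wins at import time)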
338
+
339
+
340
+ def get_lora_model_list():
341
+ loras = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras))
342
+ loras.insert(0, "None")
343
+ loras.insert(0, "")
344
+ return loras
345
+
346
+
347
+ def get_all_lora_list():
348
+ global all_lora_list
349
+ loras = get_lora_model_list()
350
+ all_lora_list = loras.copy()
351
+ return loras
352
+
353
+
354
+ def get_all_lora_tupled_list():
355
+ global loras_dict
356
+ models = get_all_lora_list()
357
+ if not models: return []
358
+ tupled_list = []
359
+ for model in models:
360
+ #if not model: continue # to avoid GUI-related bug
361
+ basename = Path(model).stem
362
+ key = to_lora_key(model)
363
+ items = None
364
+ if key in loras_dict.keys():
365
+ items = loras_dict.get(key, None)
366
+ else:
367
+ items = get_civitai_info(model)
368
+ if items != None:
369
+ loras_dict[key] = items
370
+ name = basename
371
+ value = model
372
+ if items and items[2] != "":
373
+ if items[1] == "Pony":
374
+ name = f"{basename} (for {items[1]}🐴, {items[2]})"
375
+ else:
376
+ name = f"{basename} (for {items[1]}, {items[2]})"
377
+ tupled_list.append((name, value))
378
+ return tupled_list
379
+
380
+
381
+ def update_lora_dict(path):
382
+ global loras_dict
383
+ key = escape_lora_basename(Path(path).stem)
384
+ if key in loras_dict.keys(): return
385
+ items = get_civitai_info(path)
386
+ if items == None: return
387
+ loras_dict[key] = items
388
+
389
+
390
+ def download_lora(dl_urls: str):
391
+ global loras_url_to_path_dict
392
+ dl_path = ""
393
+ before = get_local_model_list(directory_loras)
394
+ urls = []
395
+ for url in [url.strip() for url in dl_urls.split(',')]:
396
+ local_path = f"{directory_loras}/{url.split('/')[-1]}"
397
+ if not Path(local_path).exists():
398
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
399
+ urls.append(url)
400
+ after = get_local_model_list(directory_loras)
401
+ new_files = list_sub(after, before)
402
+ i = 0
403
+ for file in new_files:
404
+ path = Path(file)
405
+ if path.exists():
406
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
407
+ path.resolve().rename(new_path.resolve())
408
+ loras_url_to_path_dict[urls[i]] = str(new_path)
409
+ update_lora_dict(str(new_path))
410
+ dl_path = str(new_path)
411
+ i += 1
412
+ return dl_path
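+ # note: with multiple comma-separated URLs, only the path of the last newly downloaded file is returned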
413
+
414
+
415
+ def copy_lora(path: str, new_path: str):
416
+ import shutil
417
+ if path == new_path: return new_path
418
+ cpath = Path(path)
419
+ npath = Path(new_path)
420
+ if cpath.exists():
421
+ try:
422
+ shutil.copy(str(cpath.resolve()), str(npath.resolve()))
423
+ except Exception:
424
+ return None
425
+ update_lora_dict(str(npath))
426
+ return new_path
427
+ else:
428
+ return None
429
+
430
+
431
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
432
+ path = download_lora(dl_urls)
433
+ if path:
434
+ if not lora1 or lora1 == "None":
435
+ lora1 = path
436
+ elif not lora2 or lora2 == "None":
437
+ lora2 = path
438
+ elif not lora3 or lora3 == "None":
439
+ lora3 = path
440
+ elif not lora4 or lora4 == "None":
441
+ lora4 = path
442
+ elif not lora5 or lora5 == "None":
443
+ lora5 = path
444
+ choices = get_all_lora_tupled_list()
445
+ return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
446
+ gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
447
+
448
+
449
+ def get_valid_lora_name(query: str):
450
+ path = "None"
451
+ if not query or query == "None": return "None"
452
+ if to_lora_key(query) in loras_dict.keys(): return query
453
+ if query in loras_url_to_path_dict.keys():
454
+ path = loras_url_to_path_dict[query]
455
+ else:
456
+ path = to_lora_path(query.strip().split('/')[-1])
457
+ if Path(path).exists():
458
+ return path
459
+ elif "http" in query:
460
+ dl_file = download_lora(query)
461
+ if dl_file and Path(dl_file).exists(): return dl_file
462
+ else:
463
+ dl_file = find_similar_lora(query)
464
+ if dl_file and Path(dl_file).exists(): return dl_file
465
+ return "None"
466
+
467
+
468
+ def get_valid_lora_path(query: str):
469
+ path = None
470
+ if not query or query == "None": return None
471
+ if to_lora_key(query) in loras_dict.keys(): return query
472
+ path = to_lora_path(query.strip().split('/')[-1])  # path was left as None here, so Path(path) raised TypeError; derive it as get_valid_lora_name does
+ if Path(path).exists():
473
+ return path
474
+ else:
475
+ return None
476
+
477
+
478
+ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
479
+ import re
480
+ wt = lora_wt
481
+ result = re.findall(f'<lora:{to_lora_key(lora_path)}:(.+?)>', prompt)
482
+ if not result: return wt
483
+ wt = safe_float(result[0])  # findall with a single group returns plain strings; result[0][0] took only the first character of the weight
484
+ return wt
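+ # e.g. get_valid_lora_wt("<lora:mylora:0.8>, 1girl", "loras/mylora.safetensors", 1.0) -> 0.8 (file name hypothetical)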
485
+
486
+
487
+ def set_prompt_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
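+ # "Classic" syntax only: pull <lora:key:weight> tags out of the prompt and assign them to the first free of the five LoRA slots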
488
+ import re
489
+ if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
490
+ lora1 = get_valid_lora_name(lora1)
491
+ lora2 = get_valid_lora_name(lora2)
492
+ lora3 = get_valid_lora_name(lora3)
493
+ lora4 = get_valid_lora_name(lora4)
494
+ lora5 = get_valid_lora_name(lora5)
495
+ if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
496
+ lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
497
+ lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
498
+ lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
499
+ lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
500
+ lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
501
+ on1, label1, tag1, md1 = get_lora_info(lora1)
502
+ on2, label2, tag2, md2 = get_lora_info(lora2)
503
+ on3, label3, tag3, md3 = get_lora_info(lora3)
504
+ on4, label4, tag4, md4 = get_lora_info(lora4)
505
+ on5, label5, tag5, md5 = get_lora_info(lora5)
506
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
507
+ prompts = prompt.split(",") if prompt else []
508
+ for p in prompts:
509
+ p = str(p).strip()
510
+ if "<lora" in p:
511
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
512
+ if not result: continue
513
+ key = result[0][0]
514
+ wt = result[0][1]
515
+ path = to_lora_path(key)
516
+ if not key in loras_dict.keys() or not path:
517
+ path = get_valid_lora_name(path)
518
+ if not path or path == "None": continue
519
+ if path in lora_paths:
520
+ continue
521
+ elif not on1:
522
+ lora1 = path
523
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
524
+ lora1_wt = safe_float(wt)
525
+ on1 = True
526
+ elif not on2:
527
+ lora2 = path
528
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
529
+ lora2_wt = safe_float(wt)
530
+ on2 = True
531
+ elif not on3:
532
+ lora3 = path
533
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
534
+ lora3_wt = safe_float(wt)
535
+ on3 = True
536
+ elif not on4:
537
+ lora4 = path
538
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
539
+ lora4_wt = safe_float(wt)
540
+ on4 = True  # match the other slots; re-querying get_lora_info here was inconsistent
541
+ elif not on5:
542
+ lora5 = path
543
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
544
+ lora5_wt = safe_float(wt)
545
+ on5 = True
546
+ return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
547
+
548
+
549
+ def get_lora_info(lora_path: str):
550
+ is_valid = False
551
+ tag = ""
552
+ label = ""
553
+ md = "None"
554
+ if not lora_path or lora_path == "None":
555
+ print("LoRA file not found.")
556
+ return is_valid, label, tag, md
557
+ path = Path(lora_path)
558
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
559
+ if not to_lora_key(str(new_path)) in loras_dict.keys() and str(path) not in set(get_all_lora_list()):
560
+ print("LoRA file is not registered.")
561
+ return is_valid, label, tag, md
562
+ if not new_path.exists():
563
+ download_private_file_from_somewhere(str(path), True)
564
+ basename = new_path.stem
565
+ label = f'Name: {basename}'
566
+ items = loras_dict.get(basename, None)
567
+ if items == None:
568
+ items = get_civitai_info(str(new_path))
569
+ if items != None:
570
+ loras_dict[basename] = items
571
+ if items and items[2] != "":
572
+ tag = items[0]
573
+ label = f'Name: {basename}'
574
+ if items[1] == "Pony":
575
+ label = f'Name: {basename} (for Pony🐴)'
576
+ if items[4]:
577
+ md = f'<img src="{items[4]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL]({items[3]})'
578
+ elif items[3]:
579
+ md = f'[LoRA Model URL]({items[3]})'
580
+ is_valid = True
581
+ return is_valid, label, tag, md
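+ # returns (is_valid, display label, trigger-word tags, markdown thumbnail/link) for the given LoRA path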
582
+
583
+
584
+ def normalize_prompt_list(tags: list[str]):
585
+ prompts = []
586
+ for tag in tags:
587
+ tag = str(tag).strip()
588
+ if tag:
589
+ prompts.append(tag)
590
+ return prompts
591
+
592
+
593
+ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
594
+ if lora_info == "None": return gr.update(value=prompt)
595
+ tags = prompt.split(",") if prompt else []
596
+ prompts = normalize_prompt_list(tags)
597
+
598
+ lora_tag = lora_info.replace("/",",")
599
+ lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
600
+ lora_prompts = normalize_prompt_list(lora_tags)
601
+
602
+ empty = [""]
603
+ prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
604
+ return gr.update(value=prompt)
605
+
606
+
607
+ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
608
+ import re
609
+ on1, label1, tag1, md1 = get_lora_info(lora1)
610
+ on2, label2, tag2, md2 = get_lora_info(lora2)
611
+ on3, label3, tag3, md3 = get_lora_info(lora3)
612
+ on4, label4, tag4, md4 = get_lora_info(lora4)
613
+ on5, label5, tag5, md5 = get_lora_info(lora5)
614
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
615
+
616
+ output_prompt = prompt
617
+ if "Classic" in str(prompt_syntax):
618
+ prompts = prompt.split(",") if prompt else []
619
+ output_prompts = []
620
+ for p in prompts:
621
+ p = str(p).strip()
622
+ if "<lora" in p:
623
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
624
+ if not result: continue
625
+ key = result[0][0]
626
+ wt = result[0][1]
627
+ path = to_lora_path(key)
628
+ if not key in loras_dict.keys() or not path: continue
629
+ if path in lora_paths:
630
+ output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
631
+ elif p:
632
+ output_prompts.append(p)
633
+ lora_prompts = []
634
+ if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
635
+ if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
636
+ if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
637
+ if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
638
+ if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
639
+ output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
640
+ choices = get_all_lora_tupled_list()
641
+
642
+ return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
643
+ gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
644
+ gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
645
+ gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
646
+ gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
647
+ gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
648
+ gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
649
+ gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
650
+ gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
651
+ gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
652
+
653
+
654
+ def get_my_lora(link_url):
655
+ from pathlib import Path
656
+ before = get_local_model_list(directory_loras)
657
+ for url in [url.strip() for url in link_url.split(',')]:
658
+ if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
659
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
660
+ after = get_local_model_list(directory_loras)
661
+ new_files = list_sub(after, before)
662
+ for file in new_files:
663
+ path = Path(file)
664
+ if path.exists():
665
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
666
+ path.resolve().rename(new_path.resolve())
667
+ update_lora_dict(str(new_path))
668
+ new_lora_model_list = get_lora_model_list()
669
+ new_lora_tupled_list = get_all_lora_tupled_list()
670
+
671
+ return gr.update(
672
+ choices=new_lora_tupled_list, value=new_lora_model_list[-1]
673
+ ), gr.update(
674
+ choices=new_lora_tupled_list
675
+ ), gr.update(
676
+ choices=new_lora_tupled_list
677
+ ), gr.update(
678
+ choices=new_lora_tupled_list
679
+ ), gr.update(
680
+ choices=new_lora_tupled_list
681
+ )
682
+
683
+
684
+ def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
685
+ progress(0, desc="Uploading...")
686
+ file_paths = [file.name for file in files]
687
+ progress(1, desc="Uploaded.")
688
+ return gr.update(value=file_paths, visible=True), gr.update(visible=True)
689
+
690
+
691
+ def move_file_lora(filepaths):
692
+ import shutil
693
+ for file in filepaths:
694
+ path = Path(shutil.move(Path(file).resolve(), Path(f"./{directory_loras}").resolve()))
695
+ newpath = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
696
+ path.resolve().rename(newpath.resolve())
697
+ update_lora_dict(str(newpath))
698
+
699
+ new_lora_model_list = get_lora_model_list()
700
+ new_lora_tupled_list = get_all_lora_tupled_list()
701
+
702
+ return gr.update(
703
+ choices=new_lora_tupled_list, value=new_lora_model_list[-1]
704
+ ), gr.update(
705
+ choices=new_lora_tupled_list
706
+ ), gr.update(
707
+ choices=new_lora_tupled_list
708
+ ), gr.update(
709
+ choices=new_lora_tupled_list
710
+ ), gr.update(
711
+ choices=new_lora_tupled_list
712
+ )
713
+
714
+
715
+ def get_civitai_info(path):
716
+ global civitai_not_exists_list
717
+ global loras_url_to_path_dict
718
+ import requests
719
+ from requests.adapters import HTTPAdapter
720
+ from urllib3.util import Retry
721
+ default = ["", "", "", "", ""]
722
+ if path in set(civitai_not_exists_list): return default
723
+ if not Path(path).exists(): return None
724
+ user_agent = get_user_agent()
725
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
726
+ base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
727
+ params = {}
728
+ session = requests.Session()
729
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
730
+ session.mount("https://", HTTPAdapter(max_retries=retries))
731
+ import hashlib
732
+ with open(path, 'rb') as file:
733
+ file_data = file.read()
734
+ hash_sha256 = hashlib.sha256(file_data).hexdigest()
735
+ url = base_url + hash_sha256
736
+ try:
737
+ r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
738
+ except Exception as e:
739
+ print(e)
740
+ return default
741
+ else:
742
+ if not r.ok: return None
743
+ json = r.json()
744
+ if 'baseModel' not in json:
745
+ civitai_not_exists_list.append(path)
746
+ return default
747
+ items = []
748
+ items.append(" / ".join(json['trainedWords'])) # The words (prompts) used to trigger the model
749
+ items.append(json['baseModel']) # Base model (SDXL1.0, Pony, ...)
750
+ items.append(json['model']['name']) # The name of the model version
751
+ items.append(f"https://civitai.com/models/{json['modelId']}") # The repo url for the model
752
+ items.append(json['images'][0]['url']) # The url for a sample image
753
+ loras_url_to_path_dict[json['downloadUrl']] = path # keep the documented "URL to download" -> "local filepath" direction; storing path -> URL polluted the lookups in get_valid_lora_name
754
+ return items
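+ # shadows the earlier get_civitai_info above; returns [trigger words, base model, version name, model page URL, sample image URL]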
755
+
756
+
757
+ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100):
758
+ import requests
759
+ from requests.adapters import HTTPAdapter
760
+ from urllib3.util import Retry
761
+ if not query: return None
762
+ user_agent = get_user_agent()
763
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
764
+ base_url = 'https://civitai.com/api/v1/models'
765
+ params = {'query': query, 'types': ['LORA'], 'sort': 'Highest Rated', 'period': 'AllTime',
766
+ 'nsfw': 'true', 'supportsGeneration': 'true', 'limit': limit}
767
+ session = requests.Session()
768
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
769
+ session.mount("https://", HTTPAdapter(max_retries=retries))
770
+ try:
771
+ r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
772
+ except Exception as e:
773
+ print(e)
774
+ return None
775
+ else:
776
+ if not r.ok: return None
777
+ json = r.json()
778
+ if 'items' not in json: return None
779
+ items = []
780
+ for j in json['items']:
781
+ for model in j['modelVersions']:
782
+ item = {}
783
+ if model['baseModel'] not in set(allow_model): continue
784
+ item['name'] = j['name']
785
+ item['creator'] = j['creator']['username']
786
+ item['tags'] = j['tags']
787
+ item['model_name'] = model['name']
788
+ item['base_model'] = model['baseModel']
789
+ item['dl_url'] = model['downloadUrl']
790
+ item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
791
+ items.append(item)
792
+ return items
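+ # e.g. search_lora_on_civitai("detail tweaker", ["Pony", "SDXL 1.0"], 10) -> list of dicts with name/creator/tags/model_name/base_model/dl_url/md keys, or None on failure (query string hypothetical)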
793
+
794
+
795
+ def search_civitai_lora(query, base_model):
796
+ global civitai_lora_last_results
797
+ items = search_lora_on_civitai(query, base_model)
798
+ if not items: return gr.update(choices=[("", "")], value="", visible=False),\
799
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
800
+ civitai_lora_last_results = {}
801
+ choices = []
802
+ for item in items:
803
+ base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
804
+ name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
805
+ value = item['dl_url']
806
+ choices.append((name, value))
807
+ civitai_lora_last_results[value] = item
808
+ if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
809
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
810
+ result = civitai_lora_last_results.get(choices[0][1], None)  # a "None" string default is truthy and would break result['md'] below
811
+ md = result['md'] if result else ""
812
+ return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
813
+ gr.update(visible=True), gr.update(visible=True)
814
+
815
+
816
+ def select_civitai_lora(search_result):
817
+ if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
818
+ result = civitai_lora_last_results.get(search_result, None)  # a "None" string default is truthy and would break result['md'] below
819
+ md = result['md'] if result else ""
820
+ return gr.update(value=search_result), gr.update(value=md, visible=True)
821
+
822
+
823
+ def find_similar_lora(q: str):
824
+ from rapidfuzz.process import extractOne
825
+ from rapidfuzz.utils import default_process
826
+ query = to_lora_key(q)
827
+ print(f"Finding <lora:{query}:...>...")
828
+ keys = list(private_lora_dict.keys())
829
+ values = [x[2] for x in list(private_lora_dict.values())]
830
+ s = default_process(query)
831
+ e1 = extractOne(s, keys + values, processor=default_process, score_cutoff=80.0)
832
+ key = ""
833
+ if e1:
834
+ e = e1[0]
835
+ if e in set(keys): key = e
836
+ elif e in set(values): key = keys[values.index(e)]
837
+ if key:
838
+ path = to_lora_path(key)
839
+ new_path = to_lora_path(query)
840
+ if not Path(path).exists():
841
+ if not Path(new_path).exists(): download_private_file_from_somewhere(path, True)
842
+ if Path(path).exists() and copy_lora(path, new_path): return new_path
843
+ print(f"Finding <lora:{query}:...> on Civitai...")
844
+ civitai_query = Path(query).stem if Path(query).is_file() else query
845
+ civitai_query = civitai_query.replace("_", " ").replace("-", " ")
846
+ base_model = ["Pony", "SDXL 1.0"]
847
+ items = search_lora_on_civitai(civitai_query, base_model, 1)
848
+ if items:
849
+ item = items[0]
850
+ path = download_lora(item['dl_url'])
851
+ new_path = query if Path(query).is_file() else to_lora_path(query)
852
+ if path and copy_lora(path, new_path): return new_path
853
+ return None
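+ # two-stage lookup: fuzzy-match (rapidfuzz, score cutoff 80) against the private LoRA dict, then fall back to a single-result Civitai search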
854
+
855
+
856
+ def change_interface_mode(mode: str):
857
+ if mode == "Fast":
858
+ return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
859
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
860
+ gr.update(visible=True), gr.update(value="Fast")
861
+ elif mode == "Simple": # t2i mode
862
+ return gr.update(open=True), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
863
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=False), gr.update(open=True),\
864
+ gr.update(visible=False), gr.update(value="Standard")
865
+ elif mode == "LoRA": # t2i LoRA mode
866
+ return gr.update(open=True), gr.update(visible=True), gr.update(open=True), gr.update(open=False),\
867
+ gr.update(visible=True), gr.update(open=True), gr.update(visible=True), gr.update(open=False),\
868
+ gr.update(visible=False), gr.update(value="Standard")
869
+ else: # Standard
870
+ return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
871
+ gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
872
+ gr.update(visible=True), gr.update(value="Standard")
873
+
874
+
875
+ quality_prompt_list = [
876
+ {
877
+ "name": "None",
878
+ "prompt": "",
879
+ "negative_prompt": "lowres",
880
+ },
881
+ {
882
+ "name": "Animagine Common",
883
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
884
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
885
+ },
886
+ {
887
+ "name": "Pony Anime Common",
888
+ "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
889
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
890
+ },
891
+ {
892
+ "name": "Pony Common",
893
+ "prompt": "source_anime, score_9, score_8_up, score_7_up",
894
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
895
+ },
896
+ {
897
+ "name": "Animagine Standard v3.0",
898
+ "prompt": "masterpiece, best quality",
899
+ "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
900
+ },
901
+ {
902
+ "name": "Animagine Standard v3.1",
903
+ "prompt": "masterpiece, best quality, very aesthetic, absurdres",
904
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
905
+ },
906
+ {
907
+ "name": "Animagine Light v3.1",
908
+ "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
909
+ "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
910
+ },
911
+ {
912
+ "name": "Animagine Heavy v3.1",
913
+ "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
914
+ "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
915
+ },
916
+ ]
917
+
918
+
919
+ style_list = [
920
+ {
921
+ "name": "None",
922
+ "prompt": "",
923
+ "negative_prompt": "",
924
+ },
925
+ {
926
+ "name": "Cinematic",
927
+ "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
928
+ "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
929
+ },
930
+ {
931
+ "name": "Photographic",
932
+ "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
933
+ "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
934
+ },
935
+ {
936
+ "name": "Anime",
937
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
938
+ "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
939
+ },
940
+ {
941
+ "name": "Manga",
942
+ "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
943
+ "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
944
+ },
945
+ {
946
+ "name": "Digital Art",
947
+ "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
948
+ "negative_prompt": "photo, photorealistic, realism, ugly",
949
+ },
950
+ {
951
+ "name": "Pixel art",
952
+ "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
953
+ "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
954
+ },
955
+ {
956
+ "name": "Fantasy art",
957
+ "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
958
+ "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
959
+ },
960
+ {
961
+ "name": "Neonpunk",
962
+ "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
963
+ "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
964
+ },
965
+ {
966
+ "name": "3D Model",
967
+ "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
968
+ "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
969
+ },
970
+ ]
971
+
972
+
973
+ optimization_list = {
974
+ "None": [28, 7., 'Euler a', False, 'None', 1.],
975
+ "Default": [28, 7., 'Euler a', False, 'None', 1.],
976
+ "SPO": [28, 7., 'Euler a', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
977
+ "DPO": [28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
978
+ "DPO Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.],
979
+ "SDXL Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_turbo_lora_v1.safetensors', 1.],
980
+ "Hyper-SDXL 12step": [12, 5., 'TCD', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.],
981
+ "Hyper-SDXL 8step": [8, 5., 'TCD', True, 'loras/Hyper-SDXL-8steps-CFG-lora.safetensors', 1.],
982
+ "Hyper-SDXL 4step": [4, 0, 'TCD', True, 'loras/Hyper-SDXL-4steps-lora.safetensors', 1.],
983
+ "Hyper-SDXL 2step": [2, 0, 'TCD', True, 'loras/Hyper-SDXL-2steps-lora.safetensors', 1.],
984
+ "Hyper-SDXL 1step": [1, 0, 'TCD', True, 'loras/Hyper-SDXL-1steps-lora.safetensors', 1.],
985
+ "PCM 16step": [16, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
986
+ "PCM 8step": [8, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
987
+ "PCM 4step": [4, 2., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
988
+ "PCM 2step": [2, 1., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
989
+ }
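+ # each value is [steps, cfg, sampler, clip_skip, lora_path, lora_scale], consumed positionally by set_optimization() below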
990
+
991
+
992
+ def set_optimization(opt, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_gui, lora_scale_gui):
993
+ if not opt in list(optimization_list.keys()): opt = "None"
994
+ def_steps_gui = 28
995
+ def_cfg_gui = 7.
996
+ steps = optimization_list.get(opt, "None")[0]
997
+ cfg = optimization_list.get(opt, "None")[1]
998
+ sampler = optimization_list.get(opt, "None")[2]
999
+ clip_skip = optimization_list.get(opt, "None")[3]
1000
+ lora = optimization_list.get(opt, "None")[4]
1001
+ lora_scale = optimization_list.get(opt, "None")[5]
1002
+ if opt == "None":
1003
+ steps = max(steps_gui, def_steps_gui)
1004
+ cfg = max(cfg_gui, def_cfg_gui)
1005
+ clip_skip = clip_skip_gui
1006
+ elif opt == "SPO" or opt == "DPO":
1007
+ steps = max(steps_gui, def_steps_gui)
1008
+ cfg = max(cfg_gui, def_cfg_gui)
1009
+
1010
+ return gr.update(value=steps), gr.update(value=cfg), gr.update(value=sampler),\
1011
+ gr.update(value=clip_skip), gr.update(value=lora), gr.update(value=lora_scale),
1012
+
1013
+
1014
+ # [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui]
1015
+ preset_sampler_setting = {
1016
+ "None": ["Euler a", 28, 7., True, 1024, 1024, "None"],
1017
+ "Anime 3:4 Fast": ["LCM", 8, 2.5, True, 896, 1152, "DPO Turbo"],
1018
+ "Anime 3:4 Standard": ["Euler a", 28, 7., True, 896, 1152, "None"],
1019
+ "Anime 3:4 Heavy": ["Euler a", 40, 7., True, 896, 1152, "None"],
1020
+ "Anime 1:1 Fast": ["LCM", 8, 2.5, True, 1024, 1024, "DPO Turbo"],
1021
+ "Anime 1:1 Standard": ["Euler a", 28, 7., True, 1024, 1024, "None"],
1022
+ "Anime 1:1 Heavy": ["Euler a", 40, 7., True, 1024, 1024, "None"],
1023
+ "Photo 3:4 Fast": ["LCM", 8, 2.5, False, 896, 1152, "DPO Turbo"],
1024
+ "Photo 3:4 Standard": ["DPM++ 2M Karras", 28, 7., False, 896, 1152, "None"],
1025
+ "Photo 3:4 Heavy": ["DPM++ 2M Karras", 40, 7., False, 896, 1152, "None"],
1026
+ "Photo 1:1 Fast": ["LCM", 8, 2.5, False, 1024, 1024, "DPO Turbo"],
1027
+ "Photo 1:1 Standard": ["DPM++ 2M Karras", 28, 7., False, 1024, 1024, "None"],
1028
+ "Photo 1:1 Heavy": ["DPM++ 2M Karras", 40, 7., False, 1024, 1024, "None"],
1029
+ }
1030
+
1031
+
1032
+ def set_sampler_settings(sampler_setting):
1033
+ if not sampler_setting in list(preset_sampler_setting.keys()) or sampler_setting == "None":
1034
+ return gr.update(value="Euler a"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
1035
+ gr.update(value=1024), gr.update(value=1024), gr.update(value="None")
1036
+ v = preset_sampler_setting.get(sampler_setting, ["Euler a", 28, 7., True, 1024, 1024, "None"])  # default needs a 7th element since v[6] is read below
1037
+ # sampler, steps, cfg, clip_skip, width, height, optimization
1038
+ return gr.update(value=v[0]), gr.update(value=v[1]), gr.update(value=v[2]), gr.update(value=v[3]),\
1039
+ gr.update(value=v[4]), gr.update(value=v[5]), gr.update(value=v[6])
1040
+
1041
+
1042
+ preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
1043
+ preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
1044
+
1045
+
1046
+ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None", type: str = "Auto"):
1047
+ def to_list(s):
1048
+ return [x.strip() for x in s.split(",") if not s == ""]
1049
+
1050
+ def list_sub(a, b):
1051
+ return [e for e in a if e not in b]
1052
+
1053
+ def list_uniq(l):
1054
+ return sorted(set(l), key=l.index)
1055
+
1056
+ animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
1057
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
1058
+ pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
1059
+ pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
1060
+ prompts = to_list(prompt)
1061
+ neg_prompts = to_list(neg_prompt)
1062
+
1063
+ all_styles_ps = []
1064
+ all_styles_nps = []
1065
+ for d in style_list:
1066
+ all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
1067
+ all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1068
+
1069
+ all_quality_ps = []
1070
+ all_quality_nps = []
1071
+ for d in quality_prompt_list:
1072
+ all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
1073
+ all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1074
+
1075
+ quality_ps = to_list(preset_quality[quality_key][0])
1076
+ quality_nps = to_list(preset_quality[quality_key][1])
1077
+ styles_ps = to_list(preset_styles[styles_key][0])
1078
+ styles_nps = to_list(preset_styles[styles_key][1])
1079
+
1080
+ prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
1081
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
1082
+
1083
+ last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1084
+ last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1085
+
1086
+ if type == "Animagine":
1087
+ prompts = prompts + animagine_ps
1088
+ neg_prompts = neg_prompts + animagine_nps
1089
+ elif type == "Pony":
1090
+ prompts = prompts + pony_ps
1091
+ neg_prompts = neg_prompts + pony_nps
1092
+
1093
+ prompts = prompts + styles_ps + quality_ps
1094
+ neg_prompts = neg_prompts + styles_nps + quality_nps
1095
+
1096
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
1097
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
1098
+
1099
+ return gr.update(value=prompt), gr.update(value=neg_prompt), gr.update(value=type)
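+ # note: previously applied preset tags are subtracted first (list_sub) so switching styles/qualities replaces rather than accumulates tags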
1100
+
1101
+
1102
+ def set_quick_presets(genre:str = "None", type:str = "Auto", speed:str = "None", aspect:str = "None"):
1103
+ quality = "None"
1104
+ style = "None"
1105
+ sampler = "None"
1106
+ opt = "None"
1107
+
1108
+ if genre == "Anime":
1109
+ if type != "None" and type != "Auto": style = "Anime"
1110
+ if aspect == "1:1":
1111
+ if speed == "Heavy":
1112
+ sampler = "Anime 1:1 Heavy"
1113
+ elif speed == "Fast":
1114
+ sampler = "Anime 1:1 Fast"
1115
+ else:
1116
+ sampler = "Anime 1:1 Standard"
1117
+ elif aspect == "3:4":
1118
+ if speed == "Heavy":
1119
+ sampler = "Anime 3:4 Heavy"
1120
+ elif speed == "Fast":
1121
+ sampler = "Anime 3:4 Fast"
1122
+ else:
1123
+ sampler = "Anime 3:4 Standard"
1124
+ if type == "Pony":
1125
+ quality = "Pony Anime Common"
1126
+ elif type == "Animagine":
1127
+ quality = "Animagine Common"
1128
+ else:
1129
+ quality = "None"
1130
+ elif genre == "Photo":
1131
+ if type != "None" and type != "Auto": style = "Photographic"
1132
+ if aspect == "1:1":
1133
+ if speed == "Heavy":
1134
+ sampler = "Photo 1:1 Heavy"
1135
+ elif speed == "Fast":
1136
+ sampler = "Photo 1:1 Fast"
1137
+ else:
1138
+ sampler = "Photo 1:1 Standard"
1139
+ elif aspect == "3:4":
1140
+ if speed == "Heavy":
1141
+ sampler = "Photo 3:4 Heavy"
1142
+ elif speed == "Fast":
1143
+ sampler = "Photo 3:4 Fast"
1144
+ else:
1145
+ sampler = "Photo 3:4 Standard"
1146
+ if type == "Pony":
1147
+ quality = "Pony Common"
1148
+ else:
1149
+ quality = "None"
1150
+
1151
+ if speed == "Fast":
1152
+ opt = "DPO Turbo"
1153
+ if genre == "Anime" and type != "Pony" and type != "Auto": quality = "Animagine Light v3.1"
1154
+
1155
+ return gr.update(value=quality), gr.update(value=style), gr.update(value=sampler), gr.update(value=opt), gr.update(value=type)
1156
+
1157
+
1158
+ textual_inversion_dict = {}
1159
+ try:
1160
+ with open('textual_inversion_dict.json', encoding='utf-8') as f:
1161
+ textual_inversion_dict = json.load(f)
1162
+ except Exception:
1163
+ pass
1164
+ textual_inversion_file_token_list = []
1165
+
1166
+
1167
+ def get_tupled_embed_list(embed_list):
1168
+ global textual_inversion_file_token_list  # was "textual_inversion_file_list", a name that does not exist
1169
+ tupled_list = []
1170
+ for file in embed_list:
1171
+ token = textual_inversion_dict.get(Path(file).name, [Path(file).stem.replace(",",""), False])[0]
1172
+ tupled_list.append((token, file))
1173
+ textual_inversion_file_token_list.append(token)
1174
+ return tupled_list
1175
+
1176
+
1177
+ def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_gui, prompt_syntax_gui):
1178
+ ti_tags = [v[0] for v in textual_inversion_dict.values()] + textual_inversion_file_token_list  # dict values are [token, is_positive] pairs; compare against the tokens
1179
+ tags = prompt_gui.split(",") if prompt_gui else []
1180
+ prompts = []
1181
+ for tag in tags:
1182
+ tag = str(tag).strip()
1183
+ if tag and not tag in ti_tags:
1184
+ prompts.append(tag)
1185
+ ntags = neg_prompt_gui.split(",") if neg_prompt_gui else []
1186
+ neg_prompts = []
1187
+ for tag in ntags:
1188
+ tag = str(tag).strip()
1189
+ if tag and not tag in ti_tags:
1190
+ neg_prompts.append(tag)
1191
+ ti_prompts = []
1192
+ ti_neg_prompts = []
1193
+ for ti in textual_inversion_gui:
1194
+ tokens = textual_inversion_dict.get(Path(ti).name, [Path(ti).stem.replace(",",""), False])
1195
+ is_positive = tokens[1] == True or "positive" in Path(ti).parent.name
1196
+ if is_positive: # positive prompt
1197
+ ti_prompts.append(tokens[0])
1198
+ else: # negative prompt (default)
1199
+ ti_neg_prompts.append(tokens[0])
1200
+ empty = [""]
1201
+ prompt = ", ".join(prompts + ti_prompts + empty)
1202
+ neg_prompt = ", ".join(neg_prompts + ti_neg_prompts + empty)
1203
+ return gr.update(value=prompt), gr.update(value=neg_prompt),
1204
+
1205
+
1206
+ def get_model_pipeline(repo_id: str):
1207
+ from huggingface_hub import HfApi
1208
+ api = HfApi()
1209
+ default = "StableDiffusionPipeline"
1210
+ try:
1211
+ if " " in repo_id or not api.repo_exists(repo_id): return default
1212
+ model = api.model_info(repo_id=repo_id)
1213
+ except Exception as e:
1214
+ return default
1215
+ if model.private or model.gated: return default
1216
+ tags = model.tags
1217
+ if not 'diffusers' in tags: return default
1218
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
1219
+ return "StableDiffusionXLPipeline"
1220
+ elif 'diffusers:StableDiffusionPipeline' in tags:
1221
+ return "StableDiffusionPipeline"
1222
+ else:
1223
+ return default
1224
+
packages.txt ADDED
@@ -0,0 +1 @@
1
+ git-lfs aria2 ffmpeg
pre-requirements.txt ADDED
@@ -0,0 +1 @@
1
+ pip>=23.0.0
requirements.txt ADDED
@@ -0,0 +1,17 @@
1
+ git+https://github.com/R3gm/stablepy.git@dev2
2
+ torch==2.2.0
3
+ gdown
4
+ opencv-python
5
+ pytorch-lightning
6
+ torchvision
7
+ accelerate
8
+ transformers
9
+ optimum[onnxruntime]
10
+ spaces
11
+ dartrs
12
+ huggingface_hub
13
+ httpx==0.13.3
14
+ httpcore
15
+ googletrans==4.0.0rc1
16
+ timm
17
+ rapidfuzz
tagger/character_series_dict.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/danbooru_e621.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/fl2sd3longcap.py ADDED
@@ -0,0 +1,78 @@
1
+ from transformers import AutoProcessor, AutoModelForCausalLM
2
+ import spaces
3
+ import re
4
+ from PIL import Image
5
+ import torch
6
+
7
+ import os
+ import subprocess
+ subprocess.run('pip install flash-attn --no-build-isolation', env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)  # merge with os.environ: passing env= alone drops PATH and can break the pip invocation
9
+
10
+ device = "cuda" if torch.cuda.is_available() else "cpu"
11
+ fl_model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).to("cpu").eval()
12
+ fl_processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
13
+
14
+
15
+ def fl_modify_caption(caption: str) -> str:
16
+ """
17
+ Removes specific prefixes from captions if present, otherwise returns the original caption.
18
+ Args:
19
+ caption (str): A string containing a caption.
20
+ Returns:
21
+ str: The caption with the prefix removed if it was present, or the original caption.
22
+ """
23
+ # Define the prefixes to remove
24
+ prefix_substrings = [
25
+ ('captured from ', ''),
26
+ ('captured at ', '')
27
+ ]
28
+
29
+ # Create a regex pattern to match any of the prefixes
30
+ pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
31
+ replacers = {opening.lower(): replacer for opening, replacer in prefix_substrings}
32
+
33
+ # Function to replace matched prefix with its corresponding replacement
34
+ def replace_fn(match):
35
+ return replacers[match.group(0).lower()]
36
+
37
+ # Apply the regex to the caption
38
+ modified_caption = re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
39
+
40
+ # If the caption was modified, return the modified version; otherwise, return the original
41
+ return modified_caption if modified_caption != caption else caption
42
+
43
+
44
+ @spaces.GPU(duration=30)
45
+ def fl_run_example(image):
46
+ task_prompt = "<DESCRIPTION>"
47
+ prompt = task_prompt + "Describe this image in great detail."
48
+
49
+ # Ensure the image is in RGB mode
50
+ if image.mode != "RGB":
51
+ image = image.convert("RGB")
52
+
53
+ fl_model.to(device)
54
+ inputs = fl_processor(text=prompt, images=image, return_tensors="pt").to(device)
55
+ generated_ids = fl_model.generate(
56
+ input_ids=inputs["input_ids"],
57
+ pixel_values=inputs["pixel_values"],
58
+ max_new_tokens=1024,
59
+ num_beams=3
60
+ )
61
+ fl_model.to("cpu")
62
+ generated_text = fl_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
63
+ parsed_answer = fl_processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
64
+ return fl_modify_caption(parsed_answer["<DESCRIPTION>"])
65
+
66
+
67
+ def predict_tags_fl2_sd3(image: Image.Image, input_tags: str, algo: list[str]):
68
+ def to_list(s):
69
+ return [x.strip() for x in s.split(",") if not s == ""]
70
+
71
+ def list_uniq(l):
72
+ return sorted(set(l), key=l.index)
73
+
74
+ if not "Use Florence-2-SD3-Long-Captioner" in algo:
75
+ return input_tags
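+ # the trailing ", " below forces an empty entry into the list, which tag_list.remove("") strips again after deduplication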
76
+ tag_list = list_uniq(to_list(input_tags) + to_list(fl_run_example(image) + ", "))
77
+ tag_list.remove("")
78
+ return ", ".join(tag_list)
tagger/output.py ADDED
@@ -0,0 +1,16 @@
1
+ from dataclasses import dataclass
2
+
3
+
4
+ @dataclass
5
+ class UpsamplingOutput:
6
+ upsampled_tags: str
7
+
8
+ copyright_tags: str
9
+ character_tags: str
10
+ general_tags: str
11
+ rating_tag: str
12
+ aspect_ratio_tag: str
13
+ length_tag: str
14
+ identity_tag: str
15
+
16
+ elapsed_time: float = 0.0
tagger/tag_group.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/tagger.py ADDED
@@ -0,0 +1,556 @@
1
+ from PIL import Image
2
+ import torch
3
+ import gradio as gr
4
+ import spaces
5
+ from transformers import (
6
+ AutoImageProcessor,
7
+ AutoModelForImageClassification,
8
+ )
9
+ from pathlib import Path
10
+
11
+
12
+ WD_MODEL_NAMES = ["p1atdev/wd-swinv2-tagger-v3-hf"]
13
+ WD_MODEL_NAME = WD_MODEL_NAMES[0]
14
+
15
+ device = "cuda" if torch.cuda.is_available() else "cpu"
16
+ default_device = device
17
+
18
+ try:
19
+ wd_model = AutoModelForImageClassification.from_pretrained(WD_MODEL_NAME, trust_remote_code=True).to(default_device).eval()
20
+ wd_processor = AutoImageProcessor.from_pretrained(WD_MODEL_NAME, trust_remote_code=True)
21
+ except Exception as e:
22
+ print(e)
23
+ wd_model = wd_processor = None
24
+
25
+ def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
26
+ return (
27
+ [f"1{noun}"]
28
+ + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
29
+ + [f"{maximum+1}+{noun}s"]
30
+ )
31
+
32
+
33
+ PEOPLE_TAGS = (
34
+ _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
35
+ )
36
+
37
+
38
+ RATING_MAP = {
39
+ "sfw": "safe",
40
+ "general": "safe",
41
+ "sensitive": "sensitive",
42
+ "questionable": "nsfw",
43
+ "explicit": "explicit, nsfw",
44
+ }
45
+ DANBOORU_TO_E621_RATING_MAP = {
46
+ "sfw": "rating_safe",
47
+ "general": "rating_safe",
48
+ "safe": "rating_safe",
49
+ "sensitive": "rating_safe",
50
+ "nsfw": "rating_explicit",
51
+ "explicit, nsfw": "rating_explicit",
52
+ "explicit": "rating_explicit",
53
+ "rating:safe": "rating_safe",
54
+ "rating:general": "rating_safe",
55
+ "rating:sensitive": "rating_safe",
56
+ "rating:questionable, nsfw": "rating_explicit",
57
+ "rating:explicit, nsfw": "rating_explicit",
58
+ }
59
+
60
+
61
+ # https://github.com/toriato/stable-diffusion-webui-wd14-tagger/blob/a9eacb1eff904552d3012babfa28b57e1d3e295c/tagger/ui.py#L368
62
+ kaomojis = [
63
+ "0_0",
64
+ "(o)_(o)",
65
+ "+_+",
66
+ "+_-",
67
+ "._.",
68
+ "<o>_<o>",
69
+ "<|>_<|>",
70
+ "=_=",
71
+ ">_<",
72
+ "3_3",
73
+ "6_9",
74
+ ">_o",
75
+ "@_@",
76
+ "^_^",
77
+ "o_o",
78
+ "u_u",
79
+ "x_x",
80
+ "|_|",
81
+ "||_||",
82
+ ]
83
+
84
+
85
+ def replace_underline(x: str):
86
+ return x.strip().replace("_", " ") if x not in kaomojis else x.strip()
87
+
88
+
89
+ def to_list(s):
90
+ return [x.strip() for x in s.split(",") if not s == ""]
91
+
92
+
93
+ def list_sub(a, b):
94
+ return [e for e in a if e not in b]
95
+
96
+
97
+ def list_uniq(l):
98
+ return sorted(set(l), key=l.index)
99
+
100
+
101
+ def load_dict_from_csv(filename):
102
+ dict = {}
103
+ if not Path(filename).exists():
104
+ if Path('./tagger/', filename).exists(): filename = str(Path('./tagger/', filename))
105
+ else: return dict
106
+ try:
107
+ with open(filename, 'r', encoding="utf-8") as f:
108
+ lines = f.readlines()
109
+ except Exception:
110
+ print(f"Failed to open dictionary file: {filename}")
111
+ return dict
112
+ for line in lines:
113
+ parts = line.strip().split(',')
114
+ if len(parts) >= 2: dict[parts[0]] = parts[1]  # skip malformed rows instead of raising IndexError
115
+ return dict
116
+
117
+
118
+ anime_series_dict = load_dict_from_csv('character_series_dict.csv')
119
+
120
+
121
+ def character_list_to_series_list(character_list):
122
+ output_series_tag = []
123
+ series_tag = ""
124
+ series_dict = anime_series_dict
125
+ for tag in character_list:
126
+ series_tag = series_dict.get(tag, "")
127
+ if tag.endswith(")"):
128
+ tags = tag.split("(")
129
+ character_tag = "(".join(tags[:-1])
130
+ if character_tag.endswith(" "):
131
+ character_tag = character_tag[:-1]
132
+ series_tag = tags[-1].replace(")", "")
133
+
134
+ if series_tag:
135
+ output_series_tag.append(series_tag)
136
+
137
+ return output_series_tag
138
+
139
+
140
+ def select_random_character(series: str, character: str):
141
+ from random import seed, randrange
142
+ seed()
143
+ character_list = list(anime_series_dict.keys())
144
+ character = character_list[randrange(len(character_list))]  # randrange is already exclusive; the extra -1 skipped the last entry
145
+ series = anime_series_dict.get(character.split(",")[0].strip(), "")
146
+ return series, character
147
+
148
+
149
+ def danbooru_to_e621(dtag, e621_dict):
150
+ def d_to_e(match, e621_dict):
151
+ dtag = match.group(0)
152
+ etag = e621_dict.get(replace_underline(dtag), "")
153
+ if etag:
154
+ return etag
155
+ else:
156
+ return dtag
157
+
158
+ import re
159
+ tag = re.sub(r'[\w ]+', lambda wrapper: d_to_e(wrapper, e621_dict), dtag, 2)
160
+ return tag
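+ # note: the count of 2 in re.sub means only the first two word runs of the tag are mapped through the dictionary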
161
+
162
+
163
+ danbooru_to_e621_dict = load_dict_from_csv('danbooru_e621.csv')
164
+
165
+
166
+ def convert_danbooru_to_e621_prompt(input_prompt: str = "", prompt_type: str = "danbooru"):
167
+ if prompt_type == "danbooru": return input_prompt
168
+ tags = input_prompt.split(",") if input_prompt else []
169
+ people_tags: list[str] = []
170
+ other_tags: list[str] = []
171
+ rating_tags: list[str] = []
172
+
173
+ e621_dict = danbooru_to_e621_dict
174
+ for tag in tags:
175
+ tag = replace_underline(tag)
176
+ tag = danbooru_to_e621(tag, e621_dict)
177
+ if tag in PEOPLE_TAGS:
178
+ people_tags.append(tag)
179
+ elif tag in DANBOORU_TO_E621_RATING_MAP.keys():
180
+ rating_tags.append(DANBOORU_TO_E621_RATING_MAP.get(tag.replace(" ",""), ""))
181
+ else:
182
+ other_tags.append(tag)
183
+
184
+ rating_tags = sorted(set(rating_tags), key=rating_tags.index)
185
+ rating_tags = [rating_tags[0]] if rating_tags else []
186
+ rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags
187
+
188
+ output_prompt = ", ".join(people_tags + other_tags + rating_tags)
189
+
190
+ return output_prompt
191
+
192
+
193
+ def translate_prompt(prompt: str = ""):
194
+ def translate_to_english(prompt):
195
+ import httpcore
196
+ setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
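+ # apparent compatibility shim for googletrans 4.0.0rc1, which references httpcore.SyncHTTPTransport (removed in newer httpcore releases)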
197
+ from googletrans import Translator
198
+ translator = Translator()
199
+ try:
200
+ translated_prompt = translator.translate(prompt, src='auto', dest='en').text
201
+ return translated_prompt
202
+ except Exception as e:
203
+ print(e)
204
+ return prompt
205
+
206
+ def is_japanese(s):
207
+ import unicodedata
208
+ for ch in s:
209
+ name = unicodedata.name(ch, "")
210
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
211
+ return True
212
+ return False
213
+
214
+ def to_list(s):
215
+ return [x.strip() for x in s.split(",")]
216
+
217
+ prompts = to_list(prompt)
218
+ outputs = []
219
+ for p in prompts:
220
+ p = translate_to_english(p) if is_japanese(p) else p
221
+ outputs.append(p)
222
+
223
+ return ", ".join(outputs)
224
+
225
+
226
+ def translate_prompt_to_ja(prompt: str = ""):
227
+ def translate_to_japanese(prompt):
228
+ import httpcore
229
+ setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
230
+ from googletrans import Translator
231
+ translator = Translator()
232
+ try:
233
+ translated_prompt = translator.translate(prompt, src='en', dest='ja').text
234
+ return translated_prompt
235
+ except Exception as e:
236
+ print(e)
237
+ return prompt
238
+
239
+ def is_japanese(s):
240
+ import unicodedata
241
+ for ch in s:
242
+ name = unicodedata.name(ch, "")
243
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
244
+ return True
245
+ return False
246
+
247
+ def to_list(s):
248
+ return [x.strip() for x in s.split(",")]
249
+
250
+ prompts = to_list(prompt)
251
+ outputs = []
252
+ for p in prompts:
253
+ p = translate_to_japanese(p) if not is_japanese(p) else p
254
+ outputs.append(p)
255
+
256
+ return ", ".join(outputs)
257
+
258
+
259
+ def tags_to_ja(itag, dict):
260
+ def t_to_j(match, dict):
261
+ tag = match.group(0)
262
+ ja = dict.get(replace_underline(tag), "")
263
+ if ja:
264
+ return ja
265
+ else:
266
+ return tag
267
+
268
+ import re
269
+ tag = re.sub(r'[\w ]+', lambda wrapper: t_to_j(wrapper, dict), itag, 2)
270
+
271
+ return tag
272
+
273
+
274
+ def convert_tags_to_ja(input_prompt: str = ""):
275
+ tags = input_prompt.split(",") if input_prompt else []
276
+ out_tags = []
277
+
278
+ tags_to_ja_dict = load_dict_from_csv('all_tags_ja_ext.csv')
279
+ dict = tags_to_ja_dict
280
+ for tag in tags:
281
+ tag = replace_underline(tag)
282
+ tag = tags_to_ja(tag, dict)
283
+ out_tags.append(tag)
284
+
285
+ return ", ".join(out_tags)
286
+
287
+
288
+ enable_auto_recom_prompt = True
289
+
290
+
291
+ animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
292
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
293
+ pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
294
+ pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
295
+ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
296
+ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
297
+ default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
298
+ default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
299
+def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
+    global enable_auto_recom_prompt
+    prompts = to_list(prompt)
+    neg_prompts = to_list(neg_prompt)
+
+    prompts = list_sub(prompts, animagine_ps + pony_ps)
+    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps)
+
+    last_empty_p = [""] if not prompts and type != "None" else []
+    last_empty_np = [""] if not neg_prompts and type != "None" else []
+
+    if type == "Auto":
+        enable_auto_recom_prompt = True
+    else:
+        enable_auto_recom_prompt = False
+        if type == "Animagine":
+            prompts = prompts + animagine_ps
+            neg_prompts = neg_prompts + animagine_nps
+        elif type == "Pony":
+            prompts = prompts + pony_ps
+            neg_prompts = neg_prompts + pony_nps
+
+    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
+    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
+
+    return prompt, neg_prompt
+
+
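+# Illustrative only: with type="Pony", any Animagine/Pony preset tags already present
+# are stripped from prompt/neg_prompt, then the Pony presets (score_9, score_8_up, ...)
+# are appended and de-duplicated. With type="Auto", nothing is appended here; per-model
+# defaults are applied later by insert_model_recom_prompt() instead.
+
+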
+def load_model_prompt_dict():
+    import json
+    dict = {}
+    path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
+    try:
+        with open(path, encoding='utf-8') as f:  # use the resolved path, not a hardcoded filename
+            dict = json.load(f)
+    except Exception:
+        pass
+    return dict
+
+
+model_prompt_dict = load_model_prompt_dict()
+
+
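+# Expected shape of model_dict.json, inferred from the lookups in
+# insert_model_recom_prompt() below:
+#   { "<model name>": { "prompt": "tag, ...", "negative_prompt": "tag, ..." }, ... }
+
+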
+def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
+    if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
+    prompts = to_list(prompt)
+    neg_prompts = to_list(neg_prompt)
+    prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
+    neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
+    # the sentinel here is model_name, not the built-in `type` (a copy-paste slip in the original)
+    last_empty_p = [""] if not prompts and model_name != "None" else []
+    last_empty_np = [""] if not neg_prompts and model_name != "None" else []
+    ps = []
+    nps = []
+    if model_name in model_prompt_dict.keys():
+        ps = to_list(model_prompt_dict[model_name]["prompt"])
+        nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
+    else:
+        ps = default_ps
+        nps = default_nps
+    prompts = prompts + ps
+    neg_prompts = neg_prompts + nps
+    prompt = ", ".join(list_uniq(prompts) + last_empty_p)
+    neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
+    return prompt, neg_prompt
+
+
+tag_group_dict = load_dict_from_csv('tag_group.csv')
+
+
+def remove_specific_prompt(input_prompt: str = "", keep_tags: str = "all"):
+    def is_dressed(tag):
+        import re
+        p = re.compile(r'dress|cloth|uniform|costume|vest|sweater|coat|shirt|jacket|blazer|apron|leotard|hood|sleeve|skirt|shorts|pant|loafer|ribbon|necktie|bow|collar|glove|sock|shoe|boots|wear|emblem')
+        return p.search(tag)
+
+    def is_background(tag):
+        import re
+        p = re.compile(r'background|outline|light|sky|build|day|screen|tree|city')
+        return p.search(tag)
+
+    un_tags = ['solo']
+    group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
+    keep_group_dict = {
+        "body": ['groups', 'body_parts'],
+        "dress": ['groups', 'body_parts', 'attire'],
+        "all": group_list,
+    }
+
+    def is_necessary(tag, keep_tags, group_dict):
+        if keep_tags == "all":
+            return True
+        elif tag in un_tags or group_dict.get(tag, "") in explicit_group:
+            return False
+        elif keep_tags == "body" and is_dressed(tag):
+            return False
+        elif is_background(tag):
+            return False
+        else:
+            return True
+
+    if keep_tags == "all": return input_prompt
+    keep_group = keep_group_dict.get(keep_tags, keep_group_dict["body"])
+    explicit_group = list(set(group_list) ^ set(keep_group))
+
+    tags = input_prompt.split(",") if input_prompt else []
+    people_tags: list[str] = []
+    other_tags: list[str] = []
+
+    group_dict = tag_group_dict
+    for tag in tags:
+        tag = replace_underline(tag)
+        if tag in PEOPLE_TAGS:
+            people_tags.append(tag)
+        elif is_necessary(tag, keep_tags, group_dict):
+            other_tags.append(tag)
+
+    output_prompt = ", ".join(people_tags + other_tags)
+
+    return output_prompt
+
+
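+# Illustrative only: with keep_tags="body", clothing and background tags are dropped
+# while people and body-part tags survive, so a call like
+#   remove_specific_prompt("1girl, blue dress, long hair, cityscape background", "body")
+# should reduce to roughly "1girl, long hair" (the exact result depends on the
+# tag_group.csv groupings).
+
+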
+def sort_taglist(tags: list[str]):
+    if not tags: return []
+    character_tags: list[str] = []
+    series_tags: list[str] = []
+    people_tags: list[str] = []
+    group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
+    group_tags = {}
+    other_tags: list[str] = []
+    rating_tags: list[str] = []
+
+    group_dict = tag_group_dict
+    group_set = set(group_dict.keys())
+    character_set = set(anime_series_dict.keys())
+    series_set = set(anime_series_dict.values())
+    rating_set = set(DANBOORU_TO_E621_RATING_MAP.keys()) | set(DANBOORU_TO_E621_RATING_MAP.values())
+
+    for tag in tags:
+        tag = replace_underline(tag)
+        if tag in PEOPLE_TAGS:
+            people_tags.append(tag)
+        elif tag in rating_set:
+            rating_tags.append(tag)
+        elif tag in group_set:
+            elem = group_dict[tag]
+            group_tags[elem] = group_tags[elem] + [tag] if elem in group_tags else [tag]
+        elif tag in character_set:
+            character_tags.append(tag)
+        elif tag in series_set:
+            series_tags.append(tag)
+        else:
+            other_tags.append(tag)
+
+    output_group_tags: list[str] = []
+    for k in group_list:
+        output_group_tags.extend(group_tags.get(k, []))
+
+    rating_tags = [rating_tags[0]] if rating_tags else []
+    rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags
+
+    output_tags = character_tags + series_tags + people_tags + output_group_tags + other_tags + rating_tags
+
+    return output_tags
+
+
+def sort_tags(tags: str):
+    if not tags: return ""
+    taglist: list[str] = []
+    for tag in tags.split(","):
+        taglist.append(tag.strip())
+    taglist = list(filter(lambda x: x != "", taglist))
+    return ", ".join(sort_taglist(taglist))
+
+
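+# Illustrative only: sort_tags() reorders a flat tag string into
+# character -> series -> people -> grouped general tags -> other -> rating, so
+#   sort_tags("outdoors, hatsune miku, 1girl, vocaloid")
+# might yield "hatsune miku, vocaloid, 1girl, outdoors", assuming the character/series
+# CSVs contain those entries.
+
+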
+def postprocess_results(results: dict[str, float], general_threshold: float, character_threshold: float):
+    results = {
+        k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)
+    }
+
+    rating = {}
+    character = {}
+    general = {}
+
+    for k, v in results.items():
+        if k.startswith("rating:"):
+            rating[k.replace("rating:", "")] = v
+            continue
+        elif k.startswith("character:"):
+            character[k.replace("character:", "")] = v
+            continue
+
+        general[k] = v
+
+    character = {k: v for k, v in character.items() if v >= character_threshold}
+    general = {k: v for k, v in general.items() if v >= general_threshold}
+
+    return rating, character, general
+
+
+def gen_prompt(rating: list[str], character: list[str], general: list[str]):
+    people_tags: list[str] = []
+    other_tags: list[str] = []
+    rating_tag = RATING_MAP[rating[0]]  # note: computed but never appended to the output tags
+
+    for tag in general:
+        if tag in PEOPLE_TAGS:
+            people_tags.append(tag)
+        else:
+            other_tags.append(tag)
+
+    all_tags = people_tags + other_tags
+
+    return ", ".join(all_tags)
+
+
+@spaces.GPU(duration=30)
+def predict_tags(image: Image.Image, general_threshold: float = 0.3, character_threshold: float = 0.8):
+    inputs = wd_processor.preprocess(image, return_tensors="pt")
+
+    outputs = wd_model(**inputs.to(wd_model.device, wd_model.dtype))
+    logits = torch.sigmoid(outputs.logits[0])  # take the first logits
+
+    # get probabilities
+    if device != default_device: wd_model.to(device=device)
+    results = {
+        wd_model.config.id2label[i]: float(logit.float()) for i, logit in enumerate(logits)
+    }
+    if device != default_device: wd_model.to(device=default_device)
+    # rating, character, general
+    rating, character, general = postprocess_results(
+        results, general_threshold, character_threshold
+    )
+    prompt = gen_prompt(
+        list(rating.keys()), list(character.keys()), list(general.keys())
+    )
+    output_series_tag = output_series_list[0] if (output_series_list := character_list_to_series_list(character.keys())) else ""
+    return output_series_tag, ", ".join(character.keys()), prompt, gr.update(interactive=True)
+
+
+def predict_tags_wd(image: Image.Image, input_tags: str, algo: list[str], general_threshold: float = 0.3,
+                    character_threshold: float = 0.8, input_series: str = "", input_character: str = ""):
+    if "Use WD Tagger" not in algo and len(algo) != 0:
+        return input_series, input_character, input_tags, gr.update(interactive=True)
+    return predict_tags(image, general_threshold, character_threshold)
+
+
+def compose_prompt_to_copy(character: str, series: str, general: str):
+    characters = character.split(",") if character else []
+    serieses = series.split(",") if series else []
+    generals = general.split(",") if general else []
+    tags = characters + serieses + generals
+    cprompt = ",".join(tags) if tags else ""
+    return cprompt
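+
+# Illustrative only: compose_prompt_to_copy("hatsune miku", "vocaloid", "1girl, smile")
+# returns "hatsune miku,vocaloid,1girl, smile" (character, series, then general tags,
+# joined with bare commas).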
tagger/utils.py ADDED
@@ -0,0 +1,50 @@
+import gradio as gr
+from dartrs.v2 import AspectRatioTag, LengthTag, RatingTag, IdentityTag
+
+
+V2_ASPECT_RATIO_OPTIONS: list[AspectRatioTag] = [
+    "ultra_wide",
+    "wide",
+    "square",
+    "tall",
+    "ultra_tall",
+]
+V2_RATING_OPTIONS: list[RatingTag] = [
+    "sfw",
+    "general",
+    "sensitive",
+    "nsfw",
+    "questionable",
+    "explicit",
+]
+V2_LENGTH_OPTIONS: list[LengthTag] = [
+    "very_short",
+    "short",
+    "medium",
+    "long",
+    "very_long",
+]
+V2_IDENTITY_OPTIONS: list[IdentityTag] = [
+    "none",
+    "lax",
+    "strict",
+]
+
+
+# ref: https://qiita.com/tregu148/items/fccccbbc47d966dd2fc2
+def gradio_copy_text(_text: None):
+    gr.Info("Copied!")
+
+
+COPY_ACTION_JS = """\
+(inputs, _outputs) => {
+  // inputs is the string value of the input_text
+  if (inputs.trim() !== "") {
+    navigator.clipboard.writeText(inputs);
+  }
+}"""
+
+
+def gradio_copy_prompt(prompt: str):
+    gr.Info("Copied!")
+    return prompt
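+
+
+# A minimal wiring sketch (an assumption; the actual hookup lives in the app UI code,
+# and `copy_btn` / `prompt_box` are hypothetical names):
+#   copy_btn.click(gradio_copy_text, inputs=[prompt_box], outputs=None, js=COPY_ACTION_JS)
+# The JS snippet copies the textbox value to the clipboard client-side, while the
+# Python callback only raises the "Copied!" toast.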
tagger/v2.py ADDED
@@ -0,0 +1,260 @@
+import time
+import torch
+from typing import Callable
+from pathlib import Path
+
+from dartrs.v2 import (
+    V2Model,
+    MixtralModel,
+    MistralModel,
+    compose_prompt,
+    LengthTag,
+    AspectRatioTag,
+    RatingTag,
+    IdentityTag,
+)
+from dartrs.dartrs import DartTokenizer
+from dartrs.utils import get_generation_config
+
+
+import gradio as gr
+from gradio.components import Component
+
+
+try:
+    from output import UpsamplingOutput
+except ImportError:  # fall back to the package-relative import
+    from .output import UpsamplingOutput
+
+
+V2_ALL_MODELS = {
+    "dart-v2-moe-sft": {
+        "repo": "p1atdev/dart-v2-moe-sft",
+        "type": "sft",
+        "class": MixtralModel,
+    },
+    "dart-v2-sft": {
+        "repo": "p1atdev/dart-v2-sft",
+        "type": "sft",
+        "class": MistralModel,
+    },
+}
+
+
+def prepare_models(model_config: dict):
+    model_name = model_config["repo"]
+    tokenizer = DartTokenizer.from_pretrained(model_name)
+    model = model_config["class"].from_pretrained(model_name)
+
+    return {
+        "tokenizer": tokenizer,
+        "model": model,
+    }
+
+
+def normalize_tags(tokenizer: DartTokenizer, tags: str):
+    """Just remove unk tokens."""
+    return ", ".join([tag for tag in tokenizer.tokenize(tags) if tag != "<|unk|>"])
+
+
+@torch.no_grad()
+def generate_tags(
+    model: V2Model,
+    tokenizer: DartTokenizer,
+    prompt: str,
+    ban_token_ids: list[int],
+):
+    output = model.generate(
+        get_generation_config(
+            prompt,
+            tokenizer=tokenizer,
+            temperature=1,
+            top_p=0.9,
+            top_k=100,
+            max_new_tokens=256,
+            ban_token_ids=ban_token_ids,
+        ),
+    )
+
+    return output
+
+
+def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
+    return (
+        [f"1{noun}"]
+        + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
+        + [f"{maximum+1}+{noun}s"]
+    )
+
+
+PEOPLE_TAGS = (
+    _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
+)
+
+
+def gen_prompt_text(output: UpsamplingOutput):
+    # separate people tags (e.g. 1girl)
+    people_tags = []
+    other_general_tags = []
+
+    for tag in output.general_tags.split(","):
+        tag = tag.strip()
+        if tag in PEOPLE_TAGS:
+            people_tags.append(tag)
+        else:
+            other_general_tags.append(tag)
+
+    return ", ".join(
+        [
+            part.strip()
+            for part in [
+                *people_tags,
+                output.character_tags,
+                output.copyright_tags,
+                *other_general_tags,
+                output.upsampled_tags,
+                output.rating_tag,
+            ]
+            if part.strip() != ""
+        ]
+    )
+
+
+def elapsed_time_format(elapsed_time: float) -> str:
+    return f"Elapsed: {elapsed_time:.2f} seconds"
+
+
+def parse_upsampling_output(
+    upsampler: Callable[..., UpsamplingOutput],
+):
+    def _parse_upsampling_output(*args) -> tuple[str, str, dict, dict]:  # four values are returned below
+        output = upsampler(*args)
+
+        return (
+            gen_prompt_text(output),
+            elapsed_time_format(output.elapsed_time),
+            gr.update(interactive=True),
+            gr.update(interactive=True),
+        )
+
+    return _parse_upsampling_output
+
+
+class V2UI:
+    model_name: str | None = None
+    model: V2Model
+    tokenizer: DartTokenizer
+
+    input_components: list[Component] = []
+    generate_btn: gr.Button
+
+    def on_generate(
+        self,
+        model_name: str,
+        copyright_tags: str,
+        character_tags: str,
+        general_tags: str,
+        rating_tag: RatingTag,
+        aspect_ratio_tag: AspectRatioTag,
+        length_tag: LengthTag,
+        identity_tag: IdentityTag,
+        ban_tags: str,
+        *args,
+    ) -> UpsamplingOutput:
+        if self.model_name is None or self.model_name != model_name:
+            models = prepare_models(V2_ALL_MODELS[model_name])
+            self.model = models["model"]
+            self.tokenizer = models["tokenizer"]
+            self.model_name = model_name
+
+        # normalize tags
+        # copyright_tags = normalize_tags(self.tokenizer, copyright_tags)
+        # character_tags = normalize_tags(self.tokenizer, character_tags)
+        # general_tags = normalize_tags(self.tokenizer, general_tags)
+
+        ban_token_ids = self.tokenizer.encode(ban_tags.strip())
+
+        prompt = compose_prompt(
+            prompt=general_tags,
+            copyright=copyright_tags,
+            character=character_tags,
+            rating=rating_tag,
+            aspect_ratio=aspect_ratio_tag,
+            length=length_tag,
+            identity=identity_tag,
+        )
+
+        start = time.time()
+        upsampled_tags = generate_tags(
+            self.model,
+            self.tokenizer,
+            prompt,
+            ban_token_ids,
+        )
+        elapsed_time = time.time() - start
+
+        return UpsamplingOutput(
+            upsampled_tags=upsampled_tags,
+            copyright_tags=copyright_tags,
+            character_tags=character_tags,
+            general_tags=general_tags,
+            rating_tag=rating_tag,
+            aspect_ratio_tag=aspect_ratio_tag,
+            length_tag=length_tag,
+            identity_tag=identity_tag,
+            elapsed_time=elapsed_time,
+        )
+
+
+def parse_upsampling_output_simple(upsampler: UpsamplingOutput):
+    return gen_prompt_text(upsampler)
+
+
+v2 = V2UI()
+
+
+def v2_upsampling_prompt(model: str = "dart-v2-moe-sft", copyright: str = "", character: str = "",
+                         general_tags: str = "", rating: str = "nsfw", aspect_ratio: str = "square",
+                         length: str = "very_long", identity: str = "lax", ban_tags: str = "censored"):
+    raw_prompt = parse_upsampling_output_simple(v2.on_generate(model, copyright, character, general_tags,
+                                                               rating, aspect_ratio, length, identity, ban_tags))
+    return raw_prompt
+
+
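+# Illustrative only: a keyword-style call such as
+#   v2_upsampling_prompt(general_tags="1girl, solo", rating="general", length="medium")
+# returns the Dart v2 model's expanded tag string for the given conditions.
+
+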
+def load_dict_from_csv(filename):
+    dict = {}
+    if not Path(filename).exists():
+        if Path('./tagger/', filename).exists(): filename = str(Path('./tagger/', filename))
+        else: return dict
+    try:
+        with open(filename, 'r', encoding="utf-8") as f:
+            lines = f.readlines()
+    except Exception:
+        print(f"Failed to open dictionary file: {filename}")
+        return dict
+    for line in lines:
+        parts = line.strip().split(',')
+        if len(parts) >= 2: dict[parts[0]] = parts[1]  # skip malformed rows instead of raising IndexError
+    return dict
+
+
+anime_series_dict = load_dict_from_csv('character_series_dict.csv')
+
+
+def select_random_character(series: str, character: str):
+    from random import seed, randrange
+    seed()
+    character_list = list(anime_series_dict.keys())
+    character = character_list[randrange(len(character_list))]  # randrange(n) already excludes n, so don't subtract 1
+    series = anime_series_dict.get(character.split(",")[0].strip(), "")
+    return series, character
+
+
+def v2_random_prompt(general_tags: str = "", copyright: str = "", character: str = "", rating: str = "nsfw",
+                     aspect_ratio: str = "square", length: str = "very_long", identity: str = "lax",
+                     ban_tags: str = "censored", model: str = "dart-v2-moe-sft"):
+    if copyright == "" and character == "":
+        copyright, character = select_random_character("", "")
+    raw_prompt = v2_upsampling_prompt(model, copyright, character, general_tags, rating,
+                                      aspect_ratio, length, identity, ban_tags)
+    return raw_prompt, copyright, character
textual_inversion_dict.json ADDED
@@ -0,0 +1,74 @@
+{
+    "bad_prompt_version2.pt": [
+        "bad_prompt",
+        false
+    ],
+    "EasyNegativeV2.safetensors": [
+        "EasyNegative",
+        false
+    ],
+    "bad-hands-5.pt": [
+        "bad_hand",
+        false
+    ],
+    "negativeXL_A.safetensors": [
+        "negativeXL_A",
+        false
+    ],
+    "negativeXL_B.safetensors": [
+        "negativeXL_B",
+        false
+    ],
+    "negativeXL_C.safetensors": [
+        "negativeXL_C",
+        false
+    ],
+    "negativeXL_D.safetensors": [
+        "negativeXL_D",
+        false
+    ],
+    "unaestheticXL2v10.safetensors": [
+        "2v10",
+        false
+    ],
+    "unaestheticXL_AYv1.safetensors": [
+        "_AYv1",
+        false
+    ],
+    "unaestheticXL_Alb2.safetensors": [
+        "_Alb2",
+        false
+    ],
+    "unaestheticXL_Jug6.safetensors": [
+        "_Jug6",
+        false
+    ],
+    "unaestheticXL_bp5.safetensors": [
+        "_bp5",
+        false
+    ],
+    "unaestheticXL_hk1.safetensors": [
+        "_hk1",
+        false
+    ],
+    "unaestheticXLv1.safetensors": [
+        "v1.0",
+        false
+    ],
+    "unaestheticXLv13.safetensors": [
+        "v1.3",
+        false
+    ],
+    "unaestheticXLv31.safetensors": [
+        "v3.1",
+        false
+    ],
+    "unaestheticXL_Sky3.1.safetensors": [
+        "_Sky3.1",
+        false
+    ],
+    "SimplePositiveXLv2.safetensors": [
+        "SIMPLEPOSITIVEXLV2",
+        true
+    ]
+}
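
Each entry of textual_inversion_dict.json maps an embedding filename to its trigger token and a flag marking it as a positive (true) or negative (false) embedding. A minimal consumer sketch, assuming that layout (the helper name is hypothetical):

    import json

    def split_textual_inversions(path: str = "textual_inversion_dict.json"):
        """Return ({filename: token} for positive embeddings, the same for negative ones)."""
        with open(path, encoding="utf-8") as f:
            ti = json.load(f)
        pos = {name: token for name, (token, is_positive) in ti.items() if is_positive}
        neg = {name: token for name, (token, is_positive) in ti.items() if not is_positive}
        return pos, neg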