Upload 2 files
- app.py +9 -10
- llmdolphin.py +68 -29
app.py
CHANGED
@@ -19,6 +19,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
     gr.Markdown("""# Natural Text to SD Prompt Translator With LLM alpha
     Text in natural language (English, Japanese, ...) => Prompt
     """, elem_classes="title")
+    state = gr.State(value={})
     with gr.Group():
        chatbot = gr.Chatbot(show_copy_button=True, show_share_button=False, layout="bubble", container=True)
        with gr.Row():
@@ -98,22 +99,22 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
     gr.on(
        triggers=[chat_msg.submit, chat_submit.click],
        fn=dolphin_respond,
-       inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp],
+       inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, state],
        outputs=[chatbot],
        queue=True,
        show_progress="full",
        trigger_mode="once",
-    ).success(dolphin_parse, [chatbot], [output_text, copy_btn, copy_btn_pony]).success(
+    ).success(dolphin_parse, [chatbot, state], [output_text, copy_btn, copy_btn_pony]).success(
        convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False,
     ).success(insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False,
     ).success(insert_recom_prompt, [output_text_pony, dummy_np_pony, recom_pony], [output_text_pony, dummy_np_pony], queue=False)
     chat_clear.click(lambda: None, None, chatbot, queue=False)
-    chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full")\
+    chat_model.change(select_dolphin_model, [chat_model, state], [chat_model, chat_format, chat_model_info, state], queue=True, show_progress="full")\
     .success(lambda: None, None, chatbot, queue=False)
-    chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False)\
+    chat_format.change(select_dolphin_format, [chat_format, state], [chat_format, state], queue=False)\
     .success(lambda: None, None, chatbot, queue=False)
-    chat_mode.change(select_dolphin_sysprompt, [chat_mode], [chat_sysmsg], queue=False)
-    chat_lang.change(select_dolphin_language, [chat_lang], [chat_sysmsg], queue=False)
+    chat_mode.change(select_dolphin_sysprompt, [chat_mode, state], [chat_sysmsg, state], queue=False)
+    chat_lang.change(select_dolphin_language, [chat_lang, state], [chat_sysmsg, state], queue=False)
     gr.on(
        triggers=[chat_add_text.submit, chat_add_submit.click],
        fn=add_dolphin_models,
@@ -128,10 +129,8 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
 
     generate_image_btn.click(generate_image, [output_text, dummy_np], [result_image], show_progress="full")
 
-    pg_chat_model.change(select_dolphin_model, [pg_chat_model], [pg_chat_model, pg_chat_format, pg_chat_model_info], queue=True, show_progress="full")
-
-    pg_chat_format.change(select_dolphin_format, [pg_chat_format], [pg_chat_format], queue=False)#\
-    #.success(lambda: None, None, pg_chatbot, queue=False)
+    pg_chat_model.change(select_dolphin_model, [pg_chat_model, state], [pg_chat_model, pg_chat_format, pg_chat_model_info, state], queue=True, show_progress="full")
+    pg_chat_format.change(select_dolphin_format, [pg_chat_format, state], [pg_chat_format, state], queue=False)
     gr.on(
        triggers=[pg_chat_add_text.submit, pg_chat_add_submit.click],
        fn=add_dolphin_models,
llmdolphin.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 from pathlib import Path
 import re
 import torch
+from typing import Any
 from huggingface_hub import hf_hub_download, HfApi
 from llama_cpp import Llama
 from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
@@ -75,6 +76,26 @@ llm_models = {
     "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Flowable-Docs-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Flowable-Docs-Llama-3.1-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "slimorca-gemma2-9b-fft.Q4_K_M.gguf": ["mradermacher/slimorca-gemma2-9b-fft-GGUF", MessagesFormatterType.ALPACA],
+    "TQ2.5-14B-Sugarquill-v1-Q4_K_M.gguf": ["bartowski/TQ2.5-14B-Sugarquill-v1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "magnum-v3-9b-customgemma2.i1-Q4_K_M.gguf": ["mradermacher/magnum-v3-9b-customgemma2-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Captain_BMO-12B.Q4_K_M.gguf": ["mradermacher/Captain_BMO-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "LemonP-8B-Model_Stock.i1-Q5_K_M.gguf": ["mradermacher/LemonP-8B-Model_Stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Ice0.31-08.11-RP.i1-Q5_K_M.gguf": ["mradermacher/Ice0.31-08.11-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "EVA-Qwen2.5-14B-v0.2.i1-Q4_K_M.gguf": ["mradermacher/EVA-Qwen2.5-14B-v0.2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "L3.1-Dark-Planet-SpinFire-Uncensored-8B-D_AU-Q4_k_m.gguf": ["DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "EndlessRP-v2-7B.Q5_K_M.gguf": ["mradermacher/EndlessRP-v2-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "badger-lambda-0-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-lambda-0-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "L3.1-Artemis-e2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Artemis-e2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MN-12B-Inferor-v0.0.Q4_K_M.gguf": ["mradermacher/MN-12B-Inferor-v0.0-GGUF", MessagesFormatterType.MISTRAL],
+    "Eclipse-13B-dpo.i1-Q4_K_M.gguf": ["mradermacher/Eclipse-13B-dpo-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Epic_Fiction-8b-v0.1.Q5_K_M.gguf": ["mradermacher/Epic_Fiction-8b-v0.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Llama-3-8B-StoryGenerator.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-8B-StoryGenerator-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "badger-mu-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-mu-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "badger-writer-llama-3-8b.i1-Q5_K_M.gguf": ["mradermacher/badger-writer-llama-3-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen-Rui-SE.Q5_K_M.gguf": ["mradermacher/Qwen-Rui-SE-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Valor-7B-v0.1.i1-Q4_K_M.gguf": ["mradermacher/Valor-7B-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
     "Fraken-Maid-TW-Slerp.i1-Q5_K_M.gguf": ["mradermacher/Fraken-Maid-TW-Slerp-i1-GGUF", MessagesFormatterType.MISTRAL],
     "KunoichiLake-2x7b.Q4_K_M.gguf": ["mradermacher/KunoichiLake-2x7b-GGUF", MessagesFormatterType.MISTRAL],
     "Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-Q4_K_M.gguf": ["bartowski/Llama-3.1-SuperNova-8B-Lite_TIES_with_Base-GGUF", MessagesFormatterType.LLAMA_3],
@@ -908,25 +929,35 @@ llm_formats = {
 llm_languages = ["English", "Japanese", "Chinese", "Korean", "Spanish", "Portuguese", "German", "French", "Finnish", "Russian"]
 llm_models_tupled_list = []
 default_llm_model_filename = list(llm_models.keys())[0]
-override_llm_format = None
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 
-def to_list(s):
+def to_list(s: str):
     return [x.strip() for x in s.split(",") if not s == ""]
 
 
-def list_uniq(l):
+def list_uniq(l: list):
     return sorted(set(l), key=l.index)
 
 
+def get_state(state: dict, key: str):
+    if key in state.keys(): return state[key]
+    else:
+        print(f"State '{key}' not found.")
+        return None
+
+
+def set_state(state: dict, key: str, value: Any):
+    state[key] = value
+
+
 @wrapt_timeout_decorator.timeout(dec_timeout=3.5)
-def to_list_ja(s):
+def to_list_ja(s: str):
     s = re.sub(r'[、。]', ',', s)
     return [x.strip() for x in s.split(",") if not s == ""]
 
 
-def is_japanese(s):
+def is_japanese(s: str):
     import unicodedata
     for ch in s:
         name = unicodedata.name(ch, "")
@@ -964,7 +995,7 @@ def download_llm_models():
         llm_models_tupled_list.append((name, value))
 
 
-def download_llm_model(filename):
+def download_llm_model(filename: str):
     if not filename in llm_models.keys(): return default_llm_model_filename
     try:
         hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
@@ -975,7 +1006,7 @@ def download_llm_model(filename):
     return filename
 
 
-def get_dolphin_model_info(filename):
+def get_dolphin_model_info(filename: str):
     md = "None"
     items = llm_models.get(filename, None)
     if items:
@@ -983,20 +1014,18 @@ def get_dolphin_model_info(filename):
     return md
 
 
-def select_dolphin_model(filename, progress=gr.Progress(track_tqdm=True)):
-    global override_llm_format
-    override_llm_format = None
+def select_dolphin_model(filename: str, state: dict, progress=gr.Progress(track_tqdm=True)):
+    set_state(state, "override_llm_format", None)
     progress(0, desc="Loading model...")
     value = download_llm_model(filename)
     progress(1, desc="Model loaded.")
     md = get_dolphin_model_info(filename)
-    return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md)
+    return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md), state
 
 
-def select_dolphin_format(format_name):
-    global override_llm_format
-    override_llm_format = llm_formats[format_name]
-    return gr.update(value=format_name)
+def select_dolphin_format(format_name: str, state: dict):
+    set_state(state, "override_llm_format", llm_formats[format_name])
+    return gr.update(value=format_name), state
 
 
 download_llm_model(default_llm_model_filename)
@@ -1017,14 +1046,14 @@ def get_key_from_value(d, val):
     return None
 
 
-def get_dolphin_model_format(filename):
+def get_dolphin_model_format(filename: str):
     if not filename in llm_models.keys(): filename = default_llm_model_filename
     format = llm_models[filename][1]
     format_name = get_key_from_value(llm_formats, format)
     return format_name
 
 
-def add_dolphin_models(query, format_name):
+def add_dolphin_models(query: str, format_name: str):
     global llm_models
     api = HfApi()
     add_models = {}
@@ -1055,8 +1084,6 @@ def add_dolphin_models(query, format_name):
     return gr.update(choices=choices, value=choices[-1][1])
 
 
-dolphin_output_language = "English"
-dolphin_sysprompt_mode = "Default"
 dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
 The message must have the following [Tags] generated in strict accordance with the following [Rules]:
 ```
@@ -1247,8 +1274,11 @@ Output should be enclosed in //GENBEGIN//:// and //://GENEND//. The text to be g
 "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
 
 
-def get_dolphin_sysprompt():
-    prompt = re.sub('<LANGUAGE>', dolphin_output_language, dolphin_system_prompt.get(dolphin_sysprompt_mode, list(dolphin_system_prompt.keys())[0]))
+def get_dolphin_sysprompt(state: dict={}):
+    dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
+    dolphin_output_language = get_state(state, "dolphin_output_language")
+    prompt = re.sub('<LANGUAGE>', dolphin_output_language if dolphin_output_language else llm_languages[0],
+        dolphin_system_prompt.get(dolphin_sysprompt_mode, list(dolphin_system_prompt.keys())[0]))
     return prompt
 
 
@@ -1256,23 +1286,23 @@ def get_dolphin_sysprompt_mode():
     return list(dolphin_system_prompt.keys())
 
 
-def select_dolphin_sysprompt(key: str):
-    global dolphin_sysprompt_mode
+def select_dolphin_sysprompt(key: str, state: dict):
+    dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
     if not key in dolphin_system_prompt.keys():
         dolphin_sysprompt_mode = "Default"
     else:
         dolphin_sysprompt_mode = key
-    return gr.update(value=get_dolphin_sysprompt())
+    set_state(state, "dolphin_sysprompt_mode", dolphin_sysprompt_mode)
+    return gr.update(value=get_dolphin_sysprompt(state)), state
 
 
 def get_dolphin_languages():
     return llm_languages
 
 
-def select_dolphin_language(lang: str):
-    global dolphin_output_language
-    dolphin_output_language = lang
-    return gr.update(value=get_dolphin_sysprompt())
+def select_dolphin_language(lang: str, state: dict):
+    set_state(state, "dolphin_output_language", lang)
+    return gr.update(value=get_dolphin_sysprompt()), state
 
 
 @wrapt_timeout_decorator.timeout(dec_timeout=5.0)
@@ -1293,11 +1323,12 @@ def dolphin_respond(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
     try:
         progress(0, desc="Processing...")
-
+        override_llm_format = get_state(state, "override_llm_format")
         if override_llm_format:
             chat_template = override_llm_format
         else:
@@ -1363,8 +1394,10 @@ def dolphin_respond(
 
 def dolphin_parse(
     history: list[tuple[str, str]],
+    state: dict,
 ):
     try:
+        dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
         if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1:
             return "", gr.update(), gr.update()
         msg = history[-1][0]
@@ -1392,12 +1425,14 @@ def dolphin_respond_auto(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
     try:
         #if not is_japanese(message): return [(None, None)]
         progress(0, desc="Processing...")
 
+        override_llm_format = get_state(state, "override_llm_format")
         if override_llm_format:
             chat_template = override_llm_format
         else:
@@ -1465,9 +1500,11 @@ def dolphin_respond_auto(
 def dolphin_parse_simple(
     message: str,
     history: list[tuple[str, str]],
+    state: dict,
 ):
     try:
         #if not is_japanese(message): return message
+        dolphin_sysprompt_mode = get_state(state, "dolphin_sysprompt_mode")
         if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
         msg = history[-1][0]
         raw_prompt = get_raw_prompt(msg)
@@ -1499,8 +1536,10 @@ def respond_playground(
     top_p,
     top_k,
     repeat_penalty,
+    state,
 ):
     try:
+        override_llm_format = get_state(state, "override_llm_format")
        if override_llm_format:
            chat_template = override_llm_format
        else:
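The new get_state/set_state helpers read and write the per-session values that these handlers now carry instead of module-level globals, falling back with a logged message when a key has not been set yet. A quick usage sketch (the values are illustrative):

state = {}
set_state(state, "dolphin_output_language", "Japanese")
get_state(state, "dolphin_output_language")  # -> "Japanese"
get_state(state, "override_llm_format")      # prints "State 'override_llm_format' not found." and returns None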