Nymbo committed on
Commit
31b96f7
·
verified ·
1 Parent(s): 88cd3e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -69
app.py CHANGED
@@ -1,69 +1,69 @@
1
- import gradio as gr
2
- from chatllm import (
3
- chat_response, get_llm_model, set_llm_model, get_llm_model_info,
4
- get_llm_language, set_llm_language, get_llm_sysprompt, get_llm_sysprompt_mode,
5
- set_llm_sysprompt_mode,
6
- )
7
-
8
- # Custom CSS for Gradio app
9
- css = '''
10
- .gradio-container{max-width: 1000px !important}
11
- h1{text-align:center}
12
- footer { visibility: hidden }
13
- '''
14
-
15
- # Create Gradio interface
16
- with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as app:
17
- with gr.Column():
18
- with gr.Group():
19
- chatbot = gr.Chatbot(likeable=False, show_copy_button=True, show_share_button=False, layout="bubble", container=True)
20
- with gr.Row():
21
- chat_query = gr.Textbox(label="Search Query", placeholder="hatsune miku", value="", scale=3)
22
- chat_clear = gr.Button("🗑️ Clear", scale=1)
23
- with gr.Row():
24
- chat_msg = gr.Textbox(label="Message", placeholder="Input message with or without query and press Enter or click Submit.", value="", scale=3)
25
- chat_submit = gr.Button("Submit", scale=1)
26
- with gr.Accordion("Additional inputs", open=False):
27
- chat_model = gr.Dropdown(choices=get_llm_model(), value=get_llm_model()[0], allow_custom_value=True, label="Model")
28
- chat_model_info = gr.Markdown(value=get_llm_model_info(get_llm_model()[0]), label="Model info")
29
- with gr.Row():
30
- chat_mode = gr.Dropdown(choices=get_llm_sysprompt_mode(), value=get_llm_sysprompt_mode()[0], allow_custom_value=False, label="Mode")
31
- chat_lang = gr.Dropdown(choices=get_llm_language(), value="language same as user input", allow_custom_value=True, label="Output language")
32
- chat_tokens = gr.Slider(minimum=1, maximum=4096, value=2000, step=1, label="Max tokens")
33
- chat_temp = gr.Slider(minimum=0.1, maximum=4.0, value=0.9, step=0.1, label="Temperature")
34
- chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
35
- chat_fp = gr.Slider(minimum=0.0, maximum=2.0, value=0.0, step=0.1, label="Frequency penalty")
36
- chat_sysmsg = gr.Textbox(value=get_llm_sysprompt(), interactive=True, label="System message")
37
- examples = gr.Examples(
38
- examples = [
39
- ["Describe this person.", "Kafuu Chino from Gochiusa"],
40
- ["Hello", ""],
41
- ],
42
- inputs=[chat_msg, chat_query],
43
- )
44
- gr.Markdown(
45
- f"""This demo was created in reference to the following demos.<br>
46
- [prithivMLmods/WEB-DAC](https://huggingface.co/spaces/prithivMLmods/WEB-DAC),
47
- [featherless-ai/try-this-model](https://huggingface.co/spaces/featherless-ai/try-this-model),
48
- """
49
- )
50
- gr.DuplicateButton(value="Duplicate Space")
51
- gr.Markdown(f"Just a few edits to *model.py* are all it takes to complete your own collection.")
52
- gr.on(
53
- triggers=[chat_msg.submit, chat_query.submit, chat_submit.click],
54
- fn=chat_response,
55
- inputs=[chat_msg, chatbot, chat_query, chat_tokens, chat_temp, chat_topp, chat_fp],
56
- outputs=[chatbot],
57
- queue=True,
58
- show_progress="full",
59
- trigger_mode="once",
60
- )
61
- chat_clear.click(lambda: (None, None, None), None, [chatbot, chat_msg, chat_query], queue=False)
62
- chat_model.change(set_llm_model, [chat_model], [chat_model, chat_model_info], queue=True, show_progress="full")\
63
- .success(lambda: None, None, chatbot, queue=False)
64
- chat_mode.change(set_llm_sysprompt_mode, [chat_mode], [chat_sysmsg], queue=False)
65
- chat_lang.change(set_llm_language, [chat_lang], [chat_sysmsg], queue=False)
66
-
67
- if __name__ == "__main__":
68
- app.queue()
69
- app.launch()
 
1
+ import gradio as gr
2
+ from chatllm import (
3
+ chat_response, get_llm_model, set_llm_model, get_llm_model_info,
4
+ get_llm_language, set_llm_language, get_llm_sysprompt, get_llm_sysprompt_mode,
5
+ set_llm_sysprompt_mode,
6
+ )
7
+
8
+ # Custom CSS for Gradio app
9
+ css = '''
10
+ .gradio-container{max-width: 1000px !important}
11
+ h1{text-align:center}
12
+ footer { visibility: hidden }
13
+ '''
14
+
15
+ # Create Gradio interface
16
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
17
+ with gr.Column():
18
+ with gr.Group():
19
+ chatbot = gr.Chatbot(likeable=False, show_copy_button=True, show_share_button=False, layout="bubble", container=True)
20
+ with gr.Row():
21
+ chat_query = gr.Textbox(label="Search Query", placeholder="hatsune miku", value="", scale=3)
22
+ chat_clear = gr.Button("🗑️ Clear", scale=1)
23
+ with gr.Row():
24
+ chat_msg = gr.Textbox(label="Message", placeholder="Input message with or without query and press Enter or click Submit.", value="", scale=3)
25
+ chat_submit = gr.Button("Submit", scale=1)
26
+ with gr.Accordion("Additional inputs", open=False):
27
+ chat_model = gr.Dropdown(choices=get_llm_model(), value=get_llm_model()[0], allow_custom_value=True, label="Model")
28
+ chat_model_info = gr.Markdown(value=get_llm_model_info(get_llm_model()[0]), label="Model info")
29
+ with gr.Row():
30
+ chat_mode = gr.Dropdown(choices=get_llm_sysprompt_mode(), value=get_llm_sysprompt_mode()[0], allow_custom_value=False, label="Mode")
31
+ chat_lang = gr.Dropdown(choices=get_llm_language(), value="language same as user input", allow_custom_value=True, label="Output language")
32
+ chat_tokens = gr.Slider(minimum=1, maximum=4096, value=2000, step=1, label="Max tokens")
33
+ chat_temp = gr.Slider(minimum=0.1, maximum=4.0, value=0.9, step=0.1, label="Temperature")
34
+ chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
35
+ chat_fp = gr.Slider(minimum=0.0, maximum=2.0, value=0.0, step=0.1, label="Frequency penalty")
36
+ chat_sysmsg = gr.Textbox(value=get_llm_sysprompt(), interactive=True, label="System message")
37
+ examples = gr.Examples(
38
+ examples = [
39
+ ["Describe this person.", "Kafuu Chino from Gochiusa"],
40
+ ["Hello", ""],
41
+ ],
42
+ inputs=[chat_msg, chat_query],
43
+ )
44
+ gr.Markdown(
45
+ f"""This demo was created in reference to the following demos.<br>
46
+ [prithivMLmods/WEB-DAC](https://huggingface.co/spaces/prithivMLmods/WEB-DAC),
47
+ [featherless-ai/try-this-model](https://huggingface.co/spaces/featherless-ai/try-this-model),
48
+ """
49
+ )
50
+ gr.DuplicateButton(value="Duplicate Space")
51
+ gr.Markdown(f"Just a few edits to *model.py* are all it takes to complete your own collection.")
52
+ gr.on(
53
+ triggers=[chat_msg.submit, chat_query.submit, chat_submit.click],
54
+ fn=chat_response,
55
+ inputs=[chat_msg, chatbot, chat_query, chat_tokens, chat_temp, chat_topp, chat_fp],
56
+ outputs=[chatbot],
57
+ queue=True,
58
+ show_progress="full",
59
+ trigger_mode="once",
60
+ )
61
+ chat_clear.click(lambda: (None, None, None), None, [chatbot, chat_msg, chat_query], queue=False)
62
+ chat_model.change(set_llm_model, [chat_model], [chat_model, chat_model_info], queue=True, show_progress="full")\
63
+ .success(lambda: None, None, chatbot, queue=False)
64
+ chat_mode.change(set_llm_sysprompt_mode, [chat_mode], [chat_sysmsg], queue=False)
65
+ chat_lang.change(set_llm_language, [chat_lang], [chat_sysmsg], queue=False)
66
+
67
+ if __name__ == "__main__":
68
+ app.queue()
69
+ app.launch()