Spaces: Runtime error
Update app.py
app.py
CHANGED
@@ -5,20 +5,29 @@ import gradio as gr
 import openai
 
 print(os.environ)
-openai.api_base1 = os.environ.get("
+openai.api_base1 = os.environ.get("OPENAI_API_BASE1")
 openai.api_base2 = os.environ.get("OPENAI_API_BASE2")
-openai.
+openai.api_key1 = os.environ.get("OPENAI_API_KEY")
+openai.api_key2 = os.environ.get("OPENAI_API_KEY")
+openai.api_model1 = os.environ.get("OPENAI_API_MODEL1")
+openai.api_model2 = os.environ.get("OPENAI_API_MODEL2")
 
 BASE_SYSTEM_MESSAGE = """"""
 
 
-def
-    completion = openai.Completion.create(
-
-
-
-
-
+def make_prediction(prompt, model, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None, api_key, api_base):
+    completion = openai.Completion.create(
+        model=model,
+        api_key=api_key,
+        api_base=api_base,
+        prompt=prompt,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        repetition_penalty=repetition_penalty,
+        stream=True,
+        stop=["</s>", "<|im_end|>"])
     for chunk in completion:
         yield chunk["choices"][0]["text"]
 
@@ -36,38 +45,14 @@ def user(message, history)
     return message, history
 
 
-def
-
-
-
-
-
-    # strip the last `<|end_of_turn|>` from the messages
-    #messages = messages.rstrip("<|end_of_turn|>")
-    # remove last space from assistant, some models output a ZWSP if you leave a space
-    messages = messages.rstrip()
+def user_double(message, history1, history2):
+    history1 = history1 or []
+    history2 = history2 or []
+    history1.append([message, ""])
+    history2.append([message, ""])
+    return "", history1, history2
 
-
-        messages,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        top_k=top_k,
-        repetition_penalty=repetition_penalty,
-    )
-    for tokens in prediction:
-        tokens = re.findall(r'(.*?)(\s|$)', tokens)
-        for subtoken in tokens:
-            subtoken = "".join(subtoken)
-            # Remove "Response\n" if it's at the beginning of the assistant's output
-            if subtoken.startswith("Response"):
-                subtoken = subtoken[len("Response"):]
-            answer = subtoken
-            history[-1][1] += answer
-            # stream the response
-            yield history, history, ""
-
-def chat2(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+def chat(model, history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
     history = history or []
 
     messages = BASE_SYSTEM_MESSAGE + system_message.strip() + "\n" + \
@@ -78,8 +63,9 @@ def chat2(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
     # remove last space from assistant, some models output a ZWSP if you leave a space
     messages = messages.rstrip()
 
-    prediction =
+    prediction = make_prediction(
         messages,
+        model,
         max_tokens=max_tokens,
         temperature=temperature,
         top_p=top_p,
@@ -98,6 +84,11 @@ def chat2(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
             # stream the response
             yield history, history, ""
 
+def chat_double(history1, history2, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+    gen1 = chat(openai.api_model1, history1, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty, openai.api_key1, openai.api_base1)
+    gen2 = chat(openai.api_model2, history2, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty, openai.api_key2, openai.api_base2)
+    for h1, h2 in zip(gen1, gen2):
+        yield h1, h2
 
 start_message = ""
 
@@ -110,7 +101,6 @@ CSS ="""
 #chatbot2 { flex-grow: 1; overflow: auto; resize: vertical; }
 """
 
-#with gr.Blocks() as demo:
 with gr.Blocks(css=CSS) as demo:
     with gr.Row():
         with gr.Column():
@@ -122,9 +112,9 @@ with gr.Blocks(css=CSS) as demo:
     with gr.Row():
         with gr.Column():
            #chatbot = gr.Chatbot().style(height=500)
-            chatbot1 = gr.Chatbot(label="
+            chatbot1 = gr.Chatbot(label="Chat1: "+openai.api_model1, elem_id="chatbot1")
         with gr.Column():
-            chatbot2 = gr.Chatbot(label="
+            chatbot2 = gr.Chatbot(label="Chat2: "+openai.api_model2, elem_id="chatbot2")
     with gr.Row():
         message = gr.Textbox(
             label="What do you want to chat about?",
@@ -145,7 +135,7 @@ with gr.Blocks(css=CSS) as demo:
         repetition_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.05, value=1.1)
 
         system_msg = gr.Textbox(
-            start_message, label="System Message", interactive=True, visible=True, placeholder="System prompt. Provide instructions which you want the model to remember.", lines=
+            start_message, label="System Message", interactive=True, visible=True, placeholder="System prompt. Provide instructions which you want the model to remember.", lines=3)
 
         chat_history_state1 = gr.State()
         chat_history_state2 = gr.State()
@@ -154,17 +144,13 @@ with gr.Blocks(css=CSS) as demo:
     clear.click(lambda: None, None, chatbot1, queue=False)
     clear.click(lambda: None, None, chatbot2, queue=False)
 
-
-        fn=
-    ).then(
-        fn=chat1, inputs=[chat_history_state1, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot1, chat_history_state1, message], queue=True
-    )
-    submit_click_event2 = submit.click(
-        fn=user, inputs=[message, chat_history_state2], outputs=[message, chat_history_state2], queue=True
+    submit_click_event = submit.click(
+        fn=user_double, inputs=[message, chat_history_state1, chat_history_state2], outputs=[message, chat_history_state1, chat_history_state2], queue=True
     ).then(
-        fn=
+        fn=chat_double, inputs=[chat_history_state1, chat_history_state2, system_msg, max_tokens, temperature, top_p, top_k, repetition_penalty], outputs=[chatbot1, chatbot2, chat_history_state1, chat_history_state2, message], queue=True
     )
-
+
+    stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event], queue=False)
 
 demo.queue(max_size=48, concurrency_count=8).launch(debug=True, server_name="0.0.0.0", server_port=7860)
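Note on the new make_prediction helper: as committed, the positional parameters api_key and api_base come after parameters that already have defaults, which Python rejects with a SyntaxError before the Space can start. Below is a minimal sketch of the same helper with a valid signature. It keeps the parameter names from the diff and assumes the pre-1.0 openai client (where Completion.create accepts per-request api_key and api_base overrides) and an OpenAI-compatible backend that understands top_k and repetition_penalty; it is an illustration, not the app's actual code.

import openai

def make_prediction(prompt, model, max_tokens=None, temperature=None, top_p=None,
                    top_k=None, repetition_penalty=None, api_key=None, api_base=None):
    # Same call as in the diff, but api_key/api_base now have defaults, so the
    # signature is valid Python; both are forwarded as per-request overrides.
    completion = openai.Completion.create(
        model=model,
        api_key=api_key,
        api_base=api_base,
        prompt=prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        stream=True,
        stop=["</s>", "<|im_end|>"],
    )
    for chunk in completion:
        # Stream each text fragment back to the caller as it arrives.
        yield chunk["choices"][0]["text"]

Making the two parameters keyword-only (after a bare *) would work equally well; the important point is that they no longer follow defaulted parameters without defaults of their own.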
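A second mismatch worth flagging: chat is declared with eight parameters, but chat_double calls it with ten (adding the per-endpoint key and base URL), and chat_double yields two values per step while its .then() handler lists five outputs (chatbot1, chatbot2, chat_history_state1, chat_history_state2, message). The sketch below shows one way the wiring could be made consistent; it assumes chat has been extended to accept api_key/api_base and to yield the updated history once per streamed chunk, which is a hypothetical signature rather than the committed one.

def chat_double(history1, history2, system_message, max_tokens, temperature,
                top_p, top_k, repetition_penalty):
    # Assumes chat(model, history, system_message, ..., api_key, api_base)
    # forwards the key/base to make_prediction and yields the updated history
    # after each chunk (hypothetical extension of the diff's chat()).
    gen1 = chat(openai.api_model1, history1, system_message, max_tokens, temperature,
                top_p, top_k, repetition_penalty, openai.api_key1, openai.api_base1)
    gen2 = chat(openai.api_model2, history2, system_message, max_tokens, temperature,
                top_p, top_k, repetition_penalty, openai.api_key2, openai.api_base2)
    # zip() advances both streams in lockstep, so the faster model waits for
    # the slower one between chunks and stops when the shorter stream ends.
    for h1, h2 in zip(gen1, gen2):
        # Five values, matching outputs=[chatbot1, chatbot2,
        # chat_history_state1, chat_history_state2, message].
        yield h1, h2, h1, h2, ""

If the two models finish at different times, zip() truncates to the shorter stream; itertools.zip_longest plus a little bookkeeping to reuse the last emitted history would let the longer one keep streaming.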