Update app.py
app.py CHANGED
@@ -100,7 +100,7 @@ def truncate_history(history: list[tuple[str, str]], system_message: str, max_le
         turn_tokens = user_tokens + assistant_tokens
 
         if current_length + turn_tokens <= max_length:
-            truncated_history.insert(0, (user_msg, assistant_msg))
+            truncated_history.insert(0, (user_msg, assistant_msg))
             current_length += turn_tokens
         else:
             break  # Stop adding turns if we exceed the limit
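For context, the loop touched by this hunk keeps inserting turns at the front of truncated_history until the token budget (max_length) runs out; the insert(0, ...) and break suggest the history is walked newest-first. A minimal sketch of that pattern is below. The count_tokens helper and the decision to charge the system message against the same budget are illustrative assumptions; app.py's real token counting is outside this hunk.

def count_tokens(text: str) -> int:
    # Illustrative stand-in; the actual token counter in app.py is not shown in this hunk.
    return len(text.split())

def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Keep the newest turns that still fit inside max_length tokens."""
    truncated_history = []
    current_length = count_tokens(system_message)  # assumption: system prompt counts toward the budget
    for user_msg, assistant_msg in reversed(history):  # newest turn first
        user_tokens = count_tokens(user_msg or "")
        assistant_tokens = count_tokens(assistant_msg or "")
        turn_tokens = user_tokens + assistant_tokens

        if current_length + turn_tokens <= max_length:
            truncated_history.insert(0, (user_msg, assistant_msg))
            current_length += turn_tokens
        else:
            break  # Stop adding turns if we exceed the limit
    return truncated_history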
@@ -114,39 +114,40 @@ def respond(
     max_tokens,
     temperature,
     top_p,
+    clear_memory  # Added extra parameter to match the 7 inputs provided
 ):
     """Responds to a user message, maintaining conversation history, using special tokens and message list."""
-    if message.lower() == "clear memory"
+    # Check for the clear memory command (or if the Clear Memory button is triggered)
+    if message.lower() == "clear memory" or clear_memory:
         return "", []  # Return empty message and empty history to reset the chat
 
     formatted_system_message = system_message  # Use the system_message argument
-    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
+    truncated_history = truncate_history(history, formatted_system_message, MAX_CONTEXT_LENGTH - max_tokens - 100)
 
-    messages = [{"role": "system", "content": formatted_system_message}]
+    messages = [{"role": "system", "content": formatted_system_message}]
     for user_msg, assistant_msg in truncated_history:
         if user_msg:
-            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
+            messages.append({"role": "user", "content": f"<|user|>\n{user_msg}</s>"})
         if assistant_msg:
-            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
+            messages.append({"role": "assistant", "content": f"<|assistant|>\n{assistant_msg}</s>"})
 
-    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
+    messages.append({"role": "user", "content": f"<|user|>\n{message}</s>"})
 
     response = ""
     try:
+        for chunk in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = chunk.choices[0].delta.content
+            response += token
+            yield response
     except Exception as e:
+        print(f"An error occurred: {e}")
+        yield "I'm sorry, I encountered an error. Please try again."
 
 # --- Gradio Interface ---
 demo = gr.ChatInterface(
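The try block added above streams tokens instead of waiting for the full completion. A minimal standalone sketch of the same loop is below, assuming client is a huggingface_hub.InferenceClient (its construction is outside this hunk, and the model id is purely illustrative); it adds one guard the diff omits, because the last streamed chunk can carry an empty delta.

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # illustrative model id

def stream_reply(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    """Yield the growing response string, mirroring respond()'s try block."""
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # guard: the final chunk's delta may have no content
            response += token
            yield response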
@@ -156,17 +157,12 @@ demo = gr.ChatInterface(
             value=default_nvc_prompt_template,
             label="System message",
             visible=True,
-            lines=10
+            lines=10  # Increased height for more space to read the prompt
         ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Button("Clear Memory"),
     ],
 )
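The new clear_memory parameter and the gr.Button("Clear Memory") entry are two halves of the same change: gr.ChatInterface passes the current value of each component in additional_inputs, in list order, as extra positional arguments after message and history, so the callback signature has to match the list length. A stripped-down sketch of that wiring is below; the system prompt string stands in for default_nvc_prompt_template, which is defined elsewhere in app.py. Note that a Button used as an input contributes its value rather than a click event, which is worth checking against the "or clear_memory" test in the hunk above.

import gradio as gr

def respond(message, history, system_message, max_tokens, temperature, top_p, clear_memory):
    # message + history first, then one argument per additional_inputs component, in order.
    return f"(echo) {message}"

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message", lines=10),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        gr.Button("Clear Memory"),
    ],
)

if __name__ == "__main__":
    demo.launch()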