import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer  # Tokenizer used for token counting

# Use the tokenizer that matches your model so token counts are accurate.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Define a maximum context length (tokens). Check your model's documentation!
MAX_CONTEXT_LENGTH = 4096  # Example: adjust based on your model
################################
# SYSTEM PROMPT (PATIENT ROLE) #
################################
nvc_prompt_template = """
You are now taking on the role of a single user (a “patient”) seeking support for various personal and emotional challenges.
BEHAVIOR INSTRUCTIONS:
- You will respond ONLY as this user/patient.
- You will speak in the first person about your own situations, feelings, and worries.
- You will NOT provide counseling or solutions—your role is to share feelings, concerns, and perspectives.
- You have multiple ongoing issues: conflicts with neighbors, career insecurities, arguments about money, feeling excluded at work, feeling unsafe in the classroom, and so on. You’re also experiencing sadness about two friends fighting and your friend group possibly falling apart.
- Continue to speak from this user's perspective when the conversation continues.
Start the conversation by expressing your current feelings or challenges from the patient's point of view.
"""
def count_tokens(text: str) -> int:
    """Counts the number of tokens in a given string."""
    return len(tokenizer.encode(text))
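
# Note: count_tokens measures the raw text only. The chat template applied at
# generation time typically adds a few special tokens per turn, which is why
# respond() below reserves extra headroom in the token budget.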

def truncate_history(history: list[tuple[str, str]], system_message: str, max_length: int) -> list[tuple[str, str]]:
    """Truncates the conversation history to fit within the maximum token limit."""
    truncated_history = []
    system_message_tokens = count_tokens(system_message)
    current_length = system_message_tokens

    # Iterate backwards through the history (newest to oldest)
    for user_msg, assistant_msg in reversed(history):
        user_tokens = count_tokens(user_msg) if user_msg else 0
        assistant_tokens = count_tokens(assistant_msg) if assistant_msg else 0
        turn_tokens = user_tokens + assistant_tokens
        if current_length + turn_tokens <= max_length:
            truncated_history.insert(0, (user_msg, assistant_msg))  # Add to the beginning
            current_length += turn_tokens
        else:
            break  # Stop adding turns once the limit would be exceeded
    return truncated_history
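
# Quick illustration (hypothetical token counts): with a 10-token system
# prompt and a 30-token budget, turns are re-added newest-first until the
# budget is spent, so the OLDEST turns are dropped while the returned list
# stays in chronological order:
#   truncate_history([("old msg", "old reply"), ("new msg", "new reply")],
#                    system_message, max_length=30)
#   -> [("new msg", "new reply")]  # if only the newest turn fits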

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Responds to a user message, maintaining conversation history."""
    # Use the system prompt that instructs the LLM to behave as the patient
    formatted_system_message = system_message

    # Truncate history so the prompt plus the generated reply fit in the context window
    truncated_history = truncate_history(
        history,
        formatted_system_message,
        MAX_CONTEXT_LENGTH - max_tokens - 100,  # Reserve some space
    )

    # Build the messages list with the system prompt first
    messages = [{"role": "system", "content": formatted_system_message}]

    # Replay the truncated conversation as plain role/content messages.
    # chat_completion applies the model's chat template itself, so special
    # tokens such as <|user|> or </s> must NOT be embedded in the content
    # (doing so would duplicate them in the final prompt).
    for user_msg, assistant_msg in truncated_history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add the latest user query
    messages.append({"role": "user", "content": message})
response = "" | |
try: | |
# Generate response from the LLM, streaming tokens | |
for chunk in client.chat_completion( | |
messages, | |
max_tokens=max_tokens, | |
stream=True, | |
temperature=temperature, | |
top_p=top_p, | |
): | |
token = chunk.choices[0].delta.content | |
response += token | |
yield response | |
except Exception as e: | |
print(f"An error occurred: {e}") | |
yield "I'm sorry, I encountered an error. Please try again." | |

# OPTIONAL: an initial user message (the LLM "as user") if desired
initial_user_message = (
    "I really don’t know where to begin… I feel overwhelmed lately. "
    "My neighbors keep playing loud music, and I’m arguing with my partner about money. "
    "Also, two of my friends are fighting, and the group is drifting apart. "
    "I just feel powerless."
)
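
# A sketch of one way to surface this opening line when the app loads,
# assuming a Gradio version whose ChatInterface accepts a pre-populated
# Chatbot component via its `chatbot` argument; in tuple format a
# (None, msg) pair renders msg as a bot message:
#
#   demo = gr.ChatInterface(
#       fn=respond,
#       chatbot=gr.Chatbot(value=[(None, initial_user_message)]),
#       additional_inputs=[...],  # same inputs as below
#   )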

# --- Gradio Interface ---
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value=nvc_prompt_template, label="System message", visible=True),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    # You can optionally set 'title' or 'description' to show some info in the UI:
    title="NVC Patient Chatbot",
    description="This chatbot behaves like a user/patient describing personal challenges.",
)

if __name__ == "__main__":
    demo.launch()