from huggingface_hub import InferenceClient
import gradio as gr


client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.2"
)
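
# NOTE: unauthenticated calls to the serverless Inference API are rate-limited.
# Passing a token is optional but recommended (assumption: one is exported as
# HF_TOKEN), e.g.:
#   client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2", token=os.environ["HF_TOKEN"])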

# Original minimal instruct formatter, kept for reference:
"""
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
"""


def format_prompt(message, history):
    """
    Formats a survey prompt for the chatbot from the conversation history
    and the user's current message.

    Args:
        message (str): The user's current message.
        history (list): A list of (user_prompt, bot_response) tuples from past turns.

    Returns:
        str: The formatted prompt for the chatbot.
    """

    # Open the instruction block that carries the survey "system" instructions.
    prompt = "<s>[INST]\n"

    prompt += """**Introduction:**
Perform a survey on `model` Wireless Headphones: this name is broad enough to encompass various headphone types (over-ear, on-ear, earbuds) while highlighting a key feature (wireless) that is relevant across categories.
**Ask questions one at a time, in a very short and concise way, and don't mention the example responses while asking questions:**
* Feel free to rephrase or clarify the questions based on the user.

* Acknowledge the user's feedback and show appreciation for their participation.

Some sample questions:
0. **Which model have you purchased, and from which brand?**
1. **How satisfied are you with the overall sound quality of your `model` Wireless Headphones?** (Very satisfied, Somewhat satisfied, Neutral, Somewhat dissatisfied, Very dissatisfied)
2. **On a scale of 1 (strongly disagree) to 5 (strongly agree), how comfortable are your `model` Wireless Headphones to wear for extended periods?**
3. **Did the battery life of your `model` Wireless Headphones meet your expectations based on the product description?** (Yes/No)
4. **How easy was it to connect your `model` Wireless Headphones to your devices via Bluetooth?** (Very easy, Somewhat easy, Neutral, Somewhat difficult, Very difficult)
5. **Do the controls on your `model` Wireless Headphones (volume, play/pause, etc.) function smoothly and intuitively?** (Yes/No)
6. **For over-ear or on-ear headphones only: How well do the ear cups of your `model` Wireless Headphones block out external noise?** (Very well, Somewhat well, Neutral, Not very well, Not at all)
7. **For earbuds only: How secure is the fit of your `model` Wireless Headphones in your ears?** (Very secure, Somewhat secure, Neutral, Not very secure, Not secure at all)
8. **Do the features advertised for your `model` Wireless Headphones (noise cancellation, microphone quality, etc.) function as expected?** (Yes/No)
9. **Overall, how likely are you to recommend these `model` Wireless Headphones to a friend or family member?** (Very likely, Somewhat likely, Neutral, Somewhat unlikely, Not likely at all)
10. **(Optional) Is there any additional feedback you'd like to share about your experience with your `model` Wireless Headphones?** (Open-ended text response)
**End the conversation with a few last questions and give a summary of the survey feedback:**
11. "What do you like most about our product?"
12. "How can we improve our product?"
13. "Would you recommend our product to others?"
"""

    # Close the instruction block before replaying the conversation history.
    prompt += "[/INST]\n"

    # Replay prior turns in [INST] user [/INST] response form.
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]\n{bot_response}\n"

    # Steer the model: ask the next survey question, or wrap up once all
    # thirteen questions have been asked.
    current_question = len(history) + 1
    if current_question <= 13:
        steering = "Ask the next question."
    else:
        steering = ("End the conversation with a summary of the survey feedback. "
                    "Thank you for your participation! The survey is now complete.")

    # Keep the steering note inside the final instruction block so the
    # [INST]/[/INST] tags stay balanced.
    prompt += f"[INST] {message}\n{steering} [/INST]\n"

    return prompt
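
# Illustrative shape of the first-turn template (hypothetical input):
#   format_prompt("Hi, I'd like to take the survey.", [])
#   -> "<s>[INST]\n**Introduction:** ...survey instructions... [/INST]\n"
#      "[INST] Hi, I'd like to take the survey.\nAsk the next question. [/INST]\n"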


def generate(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    # Sliders hand over raw numbers; normalize them and keep temperature
    # strictly positive, since sampling breaks down at 0.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    max_new_tokens = int(max_new_tokens)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible sampling
    )
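
    # These kwargs map directly onto the sampling parameters of
    # InferenceClient.text_generation() below.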

    formatted_prompt = format_prompt(prompt, history)

    # Stream tokens from the Inference API and yield the growing response so
    # gr.ChatInterface can render it incrementally.
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    for response in stream:
        output += response.token.text
        yield output
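
# Minimal manual smoke test (hypothetical; the app normally drives generate()
# through gr.ChatInterface):
#
#   last = ""
#   for partial in generate("Hi, I'd like to start the survey.", []):
#       last = partial
#   print(last)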


additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
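
# gr.ChatInterface forwards these sliders positionally after (message, history),
# so their order must match generate()'s
# (temperature, max_new_tokens, top_p, repetition_penalty) signature.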

# Styling for an element with elem_id="mkd" (not referenced by the current layout).
css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""
title = "Share Your Experience with Versatile Wireless Headphones & Help Us Improve"


example = [
    "I recently purchased the NovaBlast Over-Ear Wireless Headphones. They looked really comfortable and have great noise cancellation features.",
    "I bought a pair of the SonicFit Wireless Earbuds. They're perfect for running because they stay in my ears securely.",
    "I got the RhythmPlus Wireless Headphones. I wasn't sure if I wanted over-ear or earbuds, so these seemed like a good compromise.",
]

with gr.Blocks(css=css) as demo:
    gr.ChatInterface(
        generate,
        title=title,
        additional_inputs=additional_inputs,
        # Surface the sample purchase descriptions as clickable examples
        # (assumes plain-string examples are accepted in this Gradio version).
        examples=example,
    )

demo.queue().launch(debug=True)
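
# queue() enables Gradio's event queue, which generator-based (streaming)
# handlers like generate() rely on; debug=True keeps the process attached
# and prints tracebacks to the console.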