Arnic committed
Commit 5e17a09
1 Parent(s): 7423722

Update app.py

Files changed (1)
app.py +11 -14
app.py CHANGED
@@ -1,10 +1,10 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-# Initialize the client with your model
+# Initialize the client with your model from Hugging Face Hub
 client = InferenceClient("Arnic/gemma2-2b-it-Pubmed20k-TPU")
 
-# Define response function
+# Define the function to handle chat responses
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -13,23 +13,20 @@ def respond(
     temperature,
     top_p,
 ):
+    # System message to set the chatbot's tone
     system_message = (
         "You are a good listener. You advise relaxation exercises, suggest avoiding negative thoughts, "
         "and guide through steps to manage stress. Let's discuss what's on your mind, "
         "or ask me for a quick relaxation exercise."
     )
 
-    # Format history and system message as prompt text
-    chat_history = ""
+    # Format prompt with system message, chat history, and user message
+    prompt = system_message + "\n\n"
     for user_msg, bot_reply in history:
-        if user_msg:
-            chat_history += f"User: {user_msg}\n"
-        if bot_reply:
-            chat_history += f"Assistant: {bot_reply}\n"
-
-    prompt = f"{system_message}\n\n{chat_history}User: {message}\nAssistant:"
+        prompt += f"User: {user_msg}\nAssistant: {bot_reply}\n"
+    prompt += f"User: {message}\nAssistant:"
 
-    # Generate response using the InferenceClient text generation method
+    # Call the text generation API
     response = client.text_generation(
         prompt=prompt,
         max_new_tokens=max_tokens,
@@ -37,11 +34,11 @@ def respond(
         top_p=top_p
     )
 
-    # Extract and yield the text response
-    generated_text = response["generated_text"].replace(prompt, "").strip()
+    # Extract the response text and yield it as output
+    generated_text = response.get("generated_text", "").replace(prompt, "").strip()
     yield generated_text
 
-# Set up Gradio interface
+# Gradio UI setup
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
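
For reference, a minimal standalone sketch of the prompt formatting and generation call introduced in this commit. It assumes a recent huggingface_hub release, where InferenceClient.text_generation returns the generated continuation as a plain str when called with the default details=False, so the result can be used directly rather than indexed like a dict; the sample history, system message, and sampling values below are illustrative placeholders, not values from the app.

# Minimal sketch (assumption: recent huggingface_hub, text_generation
# with default details=False returns the generated text as a str).
from huggingface_hub import InferenceClient

client = InferenceClient("Arnic/gemma2-2b-it-Pubmed20k-TPU")

# Illustrative placeholders standing in for the Gradio inputs.
system_message = "You are a good listener."
history = [("Hi", "Hello! What's on your mind today?")]
message = "I feel stressed about work."

# Same prompt layout as the updated respond(): system text, then turns.
prompt = system_message + "\n\n"
for user_msg, bot_reply in history:
    prompt += f"User: {user_msg}\nAssistant: {bot_reply}\n"
prompt += f"User: {message}\nAssistant:"

# With the default call, the return value is already the new text.
generated_text = client.text_generation(
    prompt,
    max_new_tokens=256,
    temperature=0.7,
    top_p=0.95,
).strip()

print(generated_text)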