felipeserafim001 committed
Commit 0440184
1 Parent(s): 7023a7a

Update app.py

Files changed (1):
  1. app.py +46 -13
app.py CHANGED
@@ -1,11 +1,13 @@
+
 import gradio as gr
 from huggingface_hub import InferenceClient
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 client = InferenceClient("gpt-omni/mini-omni2")
+#client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")
+
 
 def respond(
     message,
@@ -27,17 +29,48 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+
+    try:
+        for message in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            # Skip chunks that arrive without any choices
+            if not message or not message.choices:
+                continue
+
+            try:
+                # Extract content and finish reason from the streamed chunk
+                content = message.choices[0].delta.content
+                finish_reason = message.choices[0].finish_reason
+
+                # Check whether the chunk carries any text (content may be None)
+                if not content or content.strip() == "":
+                    # If the finish reason is 'stop', it's expected and we can break the loop
+                    if finish_reason == "stop":
+                        print("Stream ended normally.")
+                        break
+                    else:
+                        print("Received unexpected empty content, skipping...")
+                        continue
+
+                response += content
+                yield response
+
+            except (AttributeError, IndexError, KeyError) as e:
+                print(f"Error processing message: {e}")
+                continue
+
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+        yield "An error occurred while generating the response."
+
+    # Final check if the response is empty
+    if response.strip() == "":
+        yield "No response generated. Please try again or adjust the settings."
 
 
 """
@@ -46,7 +79,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(value="You are a friendly Chatbot. Your name is Juninho.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
@@ -61,4 +94,4 @@ demo = gr.ChatInterface(
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
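
For reference, a minimal standalone sketch of the streaming pattern the updated respond() relies on, run outside Gradio. This is an illustration, not part of the commit: it assumes huggingface_hub v0.22+ and a chat-capable endpoint, uses the fallback model the commit leaves commented out, takes max_tokens and temperature from the Space's visible slider defaults, and assumes top_p=0.95 since that slider's default is cut off in the diff.

# Minimal sketch of a streamed chat completion with InferenceClient.
# Assumptions: huggingface_hub >= 0.22 and a model that serves chat
# completions ("unsloth/Llama-3.2-1B-Instruct" is the commented-out
# fallback from the commit, not the model the Space actually loads).
from huggingface_hub import InferenceClient

client = InferenceClient("unsloth/Llama-3.2-1B-Instruct")

messages = [
    {"role": "system", "content": "You are a friendly Chatbot. Your name is Juninho."},
    {"role": "user", "content": "Hello!"},
]

response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=512,    # Space's "Max new tokens" default
    stream=True,
    temperature=0.7,   # Space's "Temperature" default
    top_p=0.95,        # assumed; the top_p slider default is not visible in the diff
):
    # Each chunk is a ChatCompletionStreamOutput, not a dict: the text lives
    # at chunk.choices[0].delta.content and may be None on the final chunk,
    # where finish_reason is set instead.
    choice = chunk.choices[0]
    if choice.delta.content:
        response += choice.delta.content
    if choice.finish_reason == "stop":
        break

print(response)

Because the streamed chunks are dataclass-like objects rather than dicts, the guard in respond() checks message.choices before indexing into the chunk, and the finish_reason check distinguishes the expected empty final chunk from a genuinely empty stream.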