Respair committed on
Commit
35ffb85
·
verified ·
1 Parent(s): 459af54

Update chat_app_remote.py

Browse files
Files changed (1) hide show
  1. chat_app_remote.py +5 -32
chat_app_remote.py CHANGED
@@ -16,18 +16,6 @@ custom_css = """
16
  .gradio-container {
17
  justify-content: flex-start !important;
18
  }
19
- .send-btn {
20
- width: 140px;
21
- height: 40px;
22
- min-width: 140px;
23
- padding: 0;
24
- }
25
- .centered-col {
26
- display: flex;
27
- flex-direction: column;
28
- align-items: center;
29
- justify-content: center;
30
- }
31
  """
32
 
33
  def create_frontend_demo():
@@ -64,14 +52,13 @@ def create_frontend_demo():
64
  placeholder="Start chatting with Aira..."
65
  )
66
 
67
- # Use a Column with a custom class to center its items
68
- with gr.Column(elem_classes="centered-col"):
69
  msg = gr.Textbox(
70
  show_label=False,
71
  placeholder="Enter text and press enter",
72
  container=True
73
  )
74
- send_btn = gr.Button("➤", elem_classes="send-btn")
75
 
76
  audio_output = gr.Audio(
77
  label="Aira's Response",
@@ -87,7 +74,7 @@ def create_frontend_demo():
87
  label="Audio Input",
88
  streaming=False
89
  )
90
-
91
  with gr.Tab("Options"):
92
  with gr.Column():
93
  session_input = gr.Textbox(
@@ -104,7 +91,7 @@ def create_frontend_demo():
104
  You can talk to her in English or Japanese, but she will only respond in Japanese (Subs over dubs, bros) ask her to give you a Subtitle if you can't talk in Japanese. <br>
105
 
106
  The majority of the latency depends on the HF's inference api.
107
- LLM is not fine-tuned or optimized at all. the current state of conversational off-the-shelf japanese LLM seem to be less than remarkable, please beware of that.
108
 
109
  1. Enter your Session ID above or leave blank for a new one
110
  2. Click 'Set Session ID' to confirm
@@ -123,12 +110,6 @@ def create_frontend_demo():
123
  inputs=[msg, chatbot, session_id_state],
124
  outputs=[msg, chatbot, audio_output, session_id_state, session_display]
125
  )
126
- # Also allow clicking the send button
127
- send_btn.click(
128
- respond,
129
- inputs=[msg, chatbot, session_id_state],
130
- outputs=[msg, chatbot, audio_output, session_id_state, session_display]
131
- )
132
 
133
  def set_session(user_id):
134
  result = client.predict(
@@ -150,30 +131,22 @@ def create_frontend_demo():
150
 
151
  try:
152
  sample_rate, audio_array = audio_data
153
-
154
  with tempfile.NamedTemporaryFile(suffix='.wav', delete=True) as temp:
155
  wavfile.write(temp.name, sample_rate, audio_array)
156
-
157
  audio = {"path": temp.name, "meta": {"_type": "gradio.FileData"}}
158
-
159
- # Get the result while the temporary file still exists
160
  result = client.predict(
161
  audio,
162
  history,
163
  session_id,
164
  api_name="/handle_audio"
165
  )
166
-
167
- # Unpack and construct the display text
168
  audio_path, new_history, new_session_id = result
169
  display_text = f"Current Session ID: {new_session_id}"
170
-
171
  return audio_path, new_history, new_session_id, display_text
172
-
173
  except Exception as e:
174
  print(f"Error processing audio: {str(e)}")
175
  import traceback
176
- traceback.print_exc() # This will print the full error traceback
177
  return None, history, session_id, f"Error processing audio. Session ID: {session_id}"
178
 
179
  audio_input.stop_recording(
 
16
  .gradio-container {
17
  justify-content: flex-start !important;
18
  }
 
 
 
 
 
 
 
 
 
 
 
 
19
  """
20
 
21
  def create_frontend_demo():
 
52
  placeholder="Start chatting with Aira..."
53
  )
54
 
55
+ # Place just the text box (removing the send button)
56
+ with gr.Column():
57
  msg = gr.Textbox(
58
  show_label=False,
59
  placeholder="Enter text and press enter",
60
  container=True
61
  )
 
62
 
63
  audio_output = gr.Audio(
64
  label="Aira's Response",
 
74
  label="Audio Input",
75
  streaming=False
76
  )
77
+
78
  with gr.Tab("Options"):
79
  with gr.Column():
80
  session_input = gr.Textbox(
 
91
  You can talk to her in English or Japanese, but she will only respond in Japanese (Subs over dubs, bros) ask her to give you a Subtitle if you can't talk in Japanese. <br>
92
 
93
  The majority of the latency depends on the HF's inference api.
94
+ LLM is not fine-tuned or optimized at all. the current state of conversational off-the-shelf japanese LLM seems to be less than remarkable, please beware of that.
95
 
96
  1. Enter your Session ID above or leave blank for a new one
97
  2. Click 'Set Session ID' to confirm
 
110
  inputs=[msg, chatbot, session_id_state],
111
  outputs=[msg, chatbot, audio_output, session_id_state, session_display]
112
  )
 
 
 
 
 
 
113
 
114
  def set_session(user_id):
115
  result = client.predict(
 
131
 
132
  try:
133
  sample_rate, audio_array = audio_data
 
134
  with tempfile.NamedTemporaryFile(suffix='.wav', delete=True) as temp:
135
  wavfile.write(temp.name, sample_rate, audio_array)
 
136
  audio = {"path": temp.name, "meta": {"_type": "gradio.FileData"}}
 
 
137
  result = client.predict(
138
  audio,
139
  history,
140
  session_id,
141
  api_name="/handle_audio"
142
  )
 
 
143
  audio_path, new_history, new_session_id = result
144
  display_text = f"Current Session ID: {new_session_id}"
 
145
  return audio_path, new_history, new_session_id, display_text
 
146
  except Exception as e:
147
  print(f"Error processing audio: {str(e)}")
148
  import traceback
149
+ traceback.print_exc()
150
  return None, history, session_id, f"Error processing audio. Session ID: {session_id}"
151
 
152
  audio_input.stop_recording(