Update app.py
app.py
CHANGED
@@ -3,7 +3,8 @@ import json
 import re
 import tempfile
 from importlib.resources import files
-
+from groq import Groq
+import os
 import click
 import gradio as gr
 import numpy as np

@@ -60,25 +61,46 @@ chat_model_state = None
 chat_tokenizer_state = None


+
+groq_token = os.getenv("Groq_TOKEN", None)
+client = Groq(
+    api_key=groq_token,
+)
+
 @gpu_decorator
-def generate_response(messages, model, tokenizer):
-    """Generate response using Qwen"""
-    text = tokenizer.apply_chat_template(
-        messages,
-        tokenize=False,
-        add_generation_prompt=True,
+def generate_response(messages):
+    """Generate response using Groq"""
+    chat_completion = client.chat.completions.create(
+        messages=[
+            {
+                "role": "user",
+                "content": messages,
+            }
+        ] if isinstance(messages, str) else messages,
+        model="llama-3.3-70b-versatile",
+        stream=False,
     )
-    [11 more removed lines: the rest of the old local-model generation body, not legible in this view]
+    return chat_completion.choices[0].message.content # this may need to be fixed
+
+
+@gpu_decorator
+def process_audio_input(audio_path, text, history, conv_state):
+    if not audio_path and not text.strip():
+        return history, conv_state, ""
+
+    if audio_path:
+        text = preprocess_ref_audio_text(audio_path, text)[1]
+
+    if not text.strip():
+        return history, conv_state, ""
+
+    conv_state.append({"role": "user", "content": text})
+    history.append((text, None))
+    response = generate_response(conv_state)
+    conv_state.append({"role": "assistant", "content": response})
+    history[-1] = (text, response)
+    return history, conv_state, ""
+


 @gpu_decorator
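The hunk above swaps the locally hosted chat model for Groq's hosted API. A minimal standalone sketch of that path, useful for checking the key and model name outside the Space, is below; it assumes the groq package is installed and that an API key is exported under the same Groq_TOKEN variable the commit reads, and it mirrors the added generate_response, including its string-vs-message-list branch.

import os

from groq import Groq

# Same environment variable the commit reads; export a Groq API key there first.
client = Groq(api_key=os.getenv("Groq_TOKEN"))


def generate_response(messages):
    """Generate a reply via Groq, mirroring the function added in this commit."""
    chat_completion = client.chat.completions.create(
        # A bare string is wrapped into a single user message; a message list passes through.
        messages=[{"role": "user", "content": messages}] if isinstance(messages, str) else messages,
        model="llama-3.3-70b-versatile",
        stream=False,
    )
    return chat_completion.choices[0].message.content


if __name__ == "__main__":
    print(generate_response("Say hello in one short sentence."))
    print(generate_response([{"role": "user", "content": "And now in French."}]))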
@@ -210,24 +232,9 @@ Have a conversation with an AI using your reference voice!
 }
 ]
 )
+
+

-@gpu_decorator
-def process_audio_input(audio_path, text, history, conv_state):
-    if not audio_path and not text.strip():
-        return history, conv_state, ""
-
-    if audio_path:
-        text = preprocess_ref_audio_text(audio_path, text)[1]
-
-    if not text.strip():
-        return history, conv_state, ""
-
-    conv_state.append({"role": "user", "content": text})
-    history.append((text, None))
-    response = generate_response(conv_state, chat_model_state, chat_tokenizer_state)
-    conv_state.append({"role": "assistant", "content": response})
-    history[-1] = (text, response)
-    return history, conv_state, ""

 @gpu_decorator
 def generate_audio_response(history, ref_audio, ref_text, remove_silence):
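For reference, the conversation-state handling that this commit moves to module level can be traced offline with stubs in place of the Groq call and the f5_tts transcription helper. Both stand-ins below are hypothetical; the process_audio_input body itself is copied from the commit, minus the @gpu_decorator.

def generate_response(conv_state):
    # Stand-in for the Groq-backed helper added in this commit.
    return f"(stub reply to: {conv_state[-1]['content']})"


def preprocess_ref_audio_text(audio_path, text):
    # Stand-in for f5_tts's preprocessing helper; returns (audio, text) like the real one.
    return audio_path, text


def process_audio_input(audio_path, text, history, conv_state):
    # Same control flow as the function in the commit.
    if not audio_path and not text.strip():
        return history, conv_state, ""

    if audio_path:
        text = preprocess_ref_audio_text(audio_path, text)[1]

    if not text.strip():
        return history, conv_state, ""

    conv_state.append({"role": "user", "content": text})
    history.append((text, None))                  # placeholder row while the reply is pending
    response = generate_response(conv_state)
    conv_state.append({"role": "assistant", "content": response})
    history[-1] = (text, response)                # fill in the assistant's reply
    return history, conv_state, ""


history, conv_state, _ = process_audio_input(None, "Hi there", [], [])
print(history)     # [('Hi there', '(stub reply to: Hi there)')]
print(conv_state)  # user message followed by the assistant reply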