Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
+import streamlit as st
 from huggingface_hub import InferenceClient
-import gradio as gr
 from gtts import gTTS
 import IPython.display as ipd
 
@@ -53,24 +53,10 @@ def generate(
 
     for response in stream:
         output += response.token.text
-
-
-
+        st.text(output)
+        ipd.Audio(text_to_speech(output), autoplay=True)
+
     return output
 
-
-
-output_text = gr.Textbox(label="Respuesta")
-
-def update_output_text(output):
-    output_text.update(output)
-
-chat_interface = gr.Interface(
-    fn=generate,
-    inputs=[input_text],
-    outputs=[output_audio, output_text],
-    live=True,
-    theme="compact",
-)
-
-chat_interface.launch(share=True, debug=True)
+user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
+output = generate(user_input, history=None) # Ajusta 'history' según sea necesario
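For reference, a minimal sketch of how the Streamlit version of app.py could hang together after this change. The model id, the client setup, and the text_to_speech helper do not appear in the hunks above, so they are assumptions here; st.audio is used for playback (Streamlit's own call) in place of the IPython.display.Audio object the committed code keeps, and the rest mirrors the diff.

# Sketch only: assumed model id and text_to_speech helper, not the committed file.
import tempfile
import streamlit as st
from huggingface_hub import InferenceClient
from gtts import gTTS

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")  # assumed model, not shown in the diff

def text_to_speech(text: str) -> str:
    # Assumed helper: synthesize the reply with gTTS and return an MP3 path.
    tmp = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    gTTS(text=text, lang="es").save(tmp.name)
    return tmp.name

def generate(prompt: str, history=None) -> str:
    # Stream tokens from the model and show the partial reply as it grows.
    output = ""
    placeholder = st.empty()
    stream = client.text_generation(prompt, max_new_tokens=256, stream=True, details=True)
    for response in stream:
        output += response.token.text
        placeholder.text(output)
    # st.audio is Streamlit's native playback; the commit calls IPython.display.Audio instead.
    st.audio(text_to_speech(output), format="audio/mp3")
    return output

user_input = st.text_input(label="Usuario", value="Escribe aquí tu mensaje")
if user_input:
    generate(user_input, history=None)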