import gradio as gr
import uuid
import warnings
import numpy as np
import json
import os
import tempfile
import scipy.io.wavfile as wavfile
from gradio_client import Client, FileData, handle_file

warnings.filterwarnings("ignore")

# The backend Space that actually runs the models is resolved from the 'src'
# environment variable (expected to hold the backend Space ID or URL).
client = Client(os.environ['src'])
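# Usage note (an assumption, not taken from the repo docs): point 'src' at the
# backend Space before launching this frontend, e.g.
#   src="username/aira-backend" python app.py
# where "username/aira-backend" and app.py are placeholders, not the real names.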
custom_css = """
.gradio-container {
    justify-content: flex-start !important;
}
"""
def create_frontend_demo():
    def chat_function(message, history, session_id):
        if not session_id:
            session_id = "user_" + uuid.uuid4().hex[:8]
        result = client.predict(
            message,     # message
            history,     # history
            session_id,  # session id
            fn_index=0   # function to call in the backend
        )
        # The backend returns: empty_string, history, audio_path, display_text
        _, new_history, audio_path, display_text = result
        # For audio, we need to return the path string directly
        return "", new_history, audio_path, session_id, display_text
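    # The text endpoint is addressed positionally (fn_index=0) rather than by
    # api_name, so this frontend depends on the backend's function order staying
    # fixed. The signature assumed here is inferred from the unpacking above:
    # (message, history, session_id) -> ("", history, audio_path, display_text).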
    with gr.Blocks(css=custom_css, theme="Respair/[email protected]") as demo:
        session_id_state = gr.State("")

        with gr.Tabs() as tabs:
            with gr.Tab("Chat"):
                session_display = gr.Markdown("Current Session ID: None", label="Session ID")
                chatbot = gr.Chatbot(
                    label="Conversation History",
                    height=400,
                    avatar_images=[
                        "photo_2024-03-01_22-30-42.jpg",
                        "colored_blured.png"
                    ],
                    placeholder="Start chatting with Aira..."
                )
                # Place just the text box (removing the send button)
                with gr.Column():
                    msg = gr.Textbox(
                        show_label=False,
                        placeholder="Enter text and press enter",
                        container=True
                    )
                audio_output = gr.Audio(
                    label="Aira's Response",
                    type="filepath",
                    streaming=False,
                    autoplay=True
                )
                with gr.Row():
                    audio_input = gr.Audio(
                        sources=["microphone"],
                        type="numpy",
                        label="Audio Input",
                        streaming=False
                    )
            with gr.Tab("Options"):
                with gr.Column():
                    session_input = gr.Textbox(
                        value="",
                        label="Session ID (leave blank for new session)"
                    )
                    gen_id_btn = gr.Button("Set Session ID")
                    session_msg = gr.Markdown("")
                    clear_btn = gr.Button("Clear Conversation")

                    gr.Markdown("""
This is a personal project I wanted to do for a while (G̶o̶t̶t̶a̶ ̶m̶a̶k̶e̶ ̶u̶s̶e̶ ̶o̶f̶ ̶t̶h̶i̶s̶ ̶P̶r̶o̶ ̶s̶u̶b̶ ̶p̶e̶r̶k̶s̶ ̶w̶h̶i̶l̶e̶ ̶I̶ ̶h̶a̶v̶e̶ ̶i̶t̶). <br>
Aira's voice is designed to be unique; it doesn't belong to any real person. <br>
You can talk to her in English or Japanese, but she will only respond in Japanese (subs over dubs, bros). Ask her to give you subtitles if you can't speak Japanese. <br>
Most of the latency comes from HF's Inference API.
The LLM is not fine-tuned or optimized at all; the current state of off-the-shelf conversational Japanese LLMs seems to be less than remarkable, so please keep that in mind.

1. Enter your Session ID above, or leave it blank for a new one
2. Click 'Set Session ID' to confirm
3. Use 'Clear Conversation' to reset the chat
4. Your conversation history is saved based on your Session ID

I'll try to keep this demo up for as long as I can afford.
""")
        def respond(message, chat_history, session_id):
            return chat_function(message, chat_history, session_id)

        # Allow submission via Enter key in the text box:
        msg.submit(
            respond,
            inputs=[msg, chatbot, session_id_state],
            outputs=[msg, chatbot, audio_output, session_id_state, session_display]
        )
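        # Session setup goes through the backend's second endpoint (fn_index=1).
        # Its signature is inferred from how the result is unpacked below, not
        # taken from the backend source: (user_id) -> (new_id, display_text).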
        def set_session(user_id):
            result = client.predict(
                user_id,
                fn_index=1
            )
            new_id, display_text = result
            return new_id, "", display_text

        gen_id_btn.click(
            set_session,
            inputs=[session_input],
            outputs=[session_id_state, session_msg, session_display]
        )
        def handle_audio(audio_data, history, session_id):
            if audio_data is None:
                return None, history, session_id, f"Current Session ID: {session_id}"
            try:
                sample_rate, audio_array = audio_data
                # Write the recording to a temporary WAV file. With delete=True the
                # file is removed when the 'with' block exits, so the backend call
                # must happen while the file still exists.
                with tempfile.NamedTemporaryFile(suffix='.wav', delete=True) as temp:
                    wavfile.write(temp.name, sample_rate, audio_array)
                    audio = {"path": temp.name, "meta": {"_type": "gradio.FileData"}}
                    result = client.predict(
                        audio,
                        history,
                        session_id,
                        api_name="/handle_audio"
                    )
                audio_path, new_history, new_session_id = result
                display_text = f"Current Session ID: {new_session_id}"
                return audio_path, new_history, new_session_id, display_text
            except Exception as e:
                print(f"Error processing audio: {str(e)}")
                import traceback
                traceback.print_exc()
                return None, history, session_id, f"Error processing audio. Session ID: {session_id}"

        audio_input.stop_recording(
            handle_audio,
            inputs=[audio_input, chatbot, session_id_state],
            outputs=[audio_output, chatbot, session_id_state, session_display]
        )
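        # Clearing only resets the frontend Chatbot component; any conversation
        # state the backend keeps for this session ID is left untouched.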
        clear_btn.click(
            lambda: [],
            None,
            [chatbot]
        )

    return demo


if __name__ == "__main__":
    demo = create_frontend_demo()
    demo.launch(show_error=True)