from TTS.api import TTS
import json
import gradio as gr
from gradio import Dropdown
from share_btn import community_icon_html, loading_icon_html, share_js
import os
import shutil
import re
with open("characters.json", "r") as file:
    data = json.load(file)

characters = [
    {
        "image": item["image"],
        "title": item["title"],
        "speaker": item["speaker"]
    }
    for item in data
]
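# Assumed layout of characters.json (inferred from the keys read above; the
# actual file is not shown in this section):
# [
#   {"image": "path/or/url.png", "title": "Character name", "speaker": "speaker_id"},
#   ...
# ]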
# Cap on how many sentences are synthesized per request (the UI message in
# infer() below mentions keeping only the first 10).
MAX_NUMBER_SENTENCES = 10

tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
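# Hedged sketch of a helper that the UI wiring below references but that is
# not defined in this section: when a new voice sample is uploaded, clear any
# cached .npz speaker embedding from the previous voice folder so Bark
# recomputes it. Replace with the app's real implementation if available.
def wipe_npz_file(folder_path):
    if folder_path and os.path.isdir(folder_path):
        for name in os.listdir(folder_path):
            if name.endswith(".npz"):
                os.remove(os.path.join(folder_path, name))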
def update_selection(selected_state: gr.SelectData):
    # Look up the selected character entry.
    c_image = characters[selected_state.index]["image"]
    c_title = characters[selected_state.index]["title"]
    c_speaker = characters[selected_state.index]["speaker"]
    return c_title, selected_state
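# Hedged sketch of the vocal-isolation helper called by infer() below; the
# real definition is not shown in this section. Assumes the demucs package is
# installed and that two-stem separation writes its output under
# separated/htdemucs/<track>/<stem>.wav (the default layout for recent demucs
# releases). The Gradio "numpy" audio value is a (sample_rate, samples) tuple.
def split_process(audio, stem):
    import subprocess
    import soundfile as sf

    sample_rate, samples = audio
    tmp_path = "sample_to_clean.wav"
    sf.write(tmp_path, samples, sample_rate)

    out_dir = "separated"
    subprocess.run(
        ["python", "-m", "demucs", "--two-stems", stem, "-o", out_dir, tmp_path],
        check=True,
    )
    track = os.path.splitext(os.path.basename(tmp_path))[0]
    return os.path.join(out_dir, "htdemucs", track, f"{stem}.wav")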
def infer(prompt, input_wav_file, clean_audio, hidden_numpy_audio):
    print("""
    -----
    NEW INFERENCE:
    -----
    """)
    if prompt == "":
        gr.Warning("Do not forget to provide a TTS prompt!")

    if clean_audio is True:
        print("We want to clean audio sample")
        # Reuse an already-cleaned sample if one exists for this file name.
        new_name = os.path.splitext(os.path.basename(input_wav_file))[0]
        if os.path.exists(os.path.join("bark_voices", f"{new_name}_cleaned")):
            print("This file has already been cleaned")
            check_name = os.path.join("bark_voices", f"{new_name}_cleaned")
            source_path = os.path.join(check_name, f"{new_name}_cleaned.wav")
        else:
            # Isolate the vocals from the uploaded sample before cloning.
            source_path = split_process(hidden_numpy_audio, "vocals")
            new_path = os.path.join(
                os.path.dirname(source_path), f"{new_name}_cleaned.wav")
            os.rename(source_path, new_path)
            source_path = new_path
    else:
        source_path = input_wav_file

    # Move the reference sample into the folder Bark reads speaker voices from.
    destination_directory = "bark_voices"
    file_name = os.path.splitext(os.path.basename(source_path))[0]
    destination_path = os.path.join(destination_directory, file_name)
    os.makedirs(destination_path, exist_ok=True)
    shutil.move(source_path, os.path.join(
        destination_path, f"{file_name}.wav"))
    # Keep the prompt to a manageable number of sentences.
    sentences = re.split(r'(?<=[.!?])\s+', prompt)
    if len(sentences) > MAX_NUMBER_SENTENCES:
        gr.Info("Your text is too long. To keep this demo enjoyable for everyone, we only kept the first 10 sentences :) Duplicate this space and set MAX_NUMBER_SENTENCES for longer texts ;)")
        prompt = ' '.join(sentences[:MAX_NUMBER_SENTENCES])
gr.Info("Generating audio from prompt") | |
tts.tts_to_file(text=prompt, | |
file_path="output.wav", | |
voice_dir="bark_voices/", | |
speaker=f"{file_name}") | |
contents = os.listdir(f"bark_voices/{file_name}") | |
for item in contents: | |
print(item) | |
print("Preparing final waveform video ...") | |
tts_video = gr.make_waveform(audio="output.wav") | |
print(tts_video) | |
print("FINISHED") | |
return "output.wav", tts_video, gr.update(value=f"bark_voices/{file_name}/{contents[1]}", visible=True), gr.Group.update(visible=True), destination_path | |
prompt_choices = [
    "I am very displeased with the progress being made to finish the cross-town transit line. This has been an embarrassing use of taxpayer dollars.",
    "Yes, John is my friend, but he was never at my house watching the baseball game.",
    "We are expecting a double digit increase in profits by the end of the fiscal year.",
    "Hi Grandma, just calling to ask for money, or I can't see you over the holidays."
]
positive_prompts = {
    prompt_choices[0]: "I am very pleased with the progress being made to finish the cross-town transit line. This has been an excellent use of taxpayer dollars.",
    prompt_choices[1]: "Yes, John is my friend. He was at my house watching the baseball game all night.",
    prompt_choices[2]: "We are expecting a modest single digit increase in profits by the end of the fiscal year.",
    prompt_choices[3]: "Hi Grandma, it's me. Just calling to say I love you, and I can't wait to see you over the holidays."
}
# The "Text to speech prompt" dropdown is created inside the Blocks context
# below; this helper maps a selected prompt to its positive counterpart and
# is wired to the dropdown's change event there.
def update_helper_text(prompt_choice):
    return positive_prompts.get(prompt_choice, '')
css = """ | |
#col-container {max-width: 780px; margin-left: auto; margin-right: auto;} | |
a {text-decoration-line: underline; font-weight: 600;} | |
.mic-wrap > button { | |
width: 100%; | |
height: 60px; | |
font-size: 1.4em!important; | |
} | |
.record-icon.svelte-1thnwz { | |
display: flex; | |
position: relative; | |
margin-right: var(--size-2); | |
width: unset; | |
height: unset; | |
} | |
span.record-icon > span.dot.svelte-1thnwz { | |
width: 20px!important; | |
height: 20px!important; | |
} | |
.animate-spin { | |
animation: spin 1s linear infinite; | |
} | |
@keyframes spin { | |
from { | |
transform: rotate(0deg); | |
} | |
to { | |
transform: rotate(360deg); | |
} | |
} | |
#share-btn-container { | |
display: flex; | |
padding-left: 0.5rem !important; | |
padding-right: 0.5rem !important; | |
background-color: #000000; | |
justify-content: center; | |
align-items: center; | |
border-radius: 9999px !important; | |
max-width: 15rem; | |
height: 36px; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column():
                prompt = Dropdown(
                    label="Text to speech prompt",
                    choices=prompt_choices,
                    elem_id="tts-prompt"
                )
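                # Assumed companion output for update_helper_text (the original
                # wiring referenced a "texts_samples" component that is not
                # defined in this section): shows the positive counterpart of
                # the selected prompt. Label and element id are placeholders.
                texts_samples = gr.Textbox(
                    label="Positive version of the prompt",
                    interactive=False,
                    elem_id="texts-samples"
                )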
                audio_in = gr.Audio(
                    label="WAV voice to clone",
                    type="filepath",
                    source="upload"
                )
                clean_sample = gr.Checkbox(
                    label="Clean sample?", value=False)
                hidden_audio_numpy = gr.Audio(
                    type="numpy", visible=False)
                submit_btn = gr.Button("Submit")
            with gr.Column():
                cloned_out = gr.Audio(
                    label="Text to speech output",
                    visible=False
                )
                video_out = gr.Video(
                    label="Waveform video",
                    elem_id="voice-video-out"
                )
                npz_file = gr.File(
                    label=".npz file",
                    visible=False
                )
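                # Reconstructed share-to-community group (assumed from the
                # share_btn imports, the #share-btn-container CSS rule, and the
                # gr.Group.update(...) value returned by infer); it stays
                # hidden until a result is ready.
                with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
                    community_icon = gr.HTML(community_icon_html)
                    loading_icon = gr.HTML(loading_icon_html)
                    share_button = gr.Button("Share to community", elem_id="share-btn")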
                folder_path = gr.Textbox(visible=False)
    audio_in.change(fn=wipe_npz_file, inputs=[folder_path], queue=False)
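    # Update the helper textbox when a different prompt is picked (wired here,
    # inside the Blocks context, so the event registers on the dropdown above).
    prompt.change(
        fn=update_helper_text,
        inputs=[prompt],
        outputs=[texts_samples],
        queue=False
    )

    # Assumed share-button wiring, using the imported share_js snippet.
    share_button.click(None, [], [], _js=share_js)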
    submit_btn.click(
        fn=infer,
        inputs=[
            prompt,
            audio_in,
            clean_sample,
            hidden_audio_numpy
        ],
        outputs=[
            cloned_out,
            video_out,
            npz_file,
            share_group,
            folder_path
        ]
    )
demo.queue(api_open=False, max_size=10).launch()