# WhisperSpeech / app.py — Hugging Face Space demo by Tonic
# (Revision 65a292c, "Update app.py"; original upload ~4.94 kB.)
import spaces
import tempfile
import gradio as gr
import os
from whisperspeech.pipeline import Pipeline
import torch
import soundfile as sf
import numpy as np
import torch.nn.functional as F
from whisperspeech.languages import LANGUAGES
from whisperspeech.pipeline import Pipeline
# Markdown banner rendered at the top of the UI by gr.Markdown(title) below.
# NOTE: this is a runtime string shown to users — emoji, links and badge HTML
# are intentional and must be preserved verbatim.
title = """# 🙋🏻‍♂️ Welcome to🌟Tonic's🌬️💬📝WhisperSpeech
You can use this ZeroGPU Space to test out the current model [🌬️💬📝collabora/whisperspeech](https://huggingface.co./collabora/whisperspeech). 🌬️💬📝collabora/whisperspeech is An Open Source text-to-speech system built by inverting Whisper. Previously known as spear-tts-pytorch. It's like Stable Diffusion but for speech – both powerful and easily customizable.
You can also use 🌬️💬📝WhisperSpeech by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co./spaces/Tonic/laion-whisper?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co./TeamTonic) & [MultiTransformer](https://huggingface.co./MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""
@spaces.GPU
def whisper_speech_demo(text, lang, speaker_audio=None, mix_lang=None, mix_text=None):
    """Synthesize *text* with WhisperSpeech and return the path to a WAV file.

    Parameters
    ----------
    text : str
        Primary text to speak.
    lang : str
        Primary language code (a key of ``whisperspeech.languages.LANGUAGES``).
    speaker_audio : str | None
        Path to a reference recording for voice cloning. ``gr.Audio`` with
        ``type="filepath"`` supplies a plain path string (or ``None``).
    mix_lang : str | None
        Comma-separated extra language codes for mixed-language synthesis.
    mix_text : str | None
        Comma-separated extra phrases, paired positionally with *mix_lang*.

    Returns
    -------
    str
        Path to a temporary 24 kHz ``PCM_24`` WAV file (caller owns cleanup).
    """
    pipe = Pipeline()
    # type="filepath" hands us a str, not a tempfile object — the original
    # `speaker_audio.name` raised AttributeError on every cloning request.
    speaker_url = speaker_audio if speaker_audio else None
    if mix_lang and mix_text:
        # Pair the primary text/lang with the extra comma-separated entries,
        # then run T2S explicitly so one call covers all languages.
        mixed_langs = lang.split(',') + mix_lang.split(',')
        mixed_texts = [text] + mix_text.split(',')
        stoks = pipe.t2s.generate(mixed_texts, lang=mixed_langs)
        audio_data = pipe.generate(stoks, speaker_url, lang=mixed_langs[0])
    else:
        audio_data = pipe.generate(text, speaker_url, lang)
    # The original piped the audio through an undefined `resampler` helper,
    # which raised NameError before any file was written. Write the pipeline
    # output directly instead.
    # NOTE(review): 24000 Hz assumed from the WhisperSpeech vocoder output —
    # confirm against the installed whisperspeech version.
    audio_np = audio_data.cpu().numpy()
    # Normalize only when the signal actually clips outside [-1.0, 1.0].
    peak = np.max(np.abs(audio_np)) if audio_np.size else 0.0
    if peak > 1.0:
        audio_np = audio_np / peak
    with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
        sf.write(tmp_file.name, audio_np, 24000, 'PCM_24')
        return tmp_file.name
# Gradio UI: two tabs sharing the whisper_speech_demo backend.
with gr.Blocks() as demo:
    gr.Markdown(title)
    with gr.Tabs():
        with gr.TabItem("🌬️💬📝Standard TTS"):
            with gr.Row():
                text_input_standard = gr.Textbox(label="Enter text")
                lang_input_standard = gr.Dropdown(choices=list(LANGUAGES.keys()), label="Language")
                speaker_input_standard = gr.Audio(label="Upload or Record Speaker Audio (optional)", sources=["upload", "microphone"], type="filepath")
            generate_button_standard = gr.Button("Generate Speech")
            output_audio_standard = gr.Audio(label="🌬️💬📝WhisperSpeech")

            def _standard_tts(text, lang, speaker_path):
                # Adapter: the backend takes five parameters; this tab only
                # supplies the first three (no mixed-language inputs).
                return whisper_speech_demo(text, lang, speaker_path)

            # The original listed literal `None` entries in `inputs=`, which
            # Gradio rejects — event inputs must be components only.
            generate_button_standard.click(
                _standard_tts,
                inputs=[text_input_standard, lang_input_standard, speaker_input_standard],
                outputs=output_audio_standard,
            )
        with gr.TabItem("🌬️💬📝Mixed Language TTS"):
            with gr.Row():
                mix_text_input_mixed = gr.Textbox(label="Enter mixed language text", placeholder="e.g., Hello, Cześć")
                mix_lang_input_mixed = gr.CheckboxGroup(choices=list(LANGUAGES.keys()), label="Select Languages")
            generate_button_mixed = gr.Button("Generate Mixed Speech")
            output_audio_mixed = gr.Audio(label="Mixed🌬️💬📝WhisperSpeech")

            def _mixed_tts(mixed_text, selected_langs):
                # CheckboxGroup yields a list of codes, but the backend expects
                # comma-separated strings (it calls .split(',')) and a required
                # primary text/lang — the original wired None into both, so the
                # mixed tab could never run. Split the work here instead.
                langs = list(selected_langs or [])
                texts = [t.strip() for t in (mixed_text or "").split(",") if t.strip()]
                if not langs or not texts:
                    raise ValueError("Enter text and select at least one language.")
                extra_langs = ",".join(langs[1:]) or None
                extra_texts = ",".join(texts[1:]) or None
                return whisper_speech_demo(texts[0], langs[0], None, extra_langs, extra_texts)

            generate_button_mixed.click(
                _mixed_tts,
                inputs=[mix_text_input_mixed, mix_lang_input_mixed],
                outputs=output_audio_mixed,
            )
demo.launch()