"""Gradio ZeroGPU demo for the collabora/whisperspeech text-to-speech model."""
import os
import tempfile
import wave

import spaces
import gradio as gr
import torch
import torch.nn.functional as F
from whisperspeech.languages import LANGUAGES
from whisperspeech.pipeline import Pipeline

title = """#🙋🏻‍♂️ Welcome to🌟Tonic's🌬️💬📝WhisperSpeech

You can use this ZeroGPU Space to test out the current model [🌬️💬📝collabora/whisperspeech](https://huggingface.co./collabora/whisperspeech). 🌬️💬📝collabora/whisperspeech is An Open Source text-to-speech system built by inverting Whisper. Previously known as spear-tts-pytorch. It's like Stable Diffusion but for speech – both powerful and easily customizable.

You can also use 🌬️💬📝WhisperSpeech by cloning this space. 🧬🔬🔍 Simply click here: Duplicate Space

Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co./TeamTonic) & [MultiTransformer](https://huggingface.co./MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""

# Cache the (large) pipeline across calls so the model is only loaded once
# instead of on every generation request.
_pipe = None


def _get_pipeline():
    """Return the shared WhisperSpeech pipeline, loading it on first use."""
    global _pipe
    if _pipe is None:
        _pipe = Pipeline(s2a_ref='collabora/whisperspeech:s2a-q4-tiny-en+pl.model')
    return _pipe


@spaces.GPU
def whisper_speech_demo(text, lang, speaker_audio=None, mix_lang=None, mix_text=None):
    """Synthesize speech from *text* and return the path to a wav file.

    Parameters
    ----------
    text : str
        Text to speak (in single-language mode), or the first segment in
        mixed-language mode.
    lang : str
        Language code for *text* (a key of ``whisperspeech.languages.LANGUAGES``).
    speaker_audio : str | file-like | None
        Optional reference audio for voice cloning, as delivered by the
        ``gr.File`` input.
    mix_lang, mix_text : str | None
        Optional comma-separated extra language codes and text segments;
        when both are given, a mixed-language utterance is generated.

    Returns
    -------
    str
        Path to a temporary 16-bit PCM wav file with the generated speech.
    """
    pipe = _get_pipeline()

    # Optional voice cloning: depending on the Gradio version, a File input
    # arrives either as a plain str path or as a tempfile wrapper exposing
    # `.name` — accept both.
    speaker_url = None
    if speaker_audio is not None:
        speaker_url = speaker_audio if isinstance(speaker_audio, str) else speaker_audio.name

    if mix_lang and mix_text:
        # Mixed-language mode: generate semantic tokens for all segments,
        # then vocode them as one utterance.
        mixed_langs = lang.split(',') + mix_lang.split(',')
        mixed_texts = [text] + mix_text.split(',')
        stoks = pipe.t2s.generate(mixed_texts, lang=mixed_langs)
        audio_data = pipe.generate(stoks, speaker_url, lang=mixed_langs[0])
    else:
        audio_data = pipe.generate(text, speaker_url, lang)

    # BUG FIX: pipe.generate returns a float torch tensor, not wav bytes.
    # The original code wrote the tensor object straight to disk, producing
    # an unplayable file. Convert to 16-bit PCM and write a proper wav.
    pcm = (audio_data.detach().cpu().squeeze().clamp(-1.0, 1.0) * 32767).to(torch.int16)
    with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
        tmp_file_name = tmp_file.name
    with wave.open(tmp_file_name, 'wb') as wav_file:
        wav_file.setnchannels(1)
        wav_file.setsampwidth(2)  # 16-bit samples
        # NOTE(review): WhisperSpeech's vocoder emits 24 kHz audio — confirm
        # against the installed whisperspeech version.
        wav_file.setframerate(24000)
        wav_file.writeframes(pcm.numpy().tobytes())
    return tmp_file_name


with gr.Blocks() as demo:
    gr.Markdown(title)
    with gr.Row():
        text_input = gr.Textbox(label="Enter text")
        lang_input = gr.Dropdown(choices=list(LANGUAGES.keys()), label="Language")
        # BUG FIX: `accepts` is not a gr.File parameter and `type="file"` is
        # removed in Gradio 4; `file_types` is the supported way to restrict
        # uploads to audio files.
        speaker_input = gr.File(label="Upload Speaker Audio (optional)", file_types=["audio"])
    with gr.Row():
        mix_lang_input = gr.Textbox(label="Mixed Languages (optional, comma-separated)", placeholder="e.g., en,pl")
        mix_text_input = gr.Textbox(label="Mixed Texts (optional, for mixed languages)", placeholder="e.g., Hello, Cześć")
    with gr.Row():
        submit_button = gr.Button("Generate Speech")
        output_audio = gr.Audio(label="Generated Speech")

    submit_button.click(
        whisper_speech_demo,
        inputs=[text_input, lang_input, speaker_input, mix_lang_input, mix_text_input],
        outputs=output_audio,
    )

demo.launch()