import spaces
import tempfile
import gradio as gr
import numpy as np
import soundfile as sf
from whisperspeech.languages import LANGUAGES
from whisperspeech.pipeline import Pipeline
from whisperspeech.utils import resampler

title = """# 🙋🏻‍♂️ Welcome to🌟Tonic's🌬️💬📝WhisperSpeech

You can use this ZeroGPU Space to test out the current model [🌬️💬📝collabora/whisperspeech](https://huggingface.co./collabora/whisperspeech). 🌬️💬📝collabora/whisperspeech is an open-source text-to-speech system built by inverting Whisper, previously known as spear-tts-pytorch. It's like Stable Diffusion but for speech: both powerful and easily customizable.
You can also use 🌬️💬📝WhisperSpeech by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co./spaces/Tonic/laion-whisper?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builders'🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Hugging Face: [TeamTonic](https://huggingface.co./TeamTonic) & [MultiTransformer](https://huggingface.co./MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗 Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
"""

@spaces.GPU
def whisper_speech_demo(text, lang, speaker_audio, mix_lang, mix_text):
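    """Synthesize speech with WhisperSpeech and return the path of a WAV file.

    Standard mode: synthesize `text` in `lang`, optionally cloning the voice from
    `speaker_audio` (a file path to reference audio).
    Mixed mode: when both `mix_lang` and `mix_text` are provided, the selected
    languages and the comma-separated texts are synthesized together instead.
    The output is written as 16-bit stereo PCM at 24 kHz.
    """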
    print(f"Text: {text}, Lang: {lang}, Speaker Audio: {speaker_audio}, Mix Lang: {mix_lang}, Mix Text: {mix_text}")
    pipe = Pipeline()
    speaker_url = speaker_audio  # file path to the reference audio, or None
    # Only validate `lang` on the standard path; the mixed-language tab passes a
    # hidden, empty dropdown for this argument.
    if not (mix_lang and mix_text):
        if isinstance(lang, list):
            if not lang:
                raise ValueError("Language list is empty.")
            lang = lang[0]
        elif not isinstance(lang, str):
            raise ValueError("Language is not specified correctly.")

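    # Mixed-language path: generate semantic tokens for each text/language pair with
    # the T2S model, then run them through the rest of the pipeline using the first
    # selected language.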
    if mix_lang and mix_text:
        mixed_langs = mix_lang.split(',') if isinstance(mix_lang, str) else mix_lang
        mixed_texts = [t.strip() for t in mix_text.split(',')]
        stoks = pipe.t2s.generate(mixed_texts, lang=mixed_langs)
        audio_data = pipe.generate(stoks, speaker_url, lang=mixed_langs[0])
    else:
        audio_data = pipe.generate(text, speaker_url, lang)

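    # Post-processing: resample to 24 kHz (the pipeline's native rate), peak-normalize,
    # duplicate the mono signal into two channels, and write a 16-bit PCM WAV for Gradio.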
    resample_audio = resampler(newsr=24000)
    audio_data_resampled = next(resample_audio([{'sample_rate': 24000, 'samples': audio_data.cpu()}]))['samples_24k']
    audio_np = audio_data_resampled.cpu().numpy()
    peak = np.max(np.abs(audio_np))
    if peak > 0:
        audio_np = audio_np / peak  # peak-normalize; skip if the output is all zeros
    audio_np = np.asarray(audio_np, dtype=np.float32)
    
    audio_stereo = np.stack((audio_np, audio_np), axis=-1)
    audio_stereo = audio_stereo.reshape(-1, 2)
    
    print("Audio Array Shape:", audio_stereo.shape)
    print("Audio Array Dtype:", audio_stereo.dtype)
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
        sf.write(tmp_file.name, audio_stereo, 24000, format='WAV', subtype='PCM_16')
    return tmp_file.name

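# Two-tab Gradio UI. Both tabs call the same whisper_speech_demo function; hidden
# placeholder components fill in the arguments that the other tab does not use.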
with gr.Blocks() as demo:
    gr.Markdown(title)

    with gr.Tabs():
        with gr.TabItem("🌬️💬📝Standard TTS"):
            with gr.Row():
                text_input_standard = gr.Textbox(label="Enter text")
                lang_input_standard = gr.Dropdown(choices=list(LANGUAGES.keys()), label="Language")
                speaker_input_standard = gr.Audio(label="Upload or Record Speaker Audio (optional)", sources=["upload", "microphone"], type="filepath")
                placeholder_mix_lang = gr.Textbox(visible=False) 
                placeholder_mix_text = gr.Textbox(visible=False) 
                generate_button_standard = gr.Button("Generate Speech")
            output_audio_standard = gr.Audio(label="🌬️💬📝WhisperSpeech")
    
            generate_button_standard.click(
                whisper_speech_demo,
                inputs=[text_input_standard, lang_input_standard, speaker_input_standard, placeholder_mix_lang, placeholder_mix_text],
                outputs=output_audio_standard
            )
    
        with gr.TabItem("🌬️💬📝Mixed Language TTS"):
            with gr.Row():
                placeholder_text_input = gr.Textbox(visible=False)
                placeholder_lang_input = gr.Dropdown(choices=[], visible=False)  
                placeholder_speaker_input = gr.Audio(visible=False)  
                mix_lang_input_mixed = gr.CheckboxGroup(choices=list(LANGUAGES.keys()), label="Select Languages")
                mix_text_input_mixed = gr.Textbox(label="Enter mixed language text", placeholder="e.g., Hello, Cześć")
                generate_button_mixed = gr.Button("Generate Mixed Speech")
            output_audio_mixed = gr.Audio(label="Mixed🌬️💬📝WhisperSpeech")
    
            generate_button_mixed.click(
                whisper_speech_demo,
                inputs=[placeholder_text_input, placeholder_lang_input, placeholder_speaker_input, mix_lang_input_mixed, mix_text_input_mixed],
                outputs=output_audio_mixed
            )

demo.launch()