sukantan committed on
Commit
e34d347
·
1 Parent(s): 2364977

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -96
app.py CHANGED
@@ -1,102 +1,19 @@
1
- import gradio as gr
2
- import time
3
- import torch
4
- import scipy.io.wavfile
5
- from espnet2.bin.tts_inference import Text2Speech
6
- from espnet2.utils.types import str_or_none
7
-
8
- tagen = 'kan-bayashi/ljspeech_vits'
9
- vocoder_tagen = "none"
10
-
11
- text2speechen = Text2Speech.from_pretrained(
12
- model_tag=str_or_none(tagen),
13
- vocoder_tag=str_or_none(vocoder_tagen),
14
- device="cpu",
15
- # Only for Tacotron 2 & Transformer
16
- threshold=0.5,
17
- # Only for Tacotron 2
18
- minlenratio=0.0,
19
- maxlenratio=10.0,
20
- use_att_constraint=False,
21
- backward_window=1,
22
- forward_window=3,
23
- # Only for FastSpeech & FastSpeech2 & VITS
24
- speed_control_alpha=1.0,
25
- # Only for VITS
26
- noise_scale=0.333,
27
- noise_scale_dur=0.333,
28
- )
29
-
30
 
31
- tagjp = 'kan-bayashi/jsut_full_band_vits_prosody'
32
- vocoder_tagjp = 'none'
33
 
34
- text2speechjp = Text2Speech.from_pretrained(
35
- model_tag=str_or_none(tagjp),
36
- vocoder_tag=str_or_none(vocoder_tagjp),
37
- device="cpu",
38
- # Only for Tacotron 2 & Transformer
39
- threshold=0.5,
40
- # Only for Tacotron 2
41
- minlenratio=0.0,
42
- maxlenratio=10.0,
43
- use_att_constraint=False,
44
- backward_window=1,
45
- forward_window=3,
46
- # Only for FastSpeech & FastSpeech2 & VITS
47
- speed_control_alpha=1.0,
48
- # Only for VITS
49
- noise_scale=0.333,
50
- noise_scale_dur=0.333,
51
- )
52
 
53
- tagch = 'kan-bayashi/csmsc_full_band_vits'
54
- vocoder_tagch = "none"
 
55
 
56
- text2speechch = Text2Speech.from_pretrained(
57
- model_tag=str_or_none(tagch),
58
- vocoder_tag=str_or_none(vocoder_tagch),
59
- device="cpu",
60
- # Only for Tacotron 2 & Transformer
61
- threshold=0.5,
62
- # Only for Tacotron 2
63
- minlenratio=0.0,
64
- maxlenratio=10.0,
65
- use_att_constraint=False,
66
- backward_window=1,
67
- forward_window=3,
68
- # Only for FastSpeech & FastSpeech2 & VITS
69
- speed_control_alpha=1.0,
70
- # Only for VITS
71
- noise_scale=0.333,
72
- noise_scale_dur=0.333,
73
  )
74
 
75
- def inference(text,lang):
76
- with torch.no_grad():
77
- if lang == "english":
78
- wav = text2speechen(text)["wav"]
79
- scipy.io.wavfile.write("out.wav",text2speechen.fs , wav.view(-1).cpu().numpy())
80
- if lang == "chinese":
81
- wav = text2speechch(text)["wav"]
82
- scipy.io.wavfile.write("out.wav",text2speechch.fs , wav.view(-1).cpu().numpy())
83
- if lang == "japanese":
84
- wav = text2speechjp(text)["wav"]
85
- scipy.io.wavfile.write("out.wav",text2speechjp.fs , wav.view(-1).cpu().numpy())
86
- return "out.wav"
87
- title = "ESPnet2-TTS"
88
- description = "Gradio demo for ESPnet2-TTS: Extending the Edge of TTS Research. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below."
89
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2110.07840' target='_blank'>ESPnet2-TTS: Extending the Edge of TTS Research</a> | <a href='https://github.com/espnet/espnet' target='_blank'>Github Repo</a></p>"
90
-
91
- examples=[['This paper describes ESPnet2-TTS, an end-to-end text-to-speech (E2E-TTS) toolkit. ESPnet2-TTS extends our earlier version, ESPnet-TTS, by adding many new features, including: on-the-fly flexible pre-processing, joint training with neural vocoders, and state-of-the-art TTS models with extensions like full-band E2E text-to-waveform modeling, which simplify the training pipeline and further enhance TTS performance. The unified design of our recipes enables users to quickly reproduce state-of-the-art E2E-TTS results',"english"],['レシピの統一された設計により、ユーザーは最先端のE2E-TTSの結果をすばやく再現できます。また、推論用の統合Pythonインターフェースで事前にトレーニングされたモデルを多数提供し、ユーザーがベースラインサンプルを生成してデモを構築するための迅速な手段を提供します。',"japanese"],['对英语和日语语料库的实验评估表明,我们提供的模型合成了与真实情况相当的话语,达到了最先进的水平',"chinese"]]
92
-
93
- gr.Interface(
94
- inference,
95
- [gr.inputs.Textbox(label="input text",lines=10),gr.inputs.Radio(choices=["english", "chinese", "japanese"], type="value", default="english", label="language")],
96
- gr.outputs.Audio(type="file", label="Output"),
97
- title=title,
98
- description=description,
99
- article=article,
100
- enable_queue=True,
101
- examples=examples
102
- ).launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
from transformers import pipeline

import gradio as gr

# Load the automatic-speech-recognition pipeline once at module import time,
# so the (potentially large) model download/initialization happens at startup
# rather than on every request.
pipe = pipeline(model="Ranjit/Whisper_Small_Odia_10k_steps")  # change to "your-username/the-name-you-picked"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
def transcribe(audio):
    """Run the loaded Whisper pipeline on *audio* and return the transcript.

    ``audio`` is a filepath string (Gradio passes one because the Audio
    input component is configured with ``type="filepath"``).
    """
    return pipe(audio)["text"]
10
 
11
# Wire up the web UI: microphone recording in, transcribed text out.
iface = gr.Interface(
    fn=transcribe,
    # NOTE(review): in Gradio 4.x the `source=` keyword was renamed to
    # `sources=[...]` — confirm which Gradio version this Space pins.
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs="text",
    title="Whisper Small Odia",
    description="Realtime demo for Odia speech recognition using a fine-tuned Whisper small model.",
)

# Start the Gradio server (blocking call).
iface.launch()