LPhilp1943 committed on
Commit
41ad754
1 Parent(s): 13d82ac
Files changed (1) hide show
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torchaudio
3
+ from speechbrain.pretrained import EncoderClassifier, Tacotron2, HIFIGAN, ASR
4
+ import os
5
+ import soundfile as sf
6
+
7
+ # Ensure output directory exists
8
+ os.makedirs("output_audio", exist_ok=True)
9
+
10
+ # Load models
11
+ encoder = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", savedir="models/encoder")
12
+ tacotron2 = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="models/tacotron2")
13
+ hifigan = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="models/hifigan")
14
+ asr = ASR.from_hparams(source="speechbrain/asr-transformer-librispeech", savedir="models/asr")
15
+
16
def speech_to_text(input_audio):
    """Transcribe an audio file to text.

    Args:
        input_audio: Path to the audio file to transcribe.

    Returns:
        The transcription string produced by the pretrained ASR model.
    """
    # BUG FIX: the original also called torchaudio.load() here but never used
    # the result — transcribe_file() loads the file itself, so the extra
    # decode was pure wasted work.
    transcription = asr.transcribe_file(input_audio)
    return transcription
20
+
21
def speech_to_speech(input_audio, target_text):
    """Synthesize `target_text` as speech, conditioned on the input speaker.

    Args:
        input_audio: Path to a reference audio file for the speaker.
        target_text: Text to synthesize.

    Returns:
        Path to the synthesized WAV file.
    """
    # Load the reference audio and resample to the 16 kHz rate the
    # ECAPA speaker encoder was trained on.
    signal, fs = torchaudio.load(input_audio)
    if fs != 16000:
        signal = torchaudio.transforms.Resample(orig_freq=fs, new_freq=16000)(signal)
    # NOTE(review): the LJSpeech Tacotron2 model is single-speaker, so this
    # embedding cannot currently influence the synthesized voice; a
    # multi-speaker TTS model would be needed for true voice transfer.
    embedding = encoder.encode_batch(signal)

    # BUG FIX: Tacotron2.encode_text() accepts only the text argument —
    # passing the speaker embedding as a second positional argument raised
    # TypeError on every call.
    mel_output, mel_length, alignment = tacotron2.encode_text(target_text)
    waveform = hifigan.decode_batch(mel_output)

    # Save at 22050 Hz, the native rate of the LJSpeech Tacotron2/HiFi-GAN pair.
    output_path = "output_audio/synthesized_speech.wav"
    sf.write(output_path, waveform.squeeze().cpu().numpy(), 22050)
    return output_path
36
+
37
def text_to_speech(text):
    """Render `text` as speech and save it as a WAV file.

    Args:
        text: The text to synthesize.

    Returns:
        Path to the written WAV file.
    """
    # Text -> mel spectrogram (lengths and alignments are not needed here).
    spectrogram, _lengths, _alignments = tacotron2.encode_text(text)
    # Mel spectrogram -> waveform via the HiFi-GAN vocoder.
    audio = hifigan.decode_batch(spectrogram)

    out_file = "output_audio/text_to_speech.wav"
    # 22050 Hz is the sample rate used by the LJSpeech TTS models.
    sf.write(out_file, audio.squeeze().cpu().numpy(), 22050)
    return out_file
44
+
45
# BUG FIX: gr.Interface does not accept dicts for fn/inputs/outputs — the
# original construction raised immediately. Build one Interface per task and
# combine them with gr.TabbedInterface. Also replaced the removed
# gr.inputs/gr.outputs namespaces, `source="upload"`, `type="file"`, and the
# unsupported `layout=` argument with the modern component API.
stt_tab = gr.Interface(
    fn=speech_to_text,
    inputs=gr.Audio(type="filepath", label="Input Audio"),
    outputs=gr.Textbox(label="Transcription"),
    description="Upload an audio file to transcribe it to text.",
)

tts_tab = gr.Interface(
    fn=text_to_speech,
    inputs=gr.Textbox(label="Text"),
    outputs=gr.Audio(type="filepath", label="Synthesized Speech"),
    description="Enter text to synthesize it as speech.",
)

sts_tab = gr.Interface(
    fn=speech_to_speech,
    inputs=[
        gr.Audio(type="filepath", label="Input Audio"),
        gr.Textbox(label="Target Text"),
    ],
    outputs=gr.Audio(type="filepath", label="Synthesized Speech"),
    description="Upload a reference voice and enter text to synthesize.",
)

iface = gr.TabbedInterface(
    [stt_tab, tts_tab, sts_tab],
    tab_names=["Speech to Text", "Text to Speech", "Speech to Speech"],
    title="Speech Processing App",
)

if __name__ == "__main__":
    iface.launch()