add deep-translator
app.py CHANGED
@@ -2,48 +2,54 @@ import gradio as gr
 import numpy as np
 import torch
 from datasets import load_dataset
-
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
-
+from deep_translator import GoogleTranslator
+from transformers import (
+    AutoTokenizer,
+    SpeechT5ForTextToSpeech,
+    SpeechT5HifiGan,
+    SpeechT5Processor,
+    VitsModel,
+    pipeline,
+)
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
+# device = "cpu"
 # load speech translation checkpoint
-asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
+asr_pipe = pipeline("automatic-speech-recognition",
+                    model="openai/whisper-base", device=device)
 
-# load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-
-model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts").to(device)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
-
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+# load text-to-speech mms-tts-id model (speaker embeddings included)
+model = VitsModel.from_pretrained("facebook/mms-tts-ind")
+tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-ind")
 
 
 def translate(audio):
-    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "translate"})
+    outputs = asr_pipe(audio, max_new_tokens=256,
+                       generate_kwargs={"task": "translate"})
     return outputs["text"]
 
 
 def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
-    return speech.cpu()
+    inputs = tokenizer(text=text, return_tensors="pt")
+    with torch.no_grad():
+        speech = model(**inputs).waveform
+    return speech.reshape(-1, 1).cpu()
 
 
 def speech_to_speech_translation(audio):
     translated_text = translate(audio)
-    synthesised_speech = synthesise(translated_text)
+    google_translated = GoogleTranslator(
+        source="en", target="id").translate(translated_text)
+    synthesised_speech = synthesise(google_translated)
     synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
     return 16000, synthesised_speech
 
 
 title = "Cascaded STST"
 description = """
-Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in English. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's
-[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech:
-
+Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Indonesian. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, Google Translate (via deep-translator) for English-to-Indonesian text translation, and Meta's
+[MMS TTS IND](https://huggingface.co/facebook/mms-tts-ind) model for text-to-speech:
 ![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
 """
 
@@ -51,7 +57,7 @@ demo = gr.Blocks()
 
 mic_translate = gr.Interface(
     fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="microphone", type="filepath"),
+    inputs=gr.Audio(sources="microphone", type="filepath"),
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
     title=title,
     description=description,
@@ -59,7 +65,7 @@ mic_translate = gr.Interface(
 
 file_translate = gr.Interface(
     fn=speech_to_speech_translation,
-    inputs=gr.Audio(source="upload", type="filepath"),
+    inputs=gr.Audio(sources="upload", type="filepath"),
     outputs=gr.Audio(label="Generated Speech", type="numpy"),
     examples=[["./example.wav"]],
     title=title,
@@ -67,6 +73,7 @@ file_translate = gr.Interface(
 )
 
 with demo:
-    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
+    gr.TabbedInterface([mic_translate, file_translate],
+                       ["Microphone", "Audio File"])
 
 demo.launch()
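The text-translation stage this commit adds is easy to sanity-check in isolation. A minimal sketch, assuming `deep-translator` is installed and Google Translate is reachable (it is a live web service, so exact output wording may vary):

```python
from deep_translator import GoogleTranslator

# Same source/target pair as speech_to_speech_translation() in app.py:
# Whisper already emits English, which is then translated to Indonesian.
translator = GoogleTranslator(source="en", target="id")
print(translator.translate("Hello, how are you?"))  # e.g. "Halo, apa kabar?"
```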
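The hard-coded 16000 returned by `speech_to_speech_translation` has to match the MMS-TTS checkpoint's output rate. Rather than trusting the constant, the rate can be read from the model config; a small sketch against the same `facebook/mms-tts-ind` checkpoint:

```python
from transformers import VitsModel

model = VitsModel.from_pretrained("facebook/mms-tts-ind")
print(model.config.sampling_rate)  # 16000 for the MMS-TTS checkpoints
```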
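For an end-to-end check without launching the Gradio UI, the cascade can be driven directly. A sketch that assumes the definitions from app.py above have been run in the current session (importing app.py as a module would also trigger `demo.launch()`) and that the bundled `./example.wav` exists:

```python
import scipy.io.wavfile as wavfile

# Whisper (speech -> English text) -> Google Translate (-> Indonesian text)
# -> MMS-TTS (text -> waveform); returns (sample_rate, int16 numpy array).
rate, speech = speech_to_speech_translation("./example.wav")
wavfile.write("translated.wav", rate, speech)
```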