capstonedubtrack committed
Commit · e06d9ec
1 Parent(s): 9ca8aff
Update app.py
app.py CHANGED
@@ -62,8 +62,9 @@ def inference(language,speed,voice,video ):
     translation = translator.translate(text, src = 'en', dest='te', slow=con)
     tts = gTTS(translation.text, lang= "hi")
     tts.save('input_audio.wav')
+    audio = "input_audio.wav"
     os.system('mv ./Wav2Lip/* .')
-    os.system("python inference.py --checkpoint_path ./wav2lip_gan.pth --
+    os.system("python inference.py --checkpoint_path ./wav2lip_gan.pth --face {} --audio {}".format(video, audio))
     return "./results/result_voice.mp4"
 
 iface = gr.Interface(inference, inputs=[gr.inputs.Radio(["Tamil", "Hindi", "Bengali", "Telugu"], label = "Enter language to translate to"), gr.inputs.Radio(["Slow", "Fast"], label = "Enter speaking speed"), gr.inputs.Radio(["Male", "Female"], label = "Enter preferred voice"), gr.inputs.Video(type="mp4", source="upload", label="Video to be Translated", optional=False)], outputs=["video"], title=title, description=description, article=article, enable_queue=True)
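For reference, here is a minimal sketch of the same Wav2Lip invocation using subprocess with an argument list instead of the os.system string built in the commit. The checkpoint, audio, and output paths are taken from the diff above; run_wav2lip is a hypothetical helper name, not part of app.py. Passing the arguments as a list keeps an uploaded video path containing spaces intact, which the "{}".format(video, audio) interpolation does not guarantee.

import subprocess

def run_wav2lip(video_path, audio_path="input_audio.wav"):
    # Build the same command as the os.system call in the diff,
    # but as an argument list so no shell quoting is needed.
    cmd = [
        "python", "inference.py",
        "--checkpoint_path", "./wav2lip_gan.pth",
        "--face", video_path,
        "--audio", audio_path,
    ]
    subprocess.run(cmd, check=True)  # raises CalledProcessError if Wav2Lip fails
    # Wav2Lip writes its output here by default, matching the value
    # returned by inference() in the diff above.
    return "./results/result_voice.mp4"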