# HF Spaces page header (scrape artifact): "Spaces: Running on Zero"
import os
import tempfile

import gradio as gr
import librosa
import moviepy.editor as mp
import spaces
import torch
from deep_translator import GoogleTranslator
from pydub import AudioSegment
from transformers import WhisperProcessor, WhisperForConditionalGeneration
def extract_audio(video_path):
    """Extract the audio track of a video into a temporary WAV file.

    Args:
        video_path: Path to a video file readable by moviepy/ffmpeg.

    Returns:
        Path to a newly created temporary ``.wav`` file containing the
        video's audio. The caller is responsible for deleting it.
    """
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp:
    # the file is created atomically; we only need its path, so close the fd.
    fd, audio_path = tempfile.mkstemp(suffix=".wav")
    os.close(fd)
    video = mp.VideoFileClip(video_path)
    try:
        video.audio.write_audiofile(audio_path)
    finally:
        # Release the ffmpeg reader processes held by the clip.
        video.close()
    return audio_path
@spaces.GPU
def generate_subtitles(audio_path):
    """Transcribe an audio file with Whisper large-v3.

    Decorated with ``@spaces.GPU`` so that on a ZeroGPU Space a GPU is
    actually attached for the duration of the call — without it,
    ``torch.cuda.is_available()`` is False and inference falls back to CPU.

    Args:
        audio_path: Path to an audio file; it is resampled to 16 kHz mono,
            the rate Whisper expects.

    Returns:
        A list with a single segment dict: ``{"start", "end", "text"}``.
        ``end`` is the audio duration in seconds. A more advanced
        implementation would split the transcription into timed segments.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v3").to(device)
    # Load and resample the audio to Whisper's expected 16 kHz.
    audio_input, _ = librosa.load(audio_path, sr=16000)
    input_features = processor(audio_input, sampling_rate=16000, return_tensors="pt").input_features.to(device)
    # Generate token ids, then decode them to text.
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    # Single segment covering the whole clip: duration = samples / sample-rate.
    return [{"start": 0, "end": len(audio_input) / 16000, "text": transcription[0]}]
def translate_subtitles(subtitles, target_language):
    """Translate the text of every subtitle segment.

    Args:
        subtitles: Segment dicts with ``"start"``, ``"end"``, ``"text"`` keys.
        target_language: Target language code accepted by GoogleTranslator
            (e.g. ``"es"``, ``"zh-CN"``); source language is auto-detected.

    Returns:
        New list of segment dicts with the same timings and translated text.
    """
    translator = GoogleTranslator(source='auto', target=target_language)
    return [
        {
            "start": seg["start"],
            "end": seg["end"],
            "text": translator.translate(seg["text"]),
        }
        for seg in subtitles
    ]
def add_subtitles_to_video(video_path, subtitles, output_path):
    """Burn subtitle text onto a video and write the result to disk.

    Args:
        video_path: Path to the source video.
        subtitles: Segment dicts with ``"start"``/``"end"`` times in seconds
            and ``"text"`` to render.
        output_path: Destination path for the H.264/AAC output file.
    """
    video = mp.VideoFileClip(video_path)
    subtitles_clips = [
        mp.TextClip(txt=subtitle["text"], fontsize=24, color='white', bg_color='black', font='Arial')
        .set_position(('center', 'bottom'))
        .set_duration(subtitle["end"] - subtitle["start"])
        .set_start(subtitle["start"])
        for subtitle in subtitles
    ]
    final_video = mp.CompositeVideoClip([video] + subtitles_clips)
    try:
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")
    finally:
        # Close clips to release the ffmpeg reader processes; without this
        # every call leaks subprocesses/file handles.
        final_video.close()
        video.close()
def process_video(video_path, target_language):
    """Full pipeline: transcribe, translate, and burn subtitles onto a video.

    Args:
        video_path: Path to the input video file.
        target_language: Language code for the translated subtitles.

    Returns:
        Path to a temporary ``.mp4`` with the translated subtitles burned in.
    """
    # Extract audio from the video into a temp WAV.
    audio_path = extract_audio(video_path)
    try:
        # Generate subtitles using Whisper.
        subtitles = generate_subtitles(audio_path)
    finally:
        # The intermediate WAV is only needed for transcription — delete it
        # so repeated requests don't accumulate temp files.
        os.remove(audio_path)
    # Translate subtitles.
    translated_subtitles = translate_subtitles(subtitles, target_language)
    # mkstemp instead of the deprecated, race-prone tempfile.mktemp.
    fd, output_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)
    add_subtitles_to_video(video_path, translated_subtitles, output_path)
    return output_path
def gradio_interface(video, target_language):
    """Gradio callback: run the subtitle pipeline on the uploaded video.

    Gradio 4.x passes a ``gr.Video`` input as a filepath ``str``; older
    versions passed a tempfile-like object exposing ``.name``. Accept both
    so ``video.name`` doesn't raise ``AttributeError`` on current Gradio.
    """
    video_path = video if isinstance(video, str) else video.name
    return process_video(video_path, target_language)
# Language codes offered for translation targets.
TARGET_LANGUAGES = ["es", "fr", "de", "it", "ja", "ko", "zh-CN"]

# Wire the pipeline into a simple upload-video / download-video UI.
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Video(label="Upload Video"),
        gr.Dropdown(choices=TARGET_LANGUAGES, label="Target Language"),
    ],
    outputs=gr.Video(label="Processed Video"),
    title="Video Subtitle Translator",
    description="Upload a video, and get it back with translated subtitles!",
)

iface.launch()