import gradio as gr
import torch
import librosa
import json
from transformers import pipeline
from stitched_model import CombinedModel

device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = CombinedModel("indonesian-nlp/wav2vec2-luganda", "Sunbird/sunbird-mul-en-mbart-merged", device=device)
def transcribe(audio_file_mic=None, audio_file_upload=None):
    # Prefer the microphone recording, fall back to an uploaded file
    if audio_file_mic:
        audio_file = audio_file_mic
    elif audio_file_upload:
        audio_file = audio_file_upload
    else:
        # Return one value per output textbox (Transcription, Translation)
        return "Please upload an audio file or record one", ""
    # Load the audio file, resampled to 16 kHz mono
    speech, sample_rate = librosa.load(audio_file, sr=16000, mono=True)

    # Split the audio into 30-second chunks (30 s * 16,000 samples/s)
    chunk_size = 30 * 16000
    chunks = [speech[i:i + chunk_size] for i in range(0, len(speech), chunk_size)]

    # Transcribe and translate each chunk, then concatenate the results
    transcriptions = []
    translations = []
    for chunk in chunks:
        chunk = torch.tensor([chunk])
        with torch.no_grad():
            transcription, translation = model({"audio": chunk})
        transcriptions.append(transcription)
        translations.append(translation[0])

    # Join chunk outputs with spaces so words at chunk boundaries stay separated
    transcription = " ".join(transcriptions)
    translation = " ".join(translations)
    return transcription, translation
description = '''Luganda to English Speech Translation'''

iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Record Audio"),
        gr.Audio(source="upload", type="filepath", label="Upload Audio"),
    ],
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="Translation"),
    ],
    description=description,
)

iface.launch()
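
For a quick check without the web UI, the transcribe function can also be called directly on a local file path. This is only a sketch: "sample_luganda.wav" is a hypothetical placeholder, not a file shipped with the Space, and these lines would be run in place of iface.launch() (which blocks) rather than after it.

# Minimal local smoke test (sketch); the audio path is a hypothetical placeholder.
luganda_text, english_text = transcribe(audio_file_upload="sample_luganda.wav")
print("Transcription:", luganda_text)
print("Translation:", english_text)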