import gradio as gr
import torch
import librosa
from stitched_model import CombinedModel

device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = CombinedModel("facebook/mms-1b-all", "Sunbird/sunbird-mul-en-mbart-merged", device=device)


def transcribe(audio_file_mic=None, audio_file_upload=None):
    if audio_file_mic:
        audio_file = audio_file_mic
    elif audio_file_upload:
        audio_file = audio_file_upload
    else:
        return "Please upload an audio file or record one", ""

    # Load at the native sample rate, then resample to the 16kHz the model expects
    speech, sample_rate = librosa.load(audio_file, sr=None)
    if sample_rate != 16000:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)

    speech = torch.tensor([speech])

    with torch.no_grad():
        transcription, translation = model({"audio": speech})

    return transcription, translation[0]


description = '''Luganda to English Speech Translation'''

# Example inputs: one entry per Interface input (microphone, upload)
examples = [
    [None, "audio/luganda.mp3"],  # Replace with the path to your example audio file
]

iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", label="Record Audio"),
        gr.Audio(source="upload", type="filepath", label="Upload Audio"),
    ],
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="Translation"),
    ],
    description=description,
    examples=examples,
    cache_examples=True,  # run the examples through the model once so their outputs are shown
)

iface.launch()