import gradio as gr
import torch
import librosa
from stitched_model import CombinedModel
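# CombinedModel lives in the local stitched_model module; it is assumed to
# chain the facebook/mms-1b-all ASR checkpoint with the Sunbird
# sunbird-mul-en-mbart-merged translator: audio -> transcription -> English.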

device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = CombinedModel("facebook/mms-1b-all", "Sunbird/sunbird-mul-en-mbart-merged", device=device)

def transcribe(audio_file_mic=None, audio_file_upload=None):
    if audio_file_mic:
        audio_file = audio_file_mic
    elif audio_file_upload:
        audio_file = audio_file_upload
    else:
        return "Please upload an audio file or record one"

    # MMS expects 16 kHz mono input. Load at the file's native rate
    # (librosa.load would otherwise resample everything to 22,050 Hz)
    # and only resample when necessary.
    speech, sample_rate = librosa.load(audio_file, sr=None, mono=True)
    if sample_rate != 16000:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)
    speech = torch.tensor([speech])  # add a batch dimension: (1, num_samples)

    with torch.no_grad():
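        # CombinedModel is assumed to return the transcription plus a batch of
        # translations; the single-item batch is unwrapped below.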
        transcription, translation = model({"audio": speech})

    return transcription, translation[0]

description = '''Luganda, Runyankore, Lugbara, and Acholi to English speech translation using MMS ASR and the Sunbird translation model.'''

# Example audio files bundled with the app
example_audio_files = [
    "audio/luganda.mp3",  # replace with the path to your own example clip
    # "example_audio_files/example2.wav",  # add further examples here
]

# gr.Interface examples supply one value per input component, in order:
# the microphone input (left empty) first, then the uploaded file.
# To precompute outputs for these examples, pass cache_examples=True below.
examples = [[None, audio_file_path] for audio_file_path in example_audio_files]

iface = gr.Interface(
    fn=transcribe,
    inputs=[
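        # Gradio 3.x API; Gradio 4+ renamed this parameter to sources=["microphone"] / sources=["upload"].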
        gr.Audio(source="microphone", type="filepath", label="Record Audio"),
        gr.Audio(source="upload", type="filepath", label="Upload Audio")
    ],
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="Translation")
    ],
    description=description,
    examples=examples
)
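# launch() serves the app locally; passing share=True would also create a temporary public link.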
iface.launch()