import gradio as gr
import torch
import librosa
import soundfile
import nemo.collections.asr as nemo_asr
import tempfile
import os
import uuid

SAMPLE_RATE = 16000
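
# Load the pretrained English Conformer-Transducer (RNNT) model, reset the decoding
# strategy to the model's default, and switch to eval mode for inference.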
model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("stt_en_conformer_transducer_large")
model.change_decoding_strategy(None)
model.eval()


def process_audio_file(file):
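    # Decode the input audio and convert it to the 16 kHz mono format the model expects.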
    data, sr = librosa.load(file)

    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)

    data = librosa.to_mono(data)
    return data


def transcribe(Microphone, File_Upload):
    warn_output = ""
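    # If both inputs are provided, prefer the microphone recording and warn the user.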
    if (Microphone is not None) and (File_Upload is not None):
        warn_output = "WARNING: You've uploaded an audio file and used the microphone. " \
                      "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        file = Microphone

    elif (Microphone is None) and (File_Upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    elif Microphone is not None:
        file = Microphone
    else:
        file = File_Upload

    audio_data = process_audio_file(file)
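
    # Write the resampled audio to a temporary WAV file and transcribe it from disk.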
    with tempfile.TemporaryDirectory() as tmpdir:
        audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
        soundfile.write(audio_path, audio_data, SAMPLE_RATE)

        transcriptions = model.transcribe([audio_path])
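
    # Some NeMo versions return a (best hypotheses, all hypotheses) tuple for RNNT models;
    # keep only the best hypotheses.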
    if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
        transcriptions = transcriptions[0]

    return warn_output + transcriptions[0]
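

# Build the Gradio demo: optional microphone and file-upload inputs, plain-text transcript output.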
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type='filepath', optional=True),
        gr.inputs.Audio(source="upload", type='filepath', optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="NeMo Conformer Transducer Large - English",
    description="Demo for English speech recognition using Conformer Transducers",
    allow_flagging='never',
)

iface.launch(enable_queue=True)