# speech_to_text / app.py
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer  # unused below; see the translation sketch after parse_transcription
import gradio as gr
import sox
def convert(inputfile, outfile):
    """Resample the input audio to 16 kHz mono 16-bit PCM WAV, which wav2vec2 expects."""
    sox_tfm = sox.Transformer()
    sox_tfm.set_output_format(
        file_type="wav", channels=1, encoding="signed-integer", rate=16000, bits=16
    )
    sox_tfm.build(inputfile, outfile)
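# If sox is not installed, the same 16 kHz mono conversion can be done with
# torchaudio instead. This is a hedged sketch, not part of the original app;
# convert_torchaudio is a hypothetical name and torchaudio is an assumed dependency.
#
#   import torchaudio
#
#   def convert_torchaudio(inputfile, outfile):
#       waveform, rate = torchaudio.load(inputfile)
#       waveform = waveform.mean(dim=0, keepdim=True)  # downmix to mono
#       waveform = torchaudio.functional.resample(waveform, orig_freq=rate, new_freq=16000)
#       torchaudio.save(outfile, waveform, 16000)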
# Load the pretrained English ASR model (wav2vec2 with a CTC head) once at startup.
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
def parse_transcription(wav_file):
    """Transcribe a recorded audio file and return text for both output boxes."""
    filename = wav_file.name.split('.')[0]
    convert(wav_file.name, filename + "16k.wav")
    speech, _ = sf.read(filename + "16k.wav")
    input_values = processor(speech, sampling_rate=16_000, return_tensors="pt").input_values
    with torch.no_grad():  # inference only; no gradients needed
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
    transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
    # The interface declares two text outputs, so return the transcription twice;
    # the original one-element tuple would not match the two output components.
    return transcription, transcription
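# The M2M100 imports and the "Translation" wording in the description suggest a
# translation step that was never wired in. A hypothetical sketch of how it could
# feed the second output box; the model name facebook/m2m100_418M, the translate()
# helper, and the target language are assumptions, not part of the original app:
#
#   m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
#   m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#
#   def translate(text, tgt_lang="fr"):
#       m2m_tokenizer.src_lang = "en"
#       encoded = m2m_tokenizer(text, return_tensors="pt")
#       generated = m2m_model.generate(
#           **encoded, forced_bos_token_id=m2m_tokenizer.get_lang_id(tgt_lang)
#       )
#       return m2m_tokenizer.batch_decode(generated, skip_special_tokens=True)[0]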
# Two text outputs; parse_transcription currently returns the same text for both.
output1 = gr.outputs.Textbox(label="Transcription in English:")
output2 = gr.outputs.Textbox(label="Validated Transcription in English")
input_ = gr.inputs.Audio(source="microphone", type="file")
# Earlier single-output version, kept for reference:
# gr.Interface(parse_transcription, inputs=input_, outputs="text",
#              analytics_enabled=False, show_tips=False, enable_queue=True).launch(inline=False)
gr.Interface(
    parse_transcription,
    inputs=input_,
    outputs=[output1, output2],
    analytics_enabled=False,
    show_tips=False,
    theme='huggingface',
    layout='vertical',
    title="Piecurus Test on Speech Transcription",
    description="Live demo of speech-to-text transcription. Model used: facebook/wav2vec2-base-960h",
    enable_queue=True,
).launch(inline=False)