import gradio as gr
import librosa
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from noisereduce.torchgate import TorchGate as TG
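# Assumed runtime dependencies (not pinned in the original Space):
#   pip install gradio librosa torch transformers noisereduce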
def transcribe_audio(audio_file):
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    # Load at the file's native rate (sr=None) so the resampling check below is
    # meaningful; librosa's default would silently resample everything to 22050 Hz.
    input_arr, sampling_rate = librosa.load(audio_file, sr=None)
    # Create a TorchGating instance for spectral noise gating.
    tg = TG(sr=sampling_rate, nonstationary=True).to(device)
    try:
        # TorchGate expects a batched torch tensor of shape (batch, time),
        # not the 1-D NumPy array librosa returns.
        denoised = tg(torch.from_numpy(input_arr).unsqueeze(0).to(device))
        input_arr = denoised.squeeze(0).cpu().numpy()
    except Exception:
        # Fall back to the raw signal if denoising fails.
        pass
    # Whisper's feature extractor expects 16 kHz audio.
    if sampling_rate != 16000:
        input_arr = librosa.resample(input_arr, orig_sr=sampling_rate, target_sr=16000)
    MODEL_NAME = "rikeshsilwalekg/whisper-small-wer35-ekg"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    # Note: the model and pipeline are rebuilt on every request here; loading them
    # once at module level would make repeated transcriptions much faster.
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        MODEL_NAME, torch_dtype=torch_dtype, use_safetensors=True
    )
    model.to(device)
    processor = AutoProcessor.from_pretrained(MODEL_NAME)
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        chunk_length_s=30,
        batch_size=16,
        return_timestamps=False,
        torch_dtype=torch_dtype,
        device=device,
    )
    # return_timestamps=True gives sentence-level timestamps;
    # return_timestamps="word" gives word-level timestamps.
    # With timestamps on, the output also carries a "chunks" list, roughly:
    #   {"text": "...", "chunks": [{"timestamp": (0.0, 4.2), "text": "..."}]}
    prediction = pipe(input_arr)
    return prediction["text"]
# gr.inputs was deprecated and later removed from Gradio; use gr.Audio directly.
audio_input = gr.Audio(sources=["upload"], type="filepath")
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs="textbox",
    title="Nepali Speech To Text",
    description="Upload an audio file and hit the 'Submit' button",
)

if __name__ == "__main__":
    iface.launch(inline=False)
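# Quick local sanity check (assumes this file is saved as app.py and a sample
# clip exists on disk; both names are illustrative):
#   python -c "from app import transcribe_audio; print(transcribe_audio('sample_nepali.wav'))"
# The __main__ guard above keeps this import from also launching the web UI.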