import os
import tempfile
import uuid

import gradio as gr
import librosa
import soundfile
import torch
import nemo.collections.asr as nemo_asr
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration


# Conversational model: distilled 400M-parameter BlenderBot checkpoint.
mname = "facebook/blenderbot-400M-distill"
chat_model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)

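# Chat history is tracked two ways: `history` as a list of (user, bot) tuples
# for the Gradio chat display, and `note_history` as a single string whose
# turns are joined by BlenderBot's "</s> <s>" separator. The helpers below
# append turns to that string and trim the encoded input to the last 128
# tokens so it stays within the model's context window.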
def take_last_tokens(inputs, note_history, history):
    """Keep only the last 128 tokens of the encoded conversation."""
    if inputs['input_ids'].shape[1] > 128:
        inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
        inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
        note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
        history = history[1:]
    return inputs, note_history, history


def add_note_to_history(note, note_history):
    """Append one utterance to the joined history string."""
    note_history.append(note)
    note_history = '</s> <s>'.join(note_history)
    return [note_history]

def chat(message, history):
    history = history or []
    if history:
        history_useful = ['</s> <s>'.join([str(a[0]) + '</s> <s>' + str(a[1]) for a in history])]
    else:
        history_useful = []
    history_useful = add_note_to_history(message, history_useful)
    inputs = tokenizer(history_useful, return_tensors="pt")
    inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
    reply_ids = chat_model.generate(**inputs)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    history_useful = add_note_to_history(response, history_useful)
    list_history = history_useful[0].split('</s> <s>')
    history.append((list_history[-2], list_history[-1]))
    return history, history

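# Speech recognition: pretrained NVIDIA NeMo Conformer-Transducer checkpoint
# ("nvidia/stt_en_conformer_transducer_xlarge"). It expects 16 kHz mono audio,
# so recordings are resampled and downmixed before transcription.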
SAMPLE_RATE = 16000
asr_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge")
asr_model.change_decoding_strategy(None)
asr_model.eval()

def process_audio_file(file):
    """Load an audio file and return 16 kHz mono samples."""
    # Load at the native sample rate, then resample once to the ASR rate.
    data, sr = librosa.load(file, sr=None)
    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
    data = librosa.to_mono(data)
    return data

def transcribe(audio, state=""):
    """Transcribe one audio chunk and append it to the running transcript."""
    if state is None:
        state = ""
    audio_data = process_audio_file(audio)
    with tempfile.TemporaryDirectory() as tmpdir:
        # NeMo's transcribe() takes file paths, so write the chunk to a
        # temporary wav file first.
        audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
        soundfile.write(audio_path, audio_data, SAMPLE_RATE)
        transcriptions = asr_model.transcribe([audio_path])
        # RNNT models may return a (best_hypotheses, all_hypotheses) tuple.
        if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
            transcriptions = transcriptions[0]
        transcriptions = transcriptions[0]
    state = state + transcriptions + " "
    return state, state

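# Gradio UI: with a streaming microphone input and live=True, `transcribe` is
# called repeatedly as audio arrives; the "state" input/output pair carries
# the accumulated transcript between calls.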
iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath", streaming=True),
        "state",
    ],
    outputs=[
        "textbox",
        "state",
    ],
    layout="horizontal",
    theme="huggingface",
    title="🗣️LiveSpeechRecognition🧠Memory💾",
    description="Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.",
    allow_flagging="never",
    live=True,
)

iface.launch()