import gradio as gr
import torch
import librosa
import soundfile
import nemo.collections.asr as nemo_asr
import tempfile
import os
import uuid
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

# PersistDataset -----
import csv
from gradio import inputs, outputs
from huggingface_hub import Repository, hf_hub_download, upload_file
from datetime import datetime

# ---------------------------------------------
# Dataset and Token links - change awacke1 to your own HF id, and add an HF_TOKEN copy to your repo for write permissions.
# This should allow you to save your results to your own Dataset hosted on HF. ---
#DATASET_REPO_URL = "https://huggingface.co./datasets/awacke1/Carddata.csv"
#DATASET_REPO_ID = "awacke1/Carddata.csv"
#DATA_FILENAME = "Carddata.csv"
#DATA_FILE = os.path.join("data", DATA_FILENAME)
#HF_TOKEN = os.environ.get("HF_TOKEN")
#SCRIPT = """
#
#"""
#try:
#    hf_hub_download(
#        repo_id=DATASET_REPO_ID,
#        filename=DATA_FILENAME,
#        cache_dir="data",
#        force_filename=DATA_FILENAME,
#    )
#except:
#    print("file not found")
#repo = Repository(
#    local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
#)

#def store_message(name: str, message: str):
#    if name and message:
#        with open(DATA_FILE, "a") as csvfile:
#            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
#            writer.writerow(
#                {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
#            )
#        # uncomment line below to begin saving -
#        # commit_url = repo.push_to_hub()
#    return ""

#iface = gr.Interface(
#    store_message,
#    [
#        inputs.Textbox(placeholder="Your name"),
#        inputs.Textbox(placeholder="Your message", lines=2),
#    ],
#    "html",
#    css="""
#    .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
#    """,
#    title="Reading/writing to a HuggingFace dataset repo from Spaces",
#    description="This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
#    article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
#)

# main -------------------------
mname = "facebook/blenderbot-400M-distill"
# Name the chat model distinctly so it is not shadowed by the ASR model loaded below.
chat_model = BlenderbotForConditionalGeneration.from_pretrained(mname)
tokenizer = BlenderbotTokenizer.from_pretrained(mname)

def take_last_tokens(inputs, note_history, history):
    """Truncate the encoded input to the last 128 tokens so it fits Blenderbot's context window."""
    if inputs['input_ids'].shape[1] > 128:
        inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
        inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
        note_history = [' '.join(note_history[0].split(' ')[2:])]
        history = history[1:]
    return inputs, note_history, history

def add_note_to_history(note, note_history):
    """Append a note to the running conversation history."""
    note_history.append(note)
    note_history = ' '.join(note_history)
    return [note_history]

def chat(message, history):
    history = history or []
    if history:
        history_useful = [' '.join([str(a[0]) + ' ' + str(a[1]) for a in history])]
    else:
        history_useful = []
    history_useful = add_note_to_history(message, history_useful)
    inputs = tokenizer(history_useful, return_tensors="pt")
    inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
    reply_ids = chat_model.generate(**inputs)
    response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
    history_useful = add_note_to_history(response, history_useful)
    list_history = history_useful[0].split(' ')
    history.append((list_history[-2], list_history[-1]))
    # store_message(message, response)  # Save to dataset - uncomment if you uncomment above to save inputs and outputs to your dataset
    return history, history
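# --- Optional: standalone chat UI -----------------------------------------
# chat() above is defined but never wired into the interface at the bottom of
# this file (only transcribe() is). The commented-out sketch below shows one
# way it could be exposed: "state" carries the (user, bot) history between
# calls, and "chatbot" renders it. This is an illustrative sketch, not part
# of the original app - uncomment to try it in place of the ASR interface.
#chat_iface = gr.Interface(
#    fn=chat,
#    inputs=["text", "state"],
#    outputs=["chatbot", "state"],
#    title="Blenderbot Chat",
#    allow_flagging="never",
#)
#chat_iface.launch()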
SAMPLE_RATE = 16000

# Load the NVIDIA Conformer-Transducer ASR model and switch to inference mode.
asr_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained("nvidia/stt_en_conformer_transducer_xlarge")
asr_model.change_decoding_strategy(None)
asr_model.eval()

def process_audio_file(file):
    # sr=None keeps the native sampling rate, so the audio is resampled at most once.
    data, sr = librosa.load(file, sr=None)
    if sr != SAMPLE_RATE:
        data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
    # monochannel
    data = librosa.to_mono(data)
    return data

def transcribe(audio, state=""):
    if state is None:
        state = ""
    audio_data = process_audio_file(audio)
    with tempfile.TemporaryDirectory() as tmpdir:
        audio_path = os.path.join(tmpdir, f'audio_{uuid.uuid4()}.wav')
        soundfile.write(audio_path, audio_data, SAMPLE_RATE)
        transcriptions = asr_model.transcribe([audio_path])
        # RNNT models return (best_hypotheses, all_hypotheses); keep the best ones.
        if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
            transcriptions = transcriptions[0]
        transcriptions = transcriptions[0]
    # store_message(transcriptions, state)  # Save to dataset - uncomment to store into a dataset - hint: you will need your HF_TOKEN
    state = state + transcriptions + " "
    return state, state

iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type='filepath', streaming=True),
        "state",
    ],
    outputs=[
        "textbox",
        "state",
    ],
    layout="horizontal",
    theme="huggingface",
    title="🗣️LiveSpeechRecognition🧠Memory💾",
    description="Live Automatic Speech Recognition (ASR) with Memory💾 Dataset.",
    allow_flagging='never',
    live=True,
    # article=f"Result Output Saved to Memory💾 Dataset: [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
)

iface.launch()
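# --- Optional: speech-to-chat pipeline -------------------------------------
# A sketch (an assumption, not part of the original app) of how the two models
# above could be chained: transcribe each audio chunk, then hand the running
# transcript to the Blenderbot chat() function. speech_to_chat is a
# hypothetical helper; wiring it into an interface would need an audio input
# plus state handling appropriate to your Gradio version.
#def speech_to_chat(audio, asr_state, chat_history):
#    asr_state, _ = transcribe(audio, asr_state)        # running transcript
#    chat_history, _ = chat(asr_state, chat_history)    # bot reply to the transcript so far
#    return asr_state, chat_history, asr_state, chat_history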