"""Gradio demo ("Demo CORFO"): audio transcription plus per-patient EHR viewer.

Reads ``pacientes.csv`` (expected columns: ``paciente``, ``pac_id``,
``Resumen``, ``EHR`` -- TODO confirm against the CSV) and builds a two-part
UI: a microphone-transcription interface and a patient selector that shows
the patient's summary and full record side by side.
"""

import gradio as gr
import pandas as pd
from pydub import AudioSegment  # kept: imported by the original file (currently unused)

from ailib import whisper_transcribe


def transcribe(audiofile):
    """Transcribe an audio file with Whisper.

    Parameters
    ----------
    audiofile : str
        Filesystem path to the recorded audio (Gradio ``type="filepath"``).

    Returns
    -------
    The transcription produced by ``ailib.whisper_transcribe``.
    """
    return whisper_transcribe(audiofile)


def display_text(option):
    """Return (summary, full EHR) for the selected patient.

    Parameters
    ----------
    option : str
        Patient identifier chosen in the Radio widget (a value from
        ``df['paciente']``).

    Returns
    -------
    tuple[str, str]
        ``(Resumen, EHR)`` for the first matching row.
    """
    # BUG FIX: the original filtered on an undefined name `paciente`,
    # raising NameError at runtime; the selected value arrives as `option`.
    row = df[df.paciente == option]
    resumen_historial = row['Resumen'].iloc[0]
    historial = row['EHR'].iloc[0]
    return resumen_historial, historial


df = pd.read_csv('pacientes.csv')
pacientes = list(df['paciente'].unique())  # now contains PAC_ID
print('PAX:', pacientes, type(pacientes))
# Map patient label -> patient id (currently unused downstream).
pid_dict = dict(zip(df.paciente, df.pac_id))

################################################

with gr.Blocks() as demo:
    with gr.Row():
        audemo = gr.Interface(
            transcribe,
            gr.Audio(sources=["microphone"], type="filepath"),
            "text",
        )
    with gr.Row():
        with gr.Column(scale=1):
            radio = gr.Radio(
                choices=pacientes,
                label="PACIENTE",
                value=pacientes[0],
                interactive=True,
            )
        with gr.Column(scale=3):
            with gr.Row():
                left_text = gr.Markdown()
                right_text = gr.Markdown()
    # Refresh both panes whenever the selected patient changes.
    radio.change(display_text, inputs=radio, outputs=[left_text, right_text])

# Guard the launch so importing this module doesn't start a server.
if __name__ == "__main__":
    demo.launch()