import gradio as gr
from pydub import AudioSegment
from ailib import whisper_transcribe, summarize
import pandas as pd


def transcribe(audiofile):
    # Transcribe the recorded consultation, then summarize it.
    # The third return value is a placeholder output.
    #audio = AudioSegment.from_file(audiofile)
    #open(filename, 'wb').write(audio)
    consulta = whisper_transcribe(audiofile)
    resumen = summarize(consulta)
    random = 'CHALLA!'
    return consulta, resumen, random


def display_text(paciente):
    # Look up the stored summary and full EHR for the selected patient.
    resumen_historial = df[df.paciente == paciente]['Resumen'].iloc[0]
    historial = df[df.paciente == paciente]['EHR'].iloc[0]
    return resumen_historial, historial


df = pd.read_csv('pacientes.csv')
pacientes = list(df['paciente'].unique())  # now contains PAC_ID
print('PAX:', pacientes, type(pacientes))
pid_dict = dict(zip(df.paciente, df.pac_id))  # currently unused

################################################

# gr.title("Demo CORFO")
with gr.Blocks() as demo:
    with gr.Row():
        audemo = gr.Interface(
            transcribe,
            gr.Audio(sources=["microphone"], type="filepath"),
            ["text", "text", "text"],  # was simply "text"
        )
    with gr.Row():
        with gr.Column(scale=1):
            radio = gr.Radio(choices=pacientes, label="PACIENTE",
                             value=pacientes[0], interactive=True)
        with gr.Column(scale=4):
            with gr.Row():  # placeholders for the transcription summary and new record (not yet wired up)
                resumen_trans = gr.Markdown()
                nueva_ficha = gr.Markdown()
            with gr.Row():  # history + summary accordions
                with gr.Accordion('Resumen Historial'):  # use radio?
                    left_text = gr.Markdown()
                with gr.Accordion('Historial'):
                    right_text = gr.Markdown()

    radio.change(display_text, inputs=radio, outputs=[left_text, right_text])

demo.launch()
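
# ------------------------------------------------------------------
# Note (assumption, not part of the original script): judging only by
# the columns accessed above, 'pacientes.csv' is expected to contain
# at least the following header; any extra columns are ignored.
#
#   paciente,pac_id,Resumen,EHR
#
# 'paciente' feeds the Radio selector, 'pac_id' builds pid_dict, and
# 'Resumen' / 'EHR' are the texts shown in the two accordions.
# ------------------------------------------------------------------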