Spaces:
Runtime error
Runtime error
File size: 1,228 Bytes
534e084 d8215b2 246edcb fff86d3 52bf87c d8215b2 52bf87c 7624a50 fff86d3 71cdb69 fff86d3 3313eeb 71cdb69 fff86d3 71cdb69 3313eeb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
import gradio as gr
from pydub import AudioSegment
from ailib import whisper_transcribe
import pandas as pd
def transcribe(audiofile):
    """Transcribe an audio file to text.

    Args:
        audiofile: Path to the recorded audio file (Gradio passes a
            filepath string because the Audio input uses type="filepath").

    Returns:
        The transcription produced by ailib.whisper_transcribe.
    """
    # Delegates directly to the project's Whisper wrapper; no local
    # pre-processing (pydub conversion was removed as unnecessary).
    return whisper_transcribe(audiofile)
def display_text(option):
    """Build the two panel labels shown for the selected patient.

    Args:
        option: The currently selected radio-button value (patient name).

    Returns:
        A (left, right) pair of strings prefixed with 'LEFT:' / 'RIGHT:'.
    """
    return 'LEFT:' + option, 'RIGHT:' + option
# Load the patient roster once at startup.
roster = pd.read_csv('pacientes.csv')
pacientes = roster['paciente'].unique()  # now contains PAC_ID
# Map each patient name to its pac_id for later lookups.
pid_dict = dict(zip(roster['paciente'], roster['pac_id']))
df = roster  # keep the original module-level name available
################################################
# gr.title("Demo CORFO")
# Two-row layout: a microphone-transcription widget on top, and a
# patient selector with two mirrored text panels below.
with gr.Blocks() as demo:
    with gr.Row():
        # Records from the microphone and shows transcribe()'s output.
        audemo = gr.Interface(
            transcribe,
            gr.Audio(sources=["microphone"], type="filepath"),
            "text",
        )
    with gr.Row():
        with gr.Column(scale=1):
            radio = gr.Radio(choices=pacientes,
                             label="PACIENTE", value=pacientes[0],
                             interactive=True)
        with gr.Column(scale=3):
            with gr.Row():
                left_text = gr.Markdown()
                right_text = gr.Markdown()
    # Event listeners must be registered inside the Blocks context.
    radio.change(display_text, inputs=radio, outputs=[left_text, right_text])
demo.launch()