"""Gradio demo app for OpenAI Whisper Base speech-to-text transcription.

Builds a `gr.Interface` around `infer.predict` with a language selector and
two audio inputs (microphone and file upload), then launches the app.
"""

import gradio as gr

from src import infer, utils

# Example rows mirror the Interface inputs: [language, microphone audio,
# uploaded audio]. Only the microphone slot is pre-filled with a sample clip.
audio_examples = [
    [None, "assets/audio/male-indonesian.wav", None],
    [None, "assets/audio/female-indonesian.wav", None],
    [None, "assets/audio/male-english.wav", None],
    [None, "assets/audio/female-english.wav", None],
]

demo = gr.Interface(
    fn=infer.predict,
    inputs=[
        gr.Radio(
            label="Language",
            choices=["indonesian", "english"],
            value="indonesian",
        ),
        # type="numpy" hands the audio to `predict` as (sample_rate, data).
        gr.Audio(label="Speak", source="microphone", type="numpy"),
        gr.Audio(label="Upload audio", source="upload", type="numpy"),
    ],
    outputs=[gr.TextArea(label="Output Text")],
    title="OpenAI Whisper Base",
    description=utils.parsing_text("assets/descriptions.md"),
    # Fix: original read `article=""utils.parsing_text(...)` — a stray empty
    # string literal fused before the call, which is a SyntaxError.
    article=utils.parsing_text("assets/articles.md"),
    examples=audio_examples,
)

demo.launch()