Update app.py
app.py CHANGED
@@ -80,7 +80,7 @@ def example_inference(input_text, count, image, img_qn, audio):
     pred_text = textMode(input_text, count)
     pred_text_image = imageMode(image, img_qn)
     pred_text_audio = audioMode(audio)
-    return pred_text
+    return pred_text, pred_text_image, pred_text_audio



@@ -174,7 +174,7 @@ with gr.Blocks() as demo:
         ["What is a large language model?","50","zebras.png","Are the zebras walking or standing still in the image?","WtIsML.m4a"]
         ],
         inputs=[text_input, text_input_count], #, image_input, image_text_input, audio_input],
-        outputs=[text_output],
+        outputs=[text_output, image_text_output, audio_text_output],
         fn=example_inference,
         )
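For context, a minimal runnable sketch of how the two changed spots fit together in a Gradio Blocks app. The helper names (textMode, imageMode, audioMode) and the component variable names come from the diff; the component types, labels, and stub model calls below are illustrative assumptions, not the Space's actual code. The sketch also wires all five example fields to inputs, whereas the diff keeps the image and audio inputs commented out.

import gradio as gr

# Stub model calls so the sketch runs standalone; in app.py these are the
# real text/image/audio inference helpers.
def textMode(input_text, count):
    return f"text answer to {input_text!r} (~{count} tokens)"

def imageMode(image, img_qn):
    return f"image answer to {img_qn!r}"

def audioMode(audio):
    return "audio answer"

def example_inference(input_text, count, image, img_qn, audio):
    pred_text = textMode(input_text, count)
    pred_text_image = imageMode(image, img_qn)
    pred_text_audio = audioMode(audio)
    # The commit's change: return one value per output component,
    # not just the text prediction.
    return pred_text, pred_text_image, pred_text_audio

with gr.Blocks() as demo:
    text_input = gr.Textbox(label="Question")
    text_input_count = gr.Number(label="Max new tokens")
    image_input = gr.Image(type="filepath")
    image_text_input = gr.Textbox(label="Question about the image")
    audio_input = gr.Audio(type="filepath")
    text_output = gr.Textbox(label="Text answer")
    image_text_output = gr.Textbox(label="Image answer")
    audio_text_output = gr.Textbox(label="Audio answer")

    gr.Examples(
        examples=[
            ["What is a large language model?", "50", "zebras.png",
             "Are the zebras walking or standing still in the image?",
             "WtIsML.m4a"],
        ],
        # All five inputs wired here; the diff leaves the last three commented out.
        inputs=[text_input, text_input_count, image_input,
                image_text_input, audio_input],
        outputs=[text_output, image_text_output, audio_text_output],
        fn=example_inference,
    )

demo.launch()

One thing the diff leaves open: gr.Examples expects each example row to supply one value per input component, so the five-field rows only line up once the commented-out image and audio inputs are restored. The commit itself just brings the function's return arity in line with the three output components.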