Update app.py
app.py (changed)
@@ -76,8 +76,10 @@ with torch.no_grad():
 print('--------------Loaded fine tuned phi2 model----------------------')
 
 
-def example_inference(input_text, count):
+def example_inference(input_text, count, image, img_qn, audio):
     pred_text = textMode(input_text, count)
+    pred_text_image = imageMode(image, img_qn)
+    pred_text_audio = audioMode(audio)
     return pred_text #, "in progress", "in progress"
 
 
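Taken together, this hunk widens `example_inference` to accept the image and audio inputs and run all three inference paths, while still returning only the text prediction (the extra return values remain commented out). A minimal annotated sketch, assuming `textMode`, `imageMode`, and `audioMode` are the app's existing inference helpers named in this diff; everything else is restated from the hunk above:

```python
def example_inference(input_text, count, image, img_qn, audio):
    # Text path: generate `count` tokens from the fine-tuned phi2 model.
    pred_text = textMode(input_text, count)
    # Image path: answer img_qn about the uploaded image.
    pred_text_image = imageMode(image, img_qn)
    # Audio path: run inference on the uploaded clip.
    pred_text_audio = audioMode(audio)
    # Only the text prediction is surfaced for now; the image and audio
    # results are computed but unused until the other outputs are wired up.
    return pred_text  #, "in progress", "in progress"
```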
@@ -169,7 +171,7 @@ with gr.Blocks() as demo:
 
     gr.Examples(
         examples=[
-            ["What is a large language model?","50"]
+            ["What is a large language model?","50","zebras.png","Are the zebras walking or standing still in the image?","WtIsML.m4a"]
         ],
         inputs=[text_input, text_input_count], #, image_input, image_text_input, audio_input],
         outputs=[text_output], #, image_text_output, audio_text_output],
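This second hunk grows the example row to five values, while `inputs` still binds only the two text components (the image and audio components stay commented out). A hedged sketch of how the `gr.Examples` wiring could look once those components are restored: the component constructors and labels below are assumptions, only the variable names come from the comments in this diff, and `zebras.png` / `WtIsML.m4a` are assumed to sit next to app.py:

```python
import gradio as gr

def example_inference(input_text, count, image, img_qn, audio):
    # Stub standing in for the real multimodal helper in app.py.
    return f"stub answer for: {input_text}"

with gr.Blocks() as demo:
    # Hypothetical constructors; only the variable names appear in the diff.
    text_input = gr.Textbox(label="Question")
    text_input_count = gr.Textbox(label="Max new tokens")
    image_input = gr.Image(type="filepath")
    image_text_input = gr.Textbox(label="Question about the image")
    audio_input = gr.Audio(type="filepath")
    text_output = gr.Textbox(label="Answer")

    gr.Examples(
        examples=[
            ["What is a large language model?", "50", "zebras.png",
             "Are the zebras walking or standing still in the image?",
             "WtIsML.m4a"],
        ],
        # A five-value example row binds cleanly only when `inputs` lists
        # five components in the same order as the row above.
        inputs=[text_input, text_input_count, image_input,
                image_text_input, audio_input],
        outputs=[text_output],
        fn=example_inference,
    )

demo.launch()
```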