Update app.py
app.py CHANGED
@@ -6,7 +6,6 @@ import time
 import torch
 import spaces
 
-# Load the Qwen2.5-VL-3B-Instruct model and processor
 MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -85,11 +84,8 @@ examples = [
     [{"text": "What is this UI about?", "files": ["example_images/s2w_example.png"]}],
 ]
 
-# Gradio interface
 demo = gr.ChatInterface(
     fn=model_inference,
-    title="# **Qwen2.5-VL-3B-Instruc**",
-    description="Interact with [Qwen2.5-VL-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct) in this demo. Upload an image and text, or try one of the examples. Each chat starts a new conversation.",
     examples=examples,
     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
     stop_btn="Stop Generation",
@@ -97,5 +93,4 @@ demo = gr.ChatInterface(
     cache_examples=False,
 )
 
-# Launch the demo
 demo.launch(debug=True)
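The hunks above reference a model_inference function that this commit does not touch, and the from_pretrained(...) call is truncated. Purely as a rough, self-contained sketch of how such a handler could be wired to the loading code and ChatInterface shown in the diff (dtype, device placement, generation budget, and message handling below are assumptions, not the Space's actual code):

# Hypothetical sketch; only the names visible in the diff are taken from app.py.
import torch
import spaces
import gradio as gr
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,  # assumption: these arguments are not shown in the diff
    device_map="auto",           # assumption
)

@spaces.GPU  # ZeroGPU decorator from the `spaces` package imported in the diff
def model_inference(message, history):
    # gr.MultimodalTextbox passes a dict: {"text": str, "files": [file paths]}
    files = message.get("files") or []
    images = [Image.open(path) for path in files]

    # Build a Qwen chat turn with one image placeholder per uploaded file
    content = [{"type": "image"} for _ in images]
    content.append({"type": "text", "text": message["text"]})
    prompt = processor.apply_chat_template(
        [{"role": "user", "content": content}],
        tokenize=False,
        add_generation_prompt=True,
    )

    inputs = processor(
        text=[prompt],
        images=images if images else None,
        return_tensors="pt",
    ).to(model.device)

    output_ids = model.generate(**inputs, max_new_tokens=512)  # assumption: budget not shown
    # Decode only the newly generated tokens, not the echoed prompt
    new_tokens = output_ids[:, inputs["input_ids"].shape[1]:]
    return processor.batch_decode(new_tokens, skip_special_tokens=True)[0]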