stzhao committed · Commit eaf868f · verified · 1 Parent(s): c7bb864

Update app.py

Files changed (1)
  1. app.py +5 -4
app.py CHANGED
@@ -1,14 +1,15 @@
 import gradio as gr
-from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration, TextIteratorStreamer
+from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor, TextIteratorStreamer
+# from qwen_vl_utils import process_vision_info
 from transformers.image_utils import load_image
 from threading import Thread
 import time
 import torch
 import spaces
 
-MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"
+MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
-model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
+model = Qwen2VLForConditionalGeneration.from_pretrained(
     MODEL_ID,
     trust_remote_code=True,
     torch_dtype=torch.bfloat16
@@ -86,7 +87,7 @@ examples = [
 
 demo = gr.ChatInterface(
     fn=model_inference,
-    description="# **Qwen2.5-VL-7B-Instruct**",
+    description="# **Qwen2-VL-2B-Instruct**",
     examples=examples,
     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
     stop_btn="Stop Generation",
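
For context, a minimal standalone sketch of how the pieces imported in the new version (AutoProcessor, Qwen2VLForConditionalGeneration, TextIteratorStreamer) are typically wired together for streamed chat responses. This is not the Space's actual model_inference function: the stream_reply helper name, the prompt layout, the device placement, and max_new_tokens=512 are illustrative assumptions only.

# Minimal sketch (not this Space's model_inference) of the usual streaming
# pattern behind the imports in the diff above. Assumptions for illustration:
# device placement, helper name `stream_reply`, and max_new_tokens=512.
from threading import Thread

import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration, TextIteratorStreamer

MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID, trust_remote_code=True, torch_dtype=torch.bfloat16
).to("cuda" if torch.cuda.is_available() else "cpu")


def stream_reply(image: Image.Image, question: str):
    # One user turn: an image placeholder plus the text query.
    messages = [{
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": question},
        ],
    }]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

    # generate() runs in a background thread; the streamer yields decoded text
    # chunks as they are produced, which suits a generator-style ChatInterface fn.
    streamer = TextIteratorStreamer(
        processor.tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=512),
    ).start()

    buffer = ""
    for chunk in streamer:
        buffer += chunk
        yield buffer  # each yield replaces the partial assistant message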