wixcs committed on
Commit
6593745
·
verified ·
1 Parent(s): 46ef640

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -0
app.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+ from transformers.generation import GenerationConfig
5
+
6
# Pick the compute device: GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Hugging Face Hub id of the checkpoint to serve.
# NOTE(review): the `model.chat()` API used further down is shipped by the
# Qwen-VL v1 remote code (e.g. "Qwen/Qwen-VL-Chat"); Qwen2-VL checkpoints
# normally load via Qwen2VLForConditionalGeneration instead — confirm that
# this id actually exposes `chat()`.
model_id = "Qwen/Qwen2-VL-7B"  # or "Qwen/Qwen2-VL-Chat-7B"

# Load the tokenizer and model. trust_remote_code is required because the
# checkpoint ships its own custom modelling / chat code.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device,
    torch_dtype="auto",  # let transformers pick the checkpoint's native dtype
    trust_remote_code=True,
).eval()  # inference only — disables dropout and friends

# Pull in the generation defaults published with the checkpoint.
model.generation_config = GenerationConfig.from_pretrained(
    model_id,
    trust_remote_code=True,
)
# Deterministic decoding (greedy, unless num_beams is set in the config).
model.generation_config.do_sample = False
27
+
28
def respond(image, prompt, history):
    """Run one chat turn against the model.

    Args:
        image: PIL image from the gr.Image component (may be None).
        prompt: user prompt string from the textbox.
        history: list of (user, assistant) message pairs kept in gr.State.

    Returns:
        (history, history): the updated conversation twice — once for the
        gr.Chatbot display and once to refresh the gr.State.
    """
    # NOTE(review): `model.chat` comes from the checkpoint's remote code
    # (Qwen-VL-style API) — confirm the configured model_id provides it.
    response, history = model.chat(tokenizer, image, prompt, history=history)
    # Bug fix: previously returned (response, history). The first output is
    # wired to gr.Chatbot, which expects the full list of message pairs, not
    # a bare response string — returning the string broke the chat display.
    return history, history
32
+
33
# Build the Gradio interface: image + prompt on the left, chat on the right.
with gr.Blocks() as demo:
    gr.Markdown(f"## Qwen2-VL-7B Demo (Model: {model_id})")

    with gr.Row():
        with gr.Column(scale=4):
            img_in = gr.Image(type="pil", label="Image")
            prompt_box = gr.Textbox(label="Prompt", placeholder="输入提示")
            send_btn = gr.Button("Submit")
        with gr.Column(scale=6):
            chat_display = gr.Chatbot(label="Chatbot")

    # Per-session conversation history, threaded through every turn.
    chat_state = gr.State([])

    send_btn.click(
        respond,
        inputs=[img_in, prompt_box, chat_state],
        outputs=[chat_display, chat_state],
    )

# share=True generates a public link for the app.
demo.queue().launch(server_name='0.0.0.0', server_port=7860, share=True)