wjm55 committed
Commit 8858188 · 1 Parent(s): e80f420
Files changed (3)
  1. Caracal.jpg +0 -0
  2. app.py +125 -0
  3. requirements.txt +8 -0
Caracal.jpg ADDED
app.py ADDED
@@ -0,0 +1,125 @@
+ import gradio as gr
+ import spaces
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import subprocess
+ from datetime import datetime
+ import numpy as np
+ import os
+
+
+ # subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ # models = {
+ #     "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+ # }
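+
+ # Gradio hands run_example the uploaded picture as a numpy array; it is saved to
+ # disk first because the chat message below references the image by file path.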
+ def array_to_image_path(image_array):
+     # Convert numpy array to PIL Image
+     img = Image.fromarray(np.uint8(image_array))
+     img.thumbnail((1024, 1024))
+
+     # Generate a unique filename using timestamp
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"image_{timestamp}.png"
+
+     # Save the image
+     img.save(filename)
+
+     # Get the full path of the saved image
+     full_path = os.path.abspath(filename)
+
+     return full_path
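+
+ # Load the model and processor once at startup so every request reuses them;
+ # torch_dtype="auto" picks up the dtype stored in the checkpoint config.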
+
+ models = {
+     "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto").cuda().eval()
+ }
+
+ processors = {
+     "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
+ }
+
+ DESCRIPTION = "This demo uses [Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)."
+
+
+ @spaces.GPU  # ask HF Spaces to allocate a GPU for the duration of each call
+ def run_example(image, model_id="Qwen/Qwen2-VL-7B-Instruct"):
+     image_path = array_to_image_path(image)
+
+     print(image_path)
+     model = models[model_id]
+     processor = processors[model_id]
+
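+     # One user turn in the Qwen2-VL chat format: the image is referenced by file
+     # path and resolved by process_vision_info below.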
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image_path,
+                 },
+                 {"type": "text", "text": "Convert the image to text."},
+             ],
+         }
+     ]
+
+     # Preparation for inference: render the chat template and collect vision inputs
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: generate, then strip the prompt tokens from each output sequence
+     generated_ids = model.generate(**inputs, max_new_tokens=1024)
+     generated_ids_trimmed = [
+         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+     )
+
+     return output_text[0]
+
+
+ css = """
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ """
+
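+ # Wire up the UI: image input and model dropdown on the left, generated text on the right.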
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("![Caracal](Caracal.jpg)")
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="Qwen2-VL-7B Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct")
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")
+
+         submit_btn.click(run_example, [input_img, model_selector], [output_text])
+
+ demo.queue(api_open=False)
+ demo.launch(debug=True)
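+
+ # Note: running this outside HF Spaces also requires the `spaces` package from PyPI
+ # (preinstalled on Spaces) and a CUDA GPU, since the model is loaded with .cuda().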
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy==1.24.4
+ Pillow==10.3.0
+ Requests==2.31.0
+ torch
+ torchvision
+ git+https://github.com/huggingface/transformers.git
+ accelerate
+ qwen-vl-utils