BoyuNLP committed
Commit 3030b80 · verified · 1 Parent(s): 51b3b18

Upload folder using huggingface_hub

Files changed (45)
  1. .gitattributes +1 -0
  2. LICENSE +51 -0
  3. README.md +595 -3
  4. added_tokens.json +16 -0
  5. chat_template.json +3 -0
  6. config.json +48 -0
  7. generation_config.json +13 -0
  8. merges.txt +0 -0
  9. model-00001-of-00031.safetensors +3 -0
  10. model-00002-of-00031.safetensors +3 -0
  11. model-00003-of-00031.safetensors +3 -0
  12. model-00004-of-00031.safetensors +3 -0
  13. model-00005-of-00031.safetensors +3 -0
  14. model-00006-of-00031.safetensors +3 -0
  15. model-00007-of-00031.safetensors +3 -0
  16. model-00008-of-00031.safetensors +3 -0
  17. model-00009-of-00031.safetensors +3 -0
  18. model-00010-of-00031.safetensors +3 -0
  19. model-00011-of-00031.safetensors +3 -0
  20. model-00012-of-00031.safetensors +3 -0
  21. model-00013-of-00031.safetensors +3 -0
  22. model-00014-of-00031.safetensors +3 -0
  23. model-00015-of-00031.safetensors +3 -0
  24. model-00016-of-00031.safetensors +3 -0
  25. model-00017-of-00031.safetensors +3 -0
  26. model-00018-of-00031.safetensors +3 -0
  27. model-00019-of-00031.safetensors +3 -0
  28. model-00020-of-00031.safetensors +3 -0
  29. model-00021-of-00031.safetensors +3 -0
  30. model-00022-of-00031.safetensors +3 -0
  31. model-00023-of-00031.safetensors +3 -0
  32. model-00024-of-00031.safetensors +3 -0
  33. model-00025-of-00031.safetensors +3 -0
  34. model-00026-of-00031.safetensors +3 -0
  35. model-00027-of-00031.safetensors +3 -0
  36. model-00028-of-00031.safetensors +3 -0
  37. model-00029-of-00031.safetensors +3 -0
  38. model-00030-of-00031.safetensors +3 -0
  39. model-00031-of-00031.safetensors +3 -0
  40. model.safetensors.index.json +0 -0
  41. preprocessor_config.json +29 -0
  42. special_tokens_map.json +31 -0
  43. tokenizer.json +3 -0
  44. tokenizer_config.json +146 -0
  45. vocab.json +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,51 @@
+ Qwen LICENSE AGREEMENT
+
+ Qwen LICENSE AGREEMENT Release Date: September 19, 2024
+
+ By clicking to agree or by using or distributing any portion or element of the Qwen Materials, you will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
+
+ 1. Definitions
+ a. This Qwen LICENSE AGREEMENT (this "Agreement") shall mean the terms and conditions for use, reproduction, distribution and modification of the Materials as defined by this Agreement.
+ b. "We" (or "Us") shall mean Alibaba Cloud.
+ c. "You" (or "Your") shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Materials for any purpose and in any field of use.
+ d. "Third Parties" shall mean individuals or legal entities that are not under common control with us or you.
+ e. "Qwen" shall mean the large language models, and software and algorithms, consisting of trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by us.
+ f. "Materials" shall mean, collectively, Alibaba Cloud's proprietary Qwen and Documentation (and any portion thereof) made available under this Agreement.
+ g. "Source" form shall mean the preferred form for making modifications, including but not limited to model source code, documentation source, and configuration files.
+ h. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+ 2. Grant of Rights
+ You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Alibaba Cloud's intellectual property or other rights owned by us embodied in the Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Materials.
+
+ 3. Redistribution
+ You may distribute copies or make the Materials, or derivative works thereof, available as part of a product or service that contains any of them, with or without modifications, and in Source or Object form, provided that you meet the following conditions:
+ a. You shall give any other recipients of the Materials or derivative works a copy of this Agreement;
+ b. You shall cause any modified files to carry prominent notices stating that you changed the files;
+ c. You shall retain in all copies of the Materials that you distribute the following attribution notices within a "Notice" text file distributed as a part of such copies: "Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved."; and
+ d. You may add your own copyright statement to your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.
+
+ 4. Restrictions
+ If you are commercially using the Materials, and your product or service has more than 100 million monthly active users, you shall request a license from us. You cannot exercise your rights under this Agreement without our express authorization.
+
+ 5. Rules of use
+ a. The Materials may be subject to export controls or restrictions in China, the United States or other countries or regions. You shall comply with applicable laws and regulations in your use of the Materials.
+ b. If you use the Materials or any outputs or results therefrom to create, train, fine-tune, or improve an AI model that is distributed or made available, you shall prominently display “Built with Qwen” or “Improved using Qwen” in the related product documentation.
+
+ 6. Intellectual Property
+ a. We retain ownership of all intellectual property rights in and to the Materials and derivatives made by or for us. Conditioned upon compliance with the terms and conditions of this Agreement, with respect to any derivative works and modifications of the Materials that are made by you, you are and will be the owner of such derivative works and modifications.
+ b. No trademark license is granted to use the trade names, trademarks, service marks, or product names of us, except as required to fulfill notice requirements under this Agreement or as required for reasonable and customary use in describing and redistributing the Materials.
+ c. If you commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against us or any entity alleging that the Materials or any output therefrom, or any part of the foregoing, infringe any intellectual property or other right owned or licensable by you, then all licenses granted to you under this Agreement shall terminate as of the date such lawsuit or other proceeding is commenced or brought.
+
+ 7. Disclaimer of Warranty and Limitation of Liability
+ a. We are not obligated to support, update, provide training for, or develop any further version of the Qwen Materials or to grant any license thereto.
+ b. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. WE MAKE NO WARRANTY AND ASSUME NO RESPONSIBILITY FOR THE SAFETY OR STABILITY OF THE MATERIALS AND ANY OUTPUT THEREFROM.
+ c. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MATERIALS OR ANY OUTPUT OF IT, NO MATTER HOW IT’S CAUSED.
+ d. You will defend, indemnify and hold harmless us from and against any claim by any third party arising out of or related to your use or distribution of the Materials.
+
+ 8. Survival and Termination.
+ a. The term of this Agreement shall commence upon your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
+ b. We may terminate this Agreement if you breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, you must delete and cease use of the Materials. Sections 7 and 9 shall survive the termination of this Agreement.
+
+ 9. Governing Law and Jurisdiction.
+ a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
+ b. The People's Courts in Hangzhou City shall have exclusive jurisdiction over any dispute arising out of this Agreement.
README.md CHANGED
@@ -1,3 +1,595 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: other
+ license_name: tongyi-qianwen
+ license_link: https://huggingface.co/Qwen/Qwen2-VL-72B-Instruct/blob/main/LICENSE
+ language:
+ - en
+ pipeline_tag: image-text-to-text
+ tags:
+ - multimodal
+ library_name: transformers
+ base_model:
+ - Qwen/Qwen2-VL-72B
+ ---
+
+ # Qwen2-VL-72B-Instruct
+
+ ## Introduction
+
+ We're excited to unveil **Qwen2-VL**, the latest iteration of our Qwen-VL model, representing nearly a year of innovation.
+
+ ### What’s New in Qwen2-VL?
+
+ #### Key Enhancements:
+
+ * **SoTA understanding of images of various resolutions & ratios**: Qwen2-VL achieves state-of-the-art performance on visual understanding benchmarks, including MathVista, DocVQA, RealWorldQA, MTVQA, etc.
+
+ * **Understanding videos of 20min+**: Qwen2-VL can understand videos over 20 minutes long for high-quality video-based question answering, dialog, content creation, etc.
+
+ * **Agent that can operate your mobiles, robots, etc.**: with its abilities for complex reasoning and decision making, Qwen2-VL can be integrated with devices like mobile phones and robots for automatic operation based on the visual environment and text instructions.
+
+ * **Multilingual Support**: to serve global users, besides English and Chinese, Qwen2-VL now supports understanding text in different languages inside images, including most European languages, Japanese, Korean, Arabic, Vietnamese, etc.
+
+ #### Model Architecture Updates:
+
+ * **Naive Dynamic Resolution**: Unlike before, Qwen2-VL can handle arbitrary image resolutions, mapping them into a dynamic number of visual tokens, offering a more human-like visual processing experience (a rough token-count sketch follows the two figures below).
+
+ <p align="center">
+ <img src="https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/qwen2_vl.jpg" width="80%"/>
+ </p>
+
+ * **Multimodal Rotary Position Embedding (M-ROPE)**: Decomposes positional embedding into parts to capture 1D textual, 2D visual, and 3D video positional information, enhancing the model's multimodal processing capabilities.
+
+ <p align="center">
+ <img src="http://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/mrope.png" width="80%"/>
+ </p>
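+
+ To make the dynamic-resolution token budget concrete, here is a rough back-of-the-envelope sketch (ours, not the exact `transformers` implementation) of how an image's size maps to a visual-token count, assuming the 14-pixel patches and 2x2 patch merging declared in this repo's `preprocessor_config.json`:
+
+ ```python
+ import math
+
+ def approx_visual_tokens(height: int, width: int,
+                          min_pixels: int = 256 * 28 * 28,
+                          max_pixels: int = 1280 * 28 * 28) -> int:
+     """Estimate the visual tokens for one image: both sides are resized to
+     multiples of 28 (patch_size 14 x merge_size 2) while keeping the pixel
+     count roughly within [min_pixels, max_pixels]; each 28x28 block then
+     costs one visual token."""
+     factor = 28
+     scale = 1.0
+     if height * width > max_pixels:
+         scale = math.sqrt(max_pixels / (height * width))
+     elif height * width < min_pixels:
+         scale = math.sqrt(min_pixels / (height * width))
+     h = max(factor, round(height * scale / factor) * factor)
+     w = max(factor, round(width * scale / factor) * factor)
+     return (h // factor) * (w // factor)
+
+ # A full-HD frame, for example, lands near the 1280-token ceiling
+ # (rounding can overshoot slightly; the real processor clamps exactly).
+ print(approx_visual_tokens(1080, 1920))
+ ```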
+
+ We have three models with 2, 7 and 72 billion parameters. This repo contains the instruction-tuned 72B Qwen2-VL model. For more information, visit our [Blog](https://qwenlm.github.io/blog/qwen2-vl/) and [GitHub](https://github.com/QwenLM/Qwen2-VL).
+
+ ## Evaluation
+
+ ### Image Benchmarks
+
+ | Benchmark | Previous SoTA<br><sup>(Open-source LVLM)</sup> | Claude-3.5 Sonnet | GPT-4o | **Qwen2-VL-72B** |
+ | :--- | :---: | :---: | :---: | :---: |
+ | MMMU<sub>val</sub> | 58.3 | 68.3 | **69.1** | 64.5 |
+ | DocVQA<sub>test</sub> | 94.1 | 95.2 | 92.8 | **96.5** |
+ | InfoVQA<sub>test</sub> | 82.0 | - | - | **84.5** |
+ | ChartQA<sub>test</sub> | 88.4 | **90.8** | 85.7 | 88.3 |
+ | TextVQA<sub>val</sub> | 84.4 | - | - | **85.5** |
+ | OCRBench | 852 | 788 | 736 | **877** |
+ | MTVQA | 17.3 | 25.7 | 27.8 | **30.9** |
+ | VCR<sub>en easy</sub> | 84.67 | 63.85 | 91.55 | **91.93** |
+ | VCR<sub>zh easy</sub> | 22.09 | 1.0 | 14.87 | **65.37** |
+ | RealWorldQA | 72.2 | 60.1 | 75.4 | **77.8** |
+ | MME<sub>sum</sub> | 2414.7 | 1920.0 | 2328.7 | **2482.7** |
+ | MMBench-EN<sub>test</sub> | **86.5** | 79.7 | 83.4 | **86.5** |
+ | MMBench-CN<sub>test</sub> | 86.3 | 80.7 | 82.1 | **86.6** |
+ | MMBench-V1.1<sub>test</sub> | 85.5 | 78.5 | 82.2 | **85.9** |
+ | MMT-Bench<sub>test</sub> | 63.4 | - | 65.5 | **71.7** |
+ | MMStar | 67.1 | 62.2 | 63.9 | **68.3** |
+ | MMVet<sub>GPT-4-Turbo</sub> | 65.7 | 66.0 | 69.1 | **74.0** |
+ | HallBench<sub>avg</sub> | 55.2 | 49.9 | 55.0 | **58.1** |
+ | MathVista<sub>testmini</sub> | 67.5 | 67.7 | 63.8 | **70.5** |
+ | MathVision | 16.97 | - | **30.4** | 25.9 |
+
+ ### Video Benchmarks
+
+ | Benchmark | Previous SoTA<br><sup>(Open-source LVLM)</sup> | Gemini 1.5-Pro | GPT-4o | **Qwen2-VL-72B** |
+ | :--- | :---: | :---: | :---: | :---: |
+ | MVBench | 69.6 | - | - | **73.6** |
+ | PerceptionTest<sub>test</sub> | 66.9 | - | - | **68.0** |
+ | EgoSchema<sub>test</sub> | 62.0 | 63.2 | 72.2 | **77.9** |
+ | Video-MME<br><sub>(wo/w subs)</sub> | 66.3/69.6 | **75.0**/**81.3** | 71.9/77.2 | 71.2/77.8 |
+
+ ### Agent Benchmarks
+
+ | | Benchmark | Metric | Previous SoTA | GPT-4o | **Qwen2-VL-72B** |
+ | :-- | :-- | :--: | :--: | :--: | :--: |
+ | General | FnCall<sup>[1]</sup> | TM | - | 90.2 | **93.1** |
+ | | | EM | - | 50.0 | **53.2** |
+ | Game | Number Line | SR | 89.4<sup>[2]</sup> | 91.5 | **100.0** |
+ | | BlackJack | SR | 40.2<sup>[2]</sup> | 34.5 | **42.6** |
+ | | EZPoint | SR | 50.0<sup>[2]</sup> | 85.5 | **100.0** |
+ | | Point24 | SR | 2.6<sup>[2]</sup> | 3.0 | **4.5** |
+ | Android | AITZ | TM | 83.0<sup>[3]</sup> | 70.0 | **89.6** |
+ | | | EM | 47.7<sup>[3]</sup> | 35.3 | **72.1** |
+ | AI2THOR | ALFRED<sub>valid-unseen</sub> | SR | 67.7<sup>[4]</sup> | - | **67.8** |
+ | | | GC | 75.3<sup>[4]</sup> | - | **75.8** |
+ | VLN | R2R<sub>valid-unseen</sub> | SR | **79.0** | 43.7<sup>[5]</sup> | 51.7 |
+ | | REVERIE<sub>valid-unseen</sub> | SR | **61.0** | 31.6<sup>[5]</sup> | 31.0 |
+
+ SR, GC, TM and EM are short for success rate, goal-condition success, type match and exact match. ALFRED is supported by SAM<sup>[6]</sup>.
+ 1. Self-Curated Function Call Benchmark by Qwen Team
+ 2. Fine-Tuning Large Vision-Language Models as Decision-Making Agents via Reinforcement Learning
+ 3. Android in the Zoo: Chain-of-Action-Thought for GUI Agents
+ 4. ThinkBot: Embodied Instruction Following with Thought Chain Reasoning
+ 5. MapGPT: Map-Guided Prompting with Adaptive Path Planning for Vision-and-Language Navigation
+ 6. Segment Anything.
+
+ ### Multilingual Benchmarks
+
+ <table style="width:75%; text-align:center;">
+ <tr>
+ <th>Models</th>
+ <td>AR</td>
+ <td>DE</td>
+ <td>FR</td>
+ <td>IT</td>
+ <td>JA</td>
+ <td>KO</td>
+ <td>RU</td>
+ <td>TH</td>
+ <td>VI</td>
+ <td>AVG</td>
+ </tr>
+ <tr>
+ <th align="left">Qwen2-VL-72B</th>
+ <td>20.7</td>
+ <td>36.5</td>
+ <td>44.1</td>
+ <td>42.8</td>
+ <td>21.6</td>
+ <td>37.4</td>
+ <td>15.6</td>
+ <td>17.7</td>
+ <td>41.6</td>
+ <td><b>30.9</b></td>
+ </tr>
+ <tr>
+ <th align="left">GPT-4o</th>
+ <td>20.2</td>
+ <td>34.2</td>
+ <td>41.2</td>
+ <td>32.7</td>
+ <td>20.0</td>
+ <td>33.9</td>
+ <td>11.5</td>
+ <td>22.5</td>
+ <td>34.2</td>
+ <td>27.8</td>
+ </tr>
+ <tr>
+ <th align="left">Claude3 Opus</th>
+ <td>15.1</td>
+ <td>33.4</td>
+ <td>40.6</td>
+ <td>34.4</td>
+ <td>19.4</td>
+ <td>27.2</td>
+ <td>13.0</td>
+ <td>19.5</td>
+ <td>29.1</td>
+ <td>25.7</td>
+ </tr>
+ <tr>
+ <th align="left">Gemini Ultra</th>
+ <td>14.7</td>
+ <td>32.3</td>
+ <td>40.0</td>
+ <td>31.8</td>
+ <td>12.3</td>
+ <td>17.2</td>
+ <td>11.8</td>
+ <td>20.3</td>
+ <td>28.6</td>
+ <td>23.2</td>
+ </tr>
+ </table>
+
+ ## Requirements
+
+ The code for Qwen2-VL is available in the latest Hugging Face `transformers`, and we advise you to build from source with the command `pip install git+https://github.com/huggingface/transformers`; otherwise you might encounter the following error:
+
+ ```
+ KeyError: 'qwen2_vl'
+ ```
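+
+ As a quick sanity check before loading the model, you can print the installed version (to our knowledge Qwen2-VL support first shipped in `transformers` 4.45.0; treat that exact number as our assumption rather than part of this card):
+
+ ```python
+ import transformers
+
+ # Versions that predate the "qwen2_vl" architecture registration raise
+ # KeyError: 'qwen2_vl' when the model config is loaded.
+ print(transformers.__version__)
+ ```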
+
+ ## Quickstart
+
+ We offer a toolkit to help you handle various types of visual input more conveniently, including base64, URLs, and interleaved images and videos. You can install it with the following command:
+
+ ```bash
+ pip install qwen-vl-utils
+ ```
+
+ Here is a code snippet showing how to use the chat model with `transformers` and `qwen_vl_utils`:
+
+ ```python
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ # default: Load the model on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "Qwen/Qwen2-VL-72B-Instruct", torch_dtype="auto", device_map="auto"
+ )
+
+ # We recommend enabling flash_attention_2 for better acceleration and memory saving,
+ # especially in multi-image and video scenarios (requires `import torch`):
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "Qwen/Qwen2-VL-72B-Instruct",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # default processor
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-72B-Instruct")
+
+ # The default range for the number of visual tokens per image in the model is 4-16384.
+ # You can set min_pixels and max_pixels according to your needs, such as a token count
+ # range of 256-1280, to balance speed and memory usage.
+ # min_pixels = 256*28*28
+ # max_pixels = 1280*28*28
+ # processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-72B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
+
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
+             },
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+
+ # Preparation for inference
+ text = processor.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Inference: Generation of the output
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
+ <details>
+ <summary>Without qwen_vl_utils</summary>
+
+ ```python
+ from PIL import Image
+ import requests
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+
+ # Load the model in half-precision on the available device(s)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "Qwen/Qwen2-VL-72B-Instruct", torch_dtype="auto", device_map="auto"
+ )
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-72B-Instruct")
+
+ # Image
+ url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ conversation = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+             },
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+
+ # Preprocess the inputs
+ text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
+ # Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n'
+
+ inputs = processor(
+     text=[text_prompt], images=[image], padding=True, return_tensors="pt"
+ )
+ inputs = inputs.to("cuda")
+
+ # Inference: Generation of the output
+ output_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids = [
+     output_ids[len(input_ids) :]
+     for input_ids, output_ids in zip(inputs.input_ids, output_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
+ )
+ print(output_text)
+ ```
+ </details>
+ <details>
+ <summary>Multi image inference</summary>
+
+ ```python
+ # Messages containing multiple images and a text query
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "file:///path/to/image1.jpg"},
+             {"type": "image", "image": "file:///path/to/image2.jpg"},
+             {"type": "text", "text": "Identify the similarities between these images."},
+         ],
+     }
+ ]
+
+ # Preparation for inference
+ text = processor.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Inference
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
+ </details>
+
+ <details>
+ <summary>Video inference</summary>
+
+ ```python
+ # Messages containing a list of images as a video and a text query
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "video",
+                 "video": [
+                     "file:///path/to/frame1.jpg",
+                     "file:///path/to/frame2.jpg",
+                     "file:///path/to/frame3.jpg",
+                     "file:///path/to/frame4.jpg",
+                 ],
+                 "fps": 1.0,
+             },
+             {"type": "text", "text": "Describe this video."},
+         ],
+     }
+ ]
+ # Messages containing a video file and a text query
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "video",
+                 "video": "file:///path/to/video1.mp4",
+                 "max_pixels": 360 * 420,
+                 "fps": 1.0,
+             },
+             {"type": "text", "text": "Describe this video."},
+         ],
+     }
+ ]
+
+ # Preparation for inference
+ text = processor.apply_chat_template(
+     messages, tokenize=False, add_generation_prompt=True
+ )
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Inference
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
+ </details>
+
+ <details>
+ <summary>Batch inference</summary>
+
+ ```python
+ # Sample messages for batch inference
+ messages1 = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "file:///path/to/image1.jpg"},
+             {"type": "image", "image": "file:///path/to/image2.jpg"},
+             {"type": "text", "text": "What are the common elements in these pictures?"},
+         ],
+     }
+ ]
+ messages2 = [
+     {"role": "system", "content": "You are a helpful assistant."},
+     {"role": "user", "content": "Who are you?"},
+ ]
+ # Combine messages for batch processing
+ messages = [messages1, messages2]
+
+ # Preparation for batch inference
+ texts = [
+     processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
+     for msg in messages
+ ]
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=texts,
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Batch Inference
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_texts = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_texts)
+ ```
+ </details>
+
+ ### More Usage Tips
+
+ For input images, we support local files, base64, and URLs. For videos, we currently only support local files.
+
+ ```python
+ # You can directly insert a local file path, a URL, or a base64-encoded image
+ # into the position where you want it in the text.
+ ## Local file path
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "file:///path/to/your/image.jpg"},
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ ## Image URL
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "http://path/to/your/image.jpg"},
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ ## Base64 encoded image
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "data:image;base64,/9j/..."},
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ ```
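+
+ For instance, you can produce the base64 form shown above from a local file with nothing but the standard library (a minimal sketch; the variable names are ours):
+
+ ```python
+ import base64
+
+ # Read the raw image bytes and encode them for the "data:image;base64,..." field.
+ with open("/path/to/your/image.jpg", "rb") as f:
+     image_b64 = base64.b64encode(f.read()).decode("utf-8")
+
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": f"data:image;base64,{image_b64}"},
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ ```
+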
+ #### Image Resolution for performance boost
+
+ The model supports a wide range of resolution inputs. By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs, such as a token-count range of 256-1280, to balance speed and memory usage.
+
+ ```python
+ min_pixels = 256 * 28 * 28
+ max_pixels = 1280 * 28 * 28
+ processor = AutoProcessor.from_pretrained(
+     "Qwen/Qwen2-VL-72B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels
+ )
+ ```
+
+ Besides, we provide two methods for fine-grained control over the image size input to the model:
+
+ 1. Define `min_pixels` and `max_pixels`: images will be resized to maintain their aspect ratio within the range of `min_pixels` and `max_pixels`.
+
+ 2. Specify exact dimensions: directly set `resized_height` and `resized_width`. These values will be rounded to the nearest multiple of 28.
+
+ ```python
+ # min_pixels and max_pixels
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "file:///path/to/your/image.jpg",
+                 "min_pixels": 50176,
+                 "max_pixels": 50176,
+             },
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ # resized_height and resized_width
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "file:///path/to/your/image.jpg",
+                 "resized_height": 280,
+                 "resized_width": 420,
+             },
+             {"type": "text", "text": "Describe this image."},
+         ],
+     }
+ ]
+ ```
+
+ ## Limitations
+
+ While Qwen2-VL is applicable to a wide range of visual tasks, it is equally important to understand its limitations. Here are some known restrictions:
+
+ 1. Lack of Audio Support: the current model does **not comprehend audio information** within videos.
+ 2. Data timeliness: our image dataset is **updated until June 2023**, and information subsequent to this date may not be covered.
+ 3. Constraints in Individuals and Intellectual Property (IP): the model's capacity to recognize specific individuals or IPs is limited, potentially failing to comprehensively cover all well-known personalities or brands.
+ 4. Limited Capacity for Complex Instructions: when faced with intricate multi-step instructions, the model's understanding and execution capabilities require enhancement.
+ 5. Insufficient Counting Accuracy: particularly in complex scenes, the accuracy of object counting is not high, necessitating further improvements.
+ 6. Weak Spatial Reasoning Skills: especially in 3D spaces, the model's inference of object positional relationships is inadequate, making it difficult to precisely judge the relative positions of objects.
+
+ These limitations serve as ongoing directions for model optimization and improvement, and we are committed to continually enhancing the model's performance and scope of application.
+
+ ## Citation
+
+ If you find our work helpful, feel free to cite us.
+
+ ```
+ @article{Qwen2VL,
+   title={Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution},
+   author={Wang, Peng and Bai, Shuai and Tan, Sinan and Wang, Shijie and Fan, Zhihao and Bai, Jinze and Chen, Keqin and Liu, Xuejing and Wang, Jialin and Ge, Wenbin and Fan, Yang and Dang, Kai and Du, Mengfei and Ren, Xuancheng and Men, Rui and Liu, Dayiheng and Zhou, Chang and Zhou, Jingren and Lin, Junyang},
+   journal={arXiv preprint arXiv:2409.12191},
+   year={2024}
+ }
+
+ @article{Qwen-VL,
+   title={Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond},
+   author={Bai, Jinze and Bai, Shuai and Yang, Shusheng and Wang, Shijie and Tan, Sinan and Wang, Peng and Lin, Junyang and Zhou, Chang and Zhou, Jingren},
+   journal={arXiv preprint arXiv:2308.12966},
+   year={2023}
+ }
+ ```
added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+ }
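
The `add_vision_id` flag in the template above is what switches on the "Picture 1:" / "Video 1:" labels. A minimal sketch of exercising it (assuming, as in recent `transformers` releases, that extra keyword arguments to `apply_chat_template` are forwarded into the template):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-72B-Instruct")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "file:///path/to/image1.jpg"},
            {"type": "image", "image": "file:///path/to/image2.jpg"},
            {"type": "text", "text": "Compare the two pictures."},
        ],
    }
]
# With add_vision_id=True each image placeholder is prefixed with "Picture N:".
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, add_vision_id=True
)
print(text)
```
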
config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "Qwen/Qwen2-VL-72B-Instruct",
+   "architectures": [
+     "Qwen2VLForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 8192,
+   "image_token_id": 151655,
+   "initializer_range": 0.02,
+   "intermediate_size": 29568,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 80,
+   "model_type": "qwen2_vl",
+   "num_attention_heads": 64,
+   "num_hidden_layers": 80,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": {
+     "mrope_section": [
+       16,
+       24,
+       24
+     ],
+     "rope_type": "default",
+     "type": "default"
+   },
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.47.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "video_token_id": 151656,
+   "vision_config": {
+     "hidden_size": 8192,
+     "in_chans": 3,
+     "model_type": "qwen2_vl",
+     "spatial_patch_size": 14
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652,
+   "vision_token_id": 151654,
+   "vocab_size": 152064
+ }
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "top_k": 1,
+   "top_p": 0.001,
+   "transformers_version": "4.47.1"
+ }
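
Note that with a `top_k` of 1, decoding is effectively greedy even though `do_sample` is true. These defaults can be overridden per call; a sketch reusing `model` and `inputs` from the README's quickstart, with illustrative values of our own:

```python
# Override the repo's near-greedy defaults with real sampling.
generated_ids = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,   # illustrative values, not tuned recommendations
    top_k=50,
    top_p=0.9,
    repetition_penalty=1.05,
)
```
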
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:173b637650564c655509a04d0c27940468f485a49c9d01aa1b2a419cc31927f8
+ size 4676624160
model-00002-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc6e398991ba600b2aa0d92dc85f36054175a85e05ee8f58d5a5b7051e7a5296
+ size 4781670288
model-00003-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ee1e0fa89b73f7410c163211cf0d70814b433ca61cba47aba35b16ac0a7d244
+ size 4964101360
model-00004-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d347657c8c0b9fbde2bdd2df3cc085296df2640f0329c003dcd8a90a050a7936
+ size 4781637296
model-00005-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ef06577545049565daa42f5fc8df8f84fea37cbee3798b94ff5a177d015df0a
+ size 4781670312
model-00006-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acdcc7a31b004d6aeca33098f97c6ecc82861f2c50d66b205cadef39a6e80fa8
+ size 4781670320
model-00007-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e85fa83b39a8174d56d66f45f0e52eb38be008adef4d1f4441cd69b5bcdf7987
+ size 4964101384
model-00008-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:422da8a0f2c4fa2b822223ae95efcdad344639ccf2f4ef6519cff4e5b284abe9
+ size 4781637328
model-00009-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06c965df9592a9e0cc3f78133352c746c0ba1feaa64ea6d1fd3a1bf17d0f4117
+ size 4781670320
model-00010-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:482a038ee1f7d24e7404953ffbdd7461599297ed3e4efdb98bc84d44a3edb373
+ size 4781670320
model-00011-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0c6fd07adaa854018e0682be875838263555e686db9b934da57096bc26b9ae
+ size 4964101384
model-00012-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ebb27d25866b591c7e6bb2f9051ac5a40e56a0380bd201962809de5bd966965
+ size 4781637328
model-00013-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28bc2ce26546777d3e5e26b5d7f8d1a49a5423308c1005eb0bed563f6b29e22b
+ size 4781670320
model-00014-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30306bb9518a1507e3a9de1a9ad7714a19c971731900c25565d6673cfd945f81
+ size 4781670320
model-00015-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1ad570726097c1109a9d66443d2fdf1a3c9761cbc00c36b5725760d0cacadb6
+ size 4964101384
model-00016-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1902b24ccd25a7d50794a634e3d95fec1ab48c5279a2e273ac5400d3b5c5e9c7
+ size 4781637328
model-00017-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c06cb18bb9b03de98db70d9690d6c8b3cfe5a07f53449cbd6c08851b1eb9b31f
+ size 4781670320
model-00018-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4b43bfd4292d30877642f53961501dfff9c2dbdc1668620f70df30b2fb3d601
+ size 4781670320
model-00019-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3687881e4d51b81570052e2832abf477386b13c3ac72f66ddcafff55135c9ee
+ size 4964101384
model-00020-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5180ab2ad20d9772f0cf2b417abb079795c35d77e19cd4d03b917ed7e5cf1beb
+ size 4781637328
model-00021-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c84e78da453cd82fbe4a5419d0d9c1a2a8f83edf60811c06631ceb19f25e360
+ size 4781670320
model-00022-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7823236b8ca19853e12f1d7140bd608e62173d3e6719975f070790656cb733a3
+ size 4781670320
model-00023-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb83704ee504f75c6d8222533746dec3714c6a236d8aa8a9cf367db296b4bae
+ size 4964101384
model-00024-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0a681fd9876bdb9c5aed3dd74c51a6a7294821b836944edbc04e059888d05dc
+ size 4781637328
model-00025-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ce15c1fa9e2bc4c819ad61ebc485772ce4d504d34e67d96772d1ea59899d883
+ size 4781670320
model-00026-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18ae48f790b3d396c568c6739d540c601b18e9ab39681153cb4c54782d58c12c
+ size 4781670320
model-00027-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36e7e78dd9aed2394d48bcc52e7261e1131c8a9222832611a9507e44a2f414b9
+ size 4964101384
model-00028-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df42ecd5ff7346da355c1b3d7039f00e0f71a4ede2e7c89a672463cc04a5b576
+ size 4781637328
model-00029-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1453fd35bb3495b4cfd990f13dd149d3fe26b60eb89b34904f902f4718bd20a8
+ size 4781670320
model-00030-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65ab9fa8fa7488c9463205736ff9769cd751fe529d7d115a1fc62bb7f55df023
+ size 4479675632
model-00031-of-00031.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58d90147fcf5bcc7691d9f1f01993b879f4c70d758272d3ce41f9106e3a5fa05
+ size 2491416704
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "Qwen2VLImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "max_pixels": 1806336,
+   "merge_size": 2,
+   "min_pixels": 3136,
+   "patch_size": 14,
+   "processor_class": "Qwen2VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "max_pixels": 12845056,
+     "min_pixels": 3136
+   },
+   "temporal_patch_size": 2
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091aa7594dc2fcfbfa06b9e3c22a5f0562ac14f30375c13af7309407a0e67b8a
+ size 11420371
tokenizer_config.json ADDED
@@ -0,0 +1,146 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "max_pixels": 1806336,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "left",
+   "processor_class": "Qwen2VLProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff