AdithyaSK committed (verified)
Commit 3bfff56 · 1 Parent(s): f3459f1

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,108 @@
---
pipeline_tag: image-text-to-text
library_name: transformers
language:
- multilingual
tags:
- got
- vision-language
- ocr2.0
- custom_code
license: apache-2.0
---

<h1>General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model</h1>

[🔋Online Demo](https://huggingface.co/spaces/ucaslcl/GOT_online) | [🌟GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/) | [📜Paper](https://arxiv.org/abs/2409.01704)

[Haoran Wei*](https://scholar.google.com/citations?user=J4naK0MAAAAJ&hl=en), Chenglong Liu*, Jinyue Chen, Jia Wang, Lingyu Kong, Yanming Xu, [Zheng Ge](https://joker316701882.github.io/), Liang Zhao, [Jianjian Sun](https://scholar.google.com/citations?user=MVZrGkYAAAAJ&hl=en), [Yuang Peng](https://scholar.google.com.hk/citations?user=J0ko04IAAAAJ&hl=zh-CN&oi=ao), Chunrui Han, [Xiangyu Zhang](https://scholar.google.com/citations?user=yuB-cfoAAAAJ&hl=en)

![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/6653eee7a2d7a882a805ab95/QCEFY-M_YG3Bp5fn1GQ8X.jpeg)

## Usage

Inference runs with Hugging Face Transformers on NVIDIA GPUs. The requirements below were tested with Python 3.10:

```
torch==2.0.1
torchvision==0.15.2
transformers==4.37.2
tiktoken==0.6.0
verovio==4.3.1
accelerate==0.28.0
```

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
model = model.eval().cuda()

# path to your test image
image_file = 'xxx.jpg'

# plain-text OCR
res = model.chat(tokenizer, image_file, ocr_type='ocr')

# formatted-text OCR:
# res = model.chat(tokenizer, image_file, ocr_type='format')

# fine-grained OCR:
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_box='')
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_box='')
# res = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_color='')
# res = model.chat(tokenizer, image_file, ocr_type='format', ocr_color='')

# multi-crop OCR:
# res = model.chat_crop(tokenizer, image_file, ocr_type='ocr')
# res = model.chat_crop(tokenizer, image_file, ocr_type='format')

# render the formatted OCR results:
# res = model.chat(tokenizer, image_file, ocr_type='format', render=True, save_render_file='./demo.html')

print(res)
```
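The commented-out variants above take extra arguments. A minimal sketch of box-guided, color-guided, and rendered OCR is shown below; the box coordinates, color name, and output path are illustrative placeholders of mine, not values from this repository:

```python
# Fine-grained OCR restricted to a region: ocr_box is a string holding
# [x1, y1, x2, y2] pixel coordinates (modeling_GOT.py rescales them internally).
res_box = model.chat(tokenizer, image_file, ocr_type='format', ocr_box='[100, 100, 600, 400]')

# Color-guided OCR: ocr_color names the colored box drawn on the input image (assumed 'red' here).
res_color = model.chat(tokenizer, image_file, ocr_type='ocr', ocr_color='red')

# Render the formatted output to a standalone HTML file.
res_render = model.chat(tokenizer, image_file, ocr_type='format', render=True, save_render_file='./demo.html')
```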
More details about `ocr_type`, `ocr_box`, `ocr_color`, and `render` can be found in our GitHub repository.
Our training code is available on our [GitHub](https://github.com/Ucas-HaoranWei/GOT-OCR2.0/).

## More Multimodal Projects

👏 You are welcome to explore more multimodal projects from our team:

[Vary](https://github.com/Ucas-HaoranWei/Vary) | [Fox](https://github.com/ucaslcl/Fox) | [OneChart](https://github.com/LingyvKong/OneChart)

## Citation

If you find our work helpful, please consider citing our papers 📝 and liking this project ❤️!

```bibtex
@article{wei2024general,
  title={General OCR Theory: Towards OCR-2.0 via a Unified End-to-end Model},
  author={Wei, Haoran and Liu, Chenglong and Chen, Jinyue and Wang, Jia and Kong, Lingyu and Xu, Yanming and Ge, Zheng and Zhao, Liang and Sun, Jianjian and Peng, Yuang and others},
  journal={arXiv preprint arXiv:2409.01704},
  year={2024}
}
@article{liu2024focus,
  title={Focus Anywhere for Fine-grained Multi-page Document Understanding},
  author={Liu, Chenglong and Wei, Haoran and Chen, Jinyue and Kong, Lingyu and Ge, Zheng and Zhu, Zining and Zhao, Liang and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
  journal={arXiv preprint arXiv:2405.14295},
  year={2024}
}
@article{wei2023vary,
  title={Vary: Scaling up the Vision Vocabulary for Large Vision-Language Models},
  author={Wei, Haoran and Kong, Lingyu and Chen, Jinyue and Zhao, Liang and Ge, Zheng and Yang, Jinrong and Sun, Jianjian and Han, Chunrui and Zhang, Xiangyu},
  journal={arXiv preprint arXiv:2312.06109},
  year={2023}
}
```
assets/got_logo.png ADDED
assets/got_support.jpg ADDED
assets/train_sample.jpg ADDED
config.json ADDED
@@ -0,0 +1,38 @@
{
  "_name_or_path": "ucaslcl/GOT-OCR2_0",
  "architectures": [
    "GOTQwenForCausalLM"
  ],
  "auto_map": {
    "AutoConfig": "modeling_GOT.GOTConfig",
    "AutoModel": "modeling_GOT.GOTQwenForCausalLM"
  },
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "freeze_vision_tower": false,
  "hidden_act": "silu",
  "hidden_size": 1024,
  "im_end_token": 151858,
  "im_patch_token": 151859,
  "im_start_token": 151857,
  "image_token_len": 256,
  "initializer_range": 0.02,
  "intermediate_size": 2816,
  "max_position_embeddings": 32768,
  "max_window_layers": 21,
  "model_type": "GOT",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "num_key_value_heads": 16,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.37.2",
  "use_cache": true,
  "use_im_start_end": true,
  "use_sliding_window": false,
  "vocab_size": 151860
}
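As a quick sanity check (my own sketch, not part of this upload), the custom `auto_map` entries above resolve through `trust_remote_code`, and the image-token ids match the constants hard-coded in `modeling_GOT.py`:

```python
from transformers import AutoConfig

# Loading the config pulls modeling_GOT.GOTConfig via the auto_map above.
cfg = AutoConfig.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
print(cfg.model_type)                                             # GOT
print(cfg.image_token_len, cfg.vocab_size)                        # 256 151860
print(cfg.im_start_token, cfg.im_end_token, cfg.im_patch_token)   # 151857 151858 151859
```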
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "max_new_tokens": 2048,
  "transformers_version": "4.37.2"
}
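These defaults ship with the model; a small sketch (mine) of inspecting them is below. Note that `model.chat()` in `modeling_GOT.py` passes `max_new_tokens=4096` explicitly at call time, overriding the 2048 default:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained('ucaslcl/GOT-OCR2_0')
print(gen_cfg.max_new_tokens)   # 2048
print(gen_cfg.eos_token_id)     # 151643
```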
got_vision_b.py ADDED
@@ -0,0 +1,468 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+ from typing import Optional, Tuple, Type
4
+ from functools import partial
5
+ import torch.nn as nn
6
+ from typing import Type
7
+
8
+
9
+
10
+ class MLPBlock(nn.Module):
11
+ def __init__(
12
+ self,
13
+ embedding_dim: int,
14
+ mlp_dim: int,
15
+ act: Type[nn.Module] = nn.GELU,
16
+ ) -> None:
17
+ super().__init__()
18
+ self.lin1 = nn.Linear(embedding_dim, mlp_dim)
19
+ self.lin2 = nn.Linear(mlp_dim, embedding_dim)
20
+ self.act = act()
21
+
22
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
23
+ return self.lin2(self.act(self.lin1(x)))
24
+
25
+
26
+
27
+ class LayerNorm2d(nn.Module):
28
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
29
+ super().__init__()
30
+ self.weight = nn.Parameter(torch.ones(num_channels))
31
+ self.bias = nn.Parameter(torch.zeros(num_channels))
32
+ self.eps = eps
33
+
34
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
35
+ u = x.mean(1, keepdim=True)
36
+ s = (x - u).pow(2).mean(1, keepdim=True)
37
+ x = (x - u) / torch.sqrt(s + self.eps)
38
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
39
+ return x
40
+
41
+
42
+
43
+ class ImageEncoderViT(nn.Module):
44
+ def __init__(
45
+ self,
46
+ img_size: int = 1024,
47
+ patch_size: int = 16,
48
+ in_chans: int = 3,
49
+ embed_dim: int = 768,
50
+ depth: int = 12,
51
+ num_heads: int = 12,
52
+ mlp_ratio: float = 4.0,
53
+ out_chans: int = 256,
54
+ qkv_bias: bool = True,
55
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
56
+ act_layer: Type[nn.Module] = nn.GELU,
57
+ use_abs_pos: bool = True,
58
+ use_rel_pos: bool = False,
59
+ rel_pos_zero_init: bool = True,
60
+ window_size: int = 0,
61
+ global_attn_indexes: Tuple[int, ...] = (),
62
+ ) -> None:
63
+ """
64
+ Args:
65
+ img_size (int): Input image size.
66
+ patch_size (int): Patch size.
67
+ in_chans (int): Number of input image channels.
68
+ embed_dim (int): Patch embedding dimension.
69
+ depth (int): Depth of ViT.
70
+ num_heads (int): Number of attention heads in each ViT block.
71
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
72
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
73
+ norm_layer (nn.Module): Normalization layer.
74
+ act_layer (nn.Module): Activation layer.
75
+ use_abs_pos (bool): If True, use absolute positional embeddings.
76
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
77
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
78
+ window_size (int): Window size for window attention blocks.
79
+ global_attn_indexes (list): Indexes for blocks using global attention.
80
+ """
81
+ super().__init__()
82
+ self.img_size = img_size
83
+
84
+ self.patch_embed = PatchEmbed(
85
+ kernel_size=(patch_size, patch_size),
86
+ stride=(patch_size, patch_size),
87
+ in_chans=in_chans,
88
+ embed_dim=embed_dim,
89
+ )
90
+
91
+ self.pos_embed: Optional[nn.Parameter] = None
92
+ if use_abs_pos:
93
+ # Initialize absolute positional embedding with pretrain image size.
94
+ self.pos_embed = nn.Parameter(
95
+ torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
96
+ )
97
+
98
+ self.blocks = nn.ModuleList()
99
+ for i in range(depth):
100
+ block = Block(
101
+ dim=embed_dim,
102
+ num_heads=num_heads,
103
+ mlp_ratio=mlp_ratio,
104
+ qkv_bias=qkv_bias,
105
+ norm_layer=norm_layer,
106
+ act_layer=act_layer,
107
+ use_rel_pos=use_rel_pos,
108
+ rel_pos_zero_init=rel_pos_zero_init,
109
+ window_size=window_size if i not in global_attn_indexes else 0,
110
+ input_size=(img_size // patch_size, img_size // patch_size),
111
+ )
112
+ self.blocks.append(block)
113
+
114
+ self.neck = nn.Sequential(
115
+ nn.Conv2d(
116
+ embed_dim,
117
+ out_chans,
118
+ kernel_size=1,
119
+ bias=False,
120
+ ),
121
+ LayerNorm2d(out_chans),
122
+ nn.Conv2d(
123
+ out_chans,
124
+ out_chans,
125
+ kernel_size=3,
126
+ padding=1,
127
+ bias=False,
128
+ ),
129
+ LayerNorm2d(out_chans),
130
+ )
131
+
132
+
133
+ self.net_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
134
+ self.net_3 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1, bias=False)
135
+
136
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
137
+ x = self.patch_embed(x)
138
+ if self.pos_embed is not None:
139
+ x = x + self.pos_embed
140
+
141
+ for blk in self.blocks:
142
+ x = blk(x)
143
+
144
+ x = self.neck(x.permute(0, 3, 1, 2))
145
+ x = self.net_2(x)
146
+ x = self.net_3(x)
147
+
148
+
149
+ return x
150
+
151
+
152
+ class Block(nn.Module):
153
+ """Transformer blocks with support of window attention and residual propagation blocks"""
154
+
155
+ def __init__(
156
+ self,
157
+ dim: int,
158
+ num_heads: int,
159
+ mlp_ratio: float = 4.0,
160
+ qkv_bias: bool = True,
161
+ norm_layer: Type[nn.Module] = nn.LayerNorm,
162
+ act_layer: Type[nn.Module] = nn.GELU,
163
+ use_rel_pos: bool = False,
164
+ rel_pos_zero_init: bool = True,
165
+ window_size: int = 0,
166
+ input_size: Optional[Tuple[int, int]] = None,
167
+ ) -> None:
168
+ """
169
+ Args:
170
+ dim (int): Number of input channels.
171
+ num_heads (int): Number of attention heads in each ViT block.
172
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
173
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
174
+ norm_layer (nn.Module): Normalization layer.
175
+ act_layer (nn.Module): Activation layer.
176
+ use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
177
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
178
+ window_size (int): Window size for window attention blocks. If it equals 0, then
179
+ use global attention.
180
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
181
+ positional parameter size.
182
+ """
183
+ super().__init__()
184
+ self.norm1 = norm_layer(dim)
185
+ self.attn = Attention(
186
+ dim,
187
+ num_heads=num_heads,
188
+ qkv_bias=qkv_bias,
189
+ use_rel_pos=use_rel_pos,
190
+ rel_pos_zero_init=rel_pos_zero_init,
191
+ input_size=input_size if window_size == 0 else (window_size, window_size),
192
+ )
193
+
194
+ self.norm2 = norm_layer(dim)
195
+ self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
196
+
197
+ self.window_size = window_size
198
+
199
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
200
+ shortcut = x
201
+ x = self.norm1(x)
202
+ # Window partition
203
+ if self.window_size > 0:
204
+ H, W = x.shape[1], x.shape[2]
205
+ x, pad_hw = window_partition(x, self.window_size)
206
+
207
+ x = self.attn(x)
208
+ # Reverse window partition
209
+ if self.window_size > 0:
210
+ x = window_unpartition(x, self.window_size, pad_hw, (H, W))
211
+
212
+ x = shortcut + x
213
+ x = x + self.mlp(self.norm2(x))
214
+
215
+ return x
216
+
217
+
218
+ class Attention(nn.Module):
219
+ """Multi-head Attention block with relative position embeddings."""
220
+
221
+ def __init__(
222
+ self,
223
+ dim: int,
224
+ num_heads: int = 8,
225
+ qkv_bias: bool = True,
226
+ use_rel_pos: bool = False,
227
+ rel_pos_zero_init: bool = True,
228
+ input_size: Optional[Tuple[int, int]] = None,
229
+ ) -> None:
230
+ """
231
+ Args:
232
+ dim (int): Number of input channels.
233
+ num_heads (int): Number of attention heads.
234
+ qkv_bias (bool): If True, add a learnable bias to query, key, value.
235
+ rel_pos (bool): If True, add relative positional embeddings to the attention map.
236
+ rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
237
+ input_size (tuple(int, int) or None): Input resolution for calculating the relative
238
+ positional parameter size.
239
+ """
240
+ super().__init__()
241
+ self.num_heads = num_heads
242
+ head_dim = dim // num_heads
243
+ self.scale = head_dim**-0.5
244
+
245
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
246
+ self.proj = nn.Linear(dim, dim)
247
+
248
+ self.use_rel_pos = use_rel_pos
249
+ if self.use_rel_pos:
250
+ assert (
251
+ input_size is not None
252
+ ), "Input size must be provided if using relative positional encoding."
253
+ # initialize relative positional embeddings
254
+ self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
255
+ self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
256
+
257
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
258
+ B, H, W, _ = x.shape
259
+ # qkv with shape (3, B, nHead, H * W, C)
260
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
261
+ # q, k, v with shape (B * nHead, H * W, C)
262
+ q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
263
+
264
+ attn = (q * self.scale) @ k.transpose(-2, -1)
265
+
266
+ if self.use_rel_pos:
267
+ attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
268
+
269
+ attn = attn.softmax(dim=-1)
270
+ x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
271
+ x = self.proj(x)
272
+
273
+ return x
274
+
275
+
276
+ def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
277
+ """
278
+ Partition into non-overlapping windows with padding if needed.
279
+ Args:
280
+ x (tensor): input tokens with [B, H, W, C].
281
+ window_size (int): window size.
282
+
283
+ Returns:
284
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
285
+ (Hp, Wp): padded height and width before partition
286
+ """
287
+ B, H, W, C = x.shape
288
+
289
+ pad_h = (window_size - H % window_size) % window_size
290
+ pad_w = (window_size - W % window_size) % window_size
291
+ if pad_h > 0 or pad_w > 0:
292
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
293
+ Hp, Wp = H + pad_h, W + pad_w
294
+
295
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
296
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
297
+ return windows, (Hp, Wp)
298
+
299
+
300
+ def window_unpartition(
301
+ windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
302
+ ) -> torch.Tensor:
303
+ """
304
+ Window unpartition into original sequences and removing padding.
305
+ Args:
306
+ windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
307
+ window_size (int): window size.
308
+ pad_hw (Tuple): padded height and width (Hp, Wp).
309
+ hw (Tuple): original height and width (H, W) before padding.
310
+
311
+ Returns:
312
+ x: unpartitioned sequences with [B, H, W, C].
313
+ """
314
+ Hp, Wp = pad_hw
315
+ H, W = hw
316
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
317
+ x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
318
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
319
+
320
+ if Hp > H or Wp > W:
321
+ x = x[:, :H, :W, :].contiguous()
322
+ return x
323
+
324
+
325
+ def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
326
+ """
327
+ Get relative positional embeddings according to the relative positions of
328
+ query and key sizes.
329
+ Args:
330
+ q_size (int): size of query q.
331
+ k_size (int): size of key k.
332
+ rel_pos (Tensor): relative position embeddings (L, C).
333
+
334
+ Returns:
335
+ Extracted positional embeddings according to relative positions.
336
+ """
337
+ max_rel_dist = int(2 * max(q_size, k_size) - 1)
338
+ # Interpolate rel pos if needed.
339
+ if rel_pos.shape[0] != max_rel_dist:
340
+ # Interpolate rel pos.
341
+ rel_pos_resized = F.interpolate(
342
+ rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
343
+ size=max_rel_dist,
344
+ mode="linear",
345
+ )
346
+ rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
347
+ else:
348
+ rel_pos_resized = rel_pos
349
+
350
+ # Scale the coords with short length if shapes for q and k are different.
351
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
352
+ k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
353
+ relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
354
+
355
+ return rel_pos_resized[relative_coords.long()]
356
+
357
+
358
+ def add_decomposed_rel_pos(
359
+ attn: torch.Tensor,
360
+ q: torch.Tensor,
361
+ rel_pos_h: torch.Tensor,
362
+ rel_pos_w: torch.Tensor,
363
+ q_size: Tuple[int, int],
364
+ k_size: Tuple[int, int],
365
+ ) -> torch.Tensor:
366
+ """
367
+ Args:
368
+ attn (Tensor): attention map.
369
+ q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
370
+ rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
371
+ rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
372
+ q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
373
+ k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
374
+
375
+ Returns:
376
+ attn (Tensor): attention map with added relative positional embeddings.
377
+ """
378
+ q_h, q_w = q_size
379
+ k_h, k_w = k_size
380
+ Rh = get_rel_pos(q_h, k_h, rel_pos_h)
381
+ Rw = get_rel_pos(q_w, k_w, rel_pos_w)
382
+
383
+ B, _, dim = q.shape
384
+ r_q = q.reshape(B, q_h, q_w, dim)
385
+ rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
386
+ rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
387
+
388
+ attn = (
389
+ attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
390
+ ).view(B, q_h * q_w, k_h * k_w)
391
+
392
+ return attn
393
+
394
+
395
+ class PatchEmbed(nn.Module):
396
+ """
397
+ Image to Patch Embedding.
398
+ """
399
+
400
+ def __init__(
401
+ self,
402
+ kernel_size: Tuple[int, int] = (16, 16),
403
+ stride: Tuple[int, int] = (16, 16),
404
+ padding: Tuple[int, int] = (0, 0),
405
+ in_chans: int = 3,
406
+ embed_dim: int = 768,
407
+ ) -> None:
408
+ """
409
+ Args:
410
+ kernel_size (Tuple): kernel size of the projection layer.
411
+ stride (Tuple): stride of the projection layer.
412
+ padding (Tuple): padding size of the projection layer.
413
+ in_chans (int): Number of input image channels.
414
+ embed_dim (int): Patch embedding dimension.
415
+ """
416
+ super().__init__()
417
+
418
+ self.proj = nn.Conv2d(
419
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
420
+ )
421
+
422
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
423
+ x = self.proj(x)
424
+ # B C H W -> B H W C
425
+ x = x.permute(0, 2, 3, 1)
426
+ return x
427
+
428
+
429
+
430
+ def build_GOT_vit_b(checkpoint=None):
431
+ return _build_GOT_vision(
432
+ encoder_embed_dim=768,
433
+ encoder_depth=12,
434
+ encoder_num_heads=12,
435
+ encoder_global_attn_indexes=[2, 5, 8, 11],
436
+ checkpoint=checkpoint,
437
+ )
438
+
439
+
440
+ def _build_GOT_vision(
441
+ encoder_embed_dim,
442
+ encoder_depth,
443
+ encoder_num_heads,
444
+ encoder_global_attn_indexes,
445
+ checkpoint=None,
446
+ ):
447
+ prompt_embed_dim = 256
448
+ image_size = 1024
449
+ vit_patch_size = 16
450
+ image_embedding_size = image_size // vit_patch_size
451
+ image_encoder=ImageEncoderViT(
452
+ depth=encoder_depth,
453
+ embed_dim=encoder_embed_dim,
454
+ img_size=image_size,
455
+ mlp_ratio=4,
456
+ norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
457
+ num_heads=encoder_num_heads,
458
+ patch_size=vit_patch_size,
459
+ qkv_bias=True,
460
+ use_rel_pos=True,
461
+ global_attn_indexes=encoder_global_attn_indexes,
462
+ window_size=14,
463
+ out_chans=prompt_embed_dim,
464
+ )
465
+
466
+
467
+ return image_encoder
468
+
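A minimal shape check for the encoder above (my own sketch; it assumes the downloaded folder is on the Python path so `got_vision_b` is importable): a 1024×1024 input comes out as a (B, 1024, 16, 16) feature map, which `modeling_GOT.py` flattens into 256 tokens of width 1024, matching `image_token_len: 256` in `config.json`.

```python
import torch
from got_vision_b import build_GOT_vit_b  # plain import, assuming we run inside this folder

encoder = build_GOT_vit_b().eval()
with torch.no_grad():
    feats = encoder(torch.zeros(1, 3, 1024, 1024))
print(feats.shape)                           # torch.Size([1, 1024, 16, 16])

tokens = feats.flatten(2).permute(0, 2, 1)   # same reshaping modeling_GOT.py applies before the projector
print(tokens.shape)                          # torch.Size([1, 256, 1024])
```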
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:77d6144039548b14253176b6eb264896bc39eba532f8894700f210a7fd2a5956
size 1432121416
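The three lines above are a Git LFS pointer, not the weights themselves; `from_pretrained` fetches the real ~1.4 GB safetensors file. A small sketch (mine; the local path is a placeholder) for verifying a manual download against the recorded size and digest:

```python
import hashlib
import os

path = 'model.safetensors'  # assumed local copy of the downloaded weights
print(os.path.getsize(path) == 1432121416)
sha = hashlib.sha256(open(path, 'rb').read()).hexdigest()
print(sha == '77d6144039548b14253176b6eb264896bc39eba532f8894700f210a7fd2a5956')
```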
modeling_GOT.py ADDED
@@ -0,0 +1,881 @@
1
+ from transformers import Qwen2Config, Qwen2Model, Qwen2ForCausalLM, StoppingCriteria, TextStreamer
2
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
3
+ from typing import List, Optional, Tuple, Union
4
+ from transformers.cache_utils import Cache
5
+ import requests
6
+ from PIL import Image
7
+ from io import BytesIO
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn import CrossEntropyLoss
11
+ from .got_vision_b import build_GOT_vit_b
12
+ from torchvision import transforms
13
+ from torchvision.transforms.functional import InterpolationMode
14
+ import dataclasses
15
+ ###
16
+
17
+ DEFAULT_IMAGE_TOKEN = "<image>"
18
+ DEFAULT_IMAGE_PATCH_TOKEN = '<imgpad>'
19
+ DEFAULT_IM_START_TOKEN = '<img>'
20
+ DEFAULT_IM_END_TOKEN = '</img>'
21
+
22
+ from enum import auto, Enum
23
+ class SeparatorStyle(Enum):
24
+ """Different separator style."""
25
+ SINGLE = auto()
26
+ TWO = auto()
27
+ MPT = auto()
28
+
29
+
30
+ @dataclasses.dataclass
31
+ class Conversation:
32
+ """A class that keeps all conversation history."""
33
+ system: str
34
+ roles: List[str]
35
+ messages: List[List[str]]
36
+ offset: int
37
+ sep_style: SeparatorStyle = SeparatorStyle.SINGLE
38
+ sep: str = "<|im_end|>"
39
+ sep2: str = None
40
+ version: str = "Unknown"
41
+
42
+ skip_next: bool = False
43
+
44
+ def get_prompt(self):
45
+ if self.sep_style == SeparatorStyle.SINGLE:
46
+ ret = self.system + self.sep + '\n'
47
+ for role, message in self.messages:
48
+ if message:
49
+ if type(message) is tuple:
50
+ message, _, _ = message
51
+ ret += role + ": " + message + self.sep
52
+ else:
53
+ ret += role + ":"
54
+ return ret
55
+ elif self.sep_style == SeparatorStyle.TWO:
56
+ seps = [self.sep, self.sep2]
57
+ ret = self.system + seps[0]
58
+ for i, (role, message) in enumerate(self.messages):
59
+ if message:
60
+ if type(message) is tuple:
61
+ message, _, _ = message
62
+ ret += role + ": " + message + seps[i % 2]
63
+ else:
64
+ ret += role + ":"
65
+ return ret
66
+ if self.sep_style == SeparatorStyle.MPT:
67
+ if self.system:
68
+ ret = self.system + self.sep
69
+ else:
70
+ ret = ''
71
+ for role, message in self.messages:
72
+ if message:
73
+ if type(message) is tuple:
74
+ message, _, _ = message
75
+ ret += role + message + self.sep
76
+ else:
77
+ ret += role
78
+ return ret
79
+ else:
80
+ raise ValueError(f"Invalid style: {self.sep_style}")
81
+
82
+
83
+ def append_message(self, role, message):
84
+ self.messages.append([role, message])
85
+
86
+ def copy(self):
87
+ return Conversation(
88
+ system=self.system,
89
+ roles=self.roles,
90
+ messages=[[x, y] for x, y in self.messages],
91
+ offset=self.offset,
92
+ sep_style=self.sep_style,
93
+ sep=self.sep,
94
+ sep2=self.sep2)
95
+
96
+
97
+
98
+ class KeywordsStoppingCriteria(StoppingCriteria):
99
+ def __init__(self, keywords, tokenizer, input_ids):
100
+ self.keywords = keywords
101
+ self.keyword_ids = [tokenizer(keyword).input_ids for keyword in keywords]
102
+ self.keyword_ids = [keyword_id[0] for keyword_id in self.keyword_ids if type(keyword_id) is list and len(keyword_id) == 1]
103
+ self.tokenizer = tokenizer
104
+ self.start_len = None
105
+ self.input_ids = input_ids
106
+
107
+ def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
108
+ if self.start_len is None:
109
+ self.start_len = self.input_ids.shape[1]
110
+ else:
111
+ for keyword_id in self.keyword_ids:
112
+ if output_ids[0, -1] == keyword_id:
113
+ return True
114
+ outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
115
+ for keyword in self.keywords:
116
+ if keyword in outputs:
117
+ return True
118
+ return False
119
+
120
+
121
+ class GOTImageEvalProcessor:
122
+ def __init__(self, image_size=384, mean=None, std=None):
123
+ if mean is None:
124
+ mean = (0.48145466, 0.4578275, 0.40821073)
125
+ if std is None:
126
+ std = (0.26862954, 0.26130258, 0.27577711)
127
+
128
+ self.normalize = transforms.Normalize(mean, std)
129
+
130
+ self.transform = transforms.Compose(
131
+ [
132
+ transforms.Resize(
133
+ (image_size, image_size), interpolation=InterpolationMode.BICUBIC
134
+ ),
135
+ transforms.ToTensor(),
136
+ self.normalize,
137
+ ]
138
+ )
139
+ def __call__(self, item):
140
+ return self.transform(item)
141
+
142
+
143
+
144
+ class GOTConfig(Qwen2Config):
145
+ model_type = "GOT"
146
+
147
+
148
+ class GOTQwenModel(Qwen2Model):
149
+ config_class = GOTConfig
150
+
151
+ def __init__(self, config: Qwen2Config):
152
+ super(GOTQwenModel, self).__init__(config)
153
+
154
+ self.vision_tower_high = build_GOT_vit_b()
155
+
156
+ self.mm_projector_vary = nn.Linear(1024, 1024)
157
+
158
+
159
+ def initialize_vision_modules(
160
+ self,
161
+ vision_tower,
162
+ pretrained_stage1_model=None,
163
+ freeze_vision_tower=False,
164
+ use_im_start_end=False,
165
+ vision_select_layer=-1,
166
+ dtype=torch.float16,
167
+ device="cuda"
168
+ ):
169
+
170
+
171
+ image_processor_high = GOTImageEvalProcessor(image_size=1024)
172
+
173
+ self.vision_tower_high = self.vision_tower_high.to(dtype=dtype, device=device)
174
+
175
+ self.mm_projector_vary = self.mm_projector_vary.to(dtype=dtype, device=device)
176
+
177
+
178
+ image_token_len = 256
179
+
180
+ self.config.vision_tower = vision_tower
181
+ self.config.image_token_len = image_token_len
182
+
183
+ self.config.use_im_start_end = True
184
+
185
+ self.config.vision_select_layer = vision_select_layer
186
+ self.config.freeze_vision_tower = freeze_vision_tower
187
+
188
+ return dict(
189
+ image_processor_high=image_processor_high,
190
+ image_token_len=image_token_len,
191
+ )
192
+
193
+
194
+ def forward(
195
+ self,
196
+ input_ids: torch.LongTensor = None,
197
+ attention_mask: Optional[torch.Tensor] = None,
198
+ position_ids: Optional[torch.LongTensor] = None,
199
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
200
+ inputs_embeds: Optional[torch.FloatTensor] = None,
201
+ use_cache: Optional[bool] = None,
202
+ output_attentions: Optional[bool] = None,
203
+ output_hidden_states: Optional[bool] = None,
204
+ images: Optional[torch.FloatTensor] = None,
205
+ return_dict: Optional[bool] = None,
206
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
207
+
208
+ # HACK: replace back original embeddings for LLaVA pretraining
209
+ orig_embeds_params = getattr(self, 'orig_embeds_params', None)
210
+ if orig_embeds_params is not None:
211
+ with torch.no_grad():
212
+ self.get_input_embeddings().weight[:-self.num_new_tokens] = orig_embeds_params[:-self.num_new_tokens].data
213
+
214
+ if inputs_embeds is None:
215
+ inputs_embeds = self.embed_tokens(input_ids)
216
+
217
+
218
+ vision_tower_high = getattr(self, 'vision_tower_high', None)
219
+
220
+
221
+ if vision_tower_high is not None and (input_ids.shape[1] != 1 or self.training) and images is not None:
222
+ use_im_start_end = getattr(self.config, "use_im_start_end", -1)
223
+
224
+ vision_select_layer = getattr(self.config, "vision_select_layer", -1)
225
+ im_patch_token = getattr(self.config, "im_patch_token", -1)
226
+ im_start_token = getattr(self.config, "im_start_token", -1)
227
+ im_end_token = getattr(self.config, "im_end_token", -1)
228
+ freeze_vision_tower = getattr(self.config, "freeze_vision_tower", False)
229
+
230
+ im_patch_token = 151859
231
+
232
+ im_start_token = 151857
233
+
234
+ im_end_token = 151858
235
+
236
+ image_features = []
237
+
238
+ for image in images:
239
+ P, C, H, W = image.shape
240
+ if P == 1:
241
+ with torch.set_grad_enabled(False):
242
+ cnn_feature = vision_tower_high(image)
243
+ cnn_feature = cnn_feature.flatten(2).permute(0, 2, 1) # 256*1024
244
+ image_feature = self.mm_projector_vary(cnn_feature)
245
+ image_features.append(image_feature)
246
+
247
+ else:
248
+ image_patches = torch.unbind(image)
249
+ image_patches_features = []
250
+ for image_patch in image_patches:
251
+ image_p = torch.stack([image_patch])
252
+
253
+ with torch.set_grad_enabled(False):
254
+ cnn_feature_p = vision_tower_high(image_p)
255
+ cnn_feature_p = cnn_feature_p.flatten(2).permute(0, 2, 1)
256
+ image_feature_p = self.mm_projector_vary(cnn_feature_p)
257
+ image_patches_features.append(image_feature_p)
258
+ image_feature = torch.cat(image_patches_features, dim=1)
259
+ image_features.append(image_feature)
260
+
261
+
262
+ dummy_image_features_2 = torch.zeros(256, 1024, device=inputs_embeds.device, dtype=inputs_embeds.dtype)
263
+ dummy_image_features = dummy_image_features_2
264
+ use_im_start_end = True
265
+ new_input_embeds = []
266
+ for cur_input_ids, cur_input_embeds, cur_image_features in zip(input_ids, inputs_embeds, image_features):
267
+ if (cur_input_ids == im_patch_token).sum() == 0:
268
+ cur_input_embeds = cur_input_embeds + (0. * dummy_image_features).sum()
269
+ new_input_embeds.append(cur_input_embeds)
270
+ continue
271
+
272
+ if use_im_start_end:
273
+ if (cur_input_ids == im_start_token).sum() != (cur_input_ids == im_end_token).sum():
274
+ raise ValueError("The number of image start tokens and image end tokens should be the same.")
275
+
276
+ image_start_tokens = torch.where(cur_input_ids == im_start_token)[0]
277
+ for image_start_token_pos, per_cur_image_features in zip(image_start_tokens, cur_image_features):
278
+ per_cur_image_features = per_cur_image_features.to(device=cur_input_embeds.device)
279
+ num_patches = per_cur_image_features.shape[0]
280
+
281
+ if cur_input_ids[image_start_token_pos + num_patches + 1] != im_end_token:
282
+ raise ValueError("The image end token should follow the image start token.")
283
+
284
+ cur_input_embeds = torch.cat(
285
+ (
286
+ cur_input_embeds[:image_start_token_pos+1],
287
+ per_cur_image_features,
288
+ cur_input_embeds[image_start_token_pos + num_patches + 1:]
289
+ ),
290
+ dim=0
291
+ )
292
+
293
+
294
+ new_input_embeds.append(cur_input_embeds)
295
+ else:
296
+ raise NotImplementedError
297
+
298
+ inputs_embeds = torch.stack(new_input_embeds, dim=0)
299
+
300
+ return super(GOTQwenModel, self).forward(
301
+ input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values,
302
+ inputs_embeds=inputs_embeds, use_cache=use_cache, position_ids = position_ids,
303
+ output_attentions=output_attentions, output_hidden_states=output_hidden_states,
304
+ return_dict=return_dict
305
+ )
306
+
307
+
308
+
309
+ class GOTQwenForCausalLM(Qwen2ForCausalLM):
310
+ config_class = GOTConfig
311
+ # supports_gradient_checkpointing = True
312
+
313
+ def __init__(self, config):
314
+ super(Qwen2ForCausalLM, self).__init__(config)
315
+ self.model = GOTQwenModel(config)
316
+
317
+ self.vocab_size = config.vocab_size
318
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
319
+
320
+ # Initialize weights and apply final processing
321
+ self.post_init()
322
+
323
+ def get_model(self):
324
+ return self.model
325
+
326
+ def forward(
327
+ self,
328
+ input_ids: torch.LongTensor = None,
329
+ attention_mask: Optional[torch.Tensor] = None,
330
+ position_ids: Optional[torch.LongTensor] = None,
331
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
332
+ inputs_embeds: Optional[torch.FloatTensor] = None,
333
+ labels: Optional[torch.LongTensor] = None,
334
+ use_cache: Optional[bool] = None,
335
+ output_attentions: Optional[bool] = None,
336
+ output_hidden_states: Optional[bool] = None,
337
+ images: Optional[torch.FloatTensor] = None,
338
+ return_dict: Optional[bool] = None,
339
+
340
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
341
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
342
+ output_hidden_states = (
343
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
344
+ )
345
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
346
+
347
+ outputs = self.model(
348
+ input_ids=input_ids,
349
+ past_key_values=past_key_values,
350
+ attention_mask=attention_mask,
351
+ position_ids=position_ids,
352
+ inputs_embeds=inputs_embeds,
353
+ use_cache=use_cache,
354
+ output_attentions=output_attentions,
355
+ output_hidden_states=output_hidden_states,
356
+ images=images,
357
+ return_dict=return_dict
358
+
359
+ )
360
+
361
+ hidden_states = outputs[0]
362
+ logits = self.lm_head(hidden_states)
363
+ logits = logits.float()
364
+
365
+ # logits
366
+
367
+ loss = None
368
+ if labels is not None:
369
+ # Shift so that tokens < n predict n
370
+ shift_logits = logits[..., :-1, :].contiguous()
371
+ shift_labels = labels[..., 1:].contiguous()
372
+ # Flatten the tokens
373
+ loss_fct = CrossEntropyLoss()
374
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
375
+ shift_labels = shift_labels.view(-1)
376
+ # Enable model parallelism
377
+ shift_labels = shift_labels.to(shift_logits.device)
378
+ loss = loss_fct(shift_logits, shift_labels)
379
+
380
+ if not return_dict:
381
+ output = (logits,) + outputs[1:]
382
+ return (loss,) + output if loss is not None else output
383
+
384
+ return CausalLMOutputWithPast(
385
+ loss=loss,
386
+ logits=logits,
387
+ past_key_values=outputs.past_key_values,
388
+ hidden_states=outputs.hidden_states,
389
+ attentions=outputs.attentions,
390
+ )
391
+
392
+
393
+ def prepare_inputs_for_generation(
394
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
395
+ ):
396
+ # Omit tokens covered by past_key_values
397
+ if past_key_values is not None:
398
+ if isinstance(past_key_values, Cache):
399
+ cache_length = past_key_values.get_seq_length()
400
+ past_length = past_key_values.seen_tokens
401
+ max_cache_length = past_key_values.get_max_length()
402
+ else:
403
+ cache_length = past_length = past_key_values[0][0].shape[2]
404
+ max_cache_length = None
405
+
406
+ # Keep only the unprocessed tokens:
407
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
408
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
409
+ # input)
410
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
411
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
412
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
413
+ # input_ids based on the past_length.
414
+ elif past_length < input_ids.shape[1]:
415
+ input_ids = input_ids[:, past_length:]
416
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
417
+
418
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
419
+ if (
420
+ max_cache_length is not None
421
+ and attention_mask is not None
422
+ and cache_length + input_ids.shape[1] > max_cache_length
423
+ ):
424
+ attention_mask = attention_mask[:, -max_cache_length:]
425
+
426
+ position_ids = kwargs.get("position_ids", None)
427
+ if attention_mask is not None and position_ids is None:
428
+ # create position_ids on the fly for batch generation
429
+ position_ids = attention_mask.long().cumsum(-1) - 1
430
+ position_ids.masked_fill_(attention_mask == 0, 1)
431
+ if past_key_values:
432
+ position_ids = position_ids[:, -input_ids.shape[1] :]
433
+
434
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
435
+ if inputs_embeds is not None and past_key_values is None:
436
+ model_inputs = {"inputs_embeds": inputs_embeds}
437
+ else:
438
+ model_inputs = {"input_ids": input_ids}
439
+
440
+ model_inputs.update(
441
+ {
442
+ "position_ids": position_ids,
443
+ "past_key_values": past_key_values,
444
+ "use_cache": kwargs.get("use_cache"),
445
+ "attention_mask": attention_mask,
446
+ "images": kwargs.get("images", None),
447
+ }
448
+ )
449
+ return model_inputs
450
+
451
+ def initialize_vision_tokenizer(
452
+ self,
453
+ tokenizer,
454
+ freeze_lm_model=False,
455
+ pretrained_stage1_model=None,
456
+ device="cuda"
457
+ ):
458
+ config = self.get_model().config
459
+
460
+
461
+ self.resize_token_embeddings(len(tokenizer))
462
+
463
+ config.im_patch_token = 151859
464
+
465
+ config.use_im_start_end = True
466
+
467
+ if config.use_im_start_end:
468
+ self.resize_token_embeddings(len(tokenizer))
469
+ config.im_start_token, config.im_end_token = 151857, 151858
470
+
471
+ def load_image(self, image_file):
472
+ if image_file.startswith('http') or image_file.startswith('https'):
473
+ response = requests.get(image_file)
474
+ image = Image.open(BytesIO(response.content)).convert('RGB')
475
+ else:
476
+ image = Image.open(image_file).convert('RGB')
477
+ return image
478
+
479
+ def disable_torch_init(self):
480
+ """
481
+ Disable the redundant torch default initialization to accelerate model creation.
482
+ """
483
+ import torch
484
+ setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
485
+ setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
486
+
487
+ def chat(self, tokenizer, image_file, ocr_type, ocr_box='', ocr_color='', render=False, save_render_file=None, print_prompt=False, gradio_input=False, stream_flag = False):
488
+
489
+ self.disable_torch_init()
490
+
491
+
492
+ image_processor_high = GOTImageEvalProcessor(image_size=1024)
493
+
494
+ use_im_start_end = True
495
+
496
+ image_token_len = 256
497
+
498
+ if gradio_input:
499
+ image = image_file.copy()
500
+ else:
501
+ image = self.load_image(image_file)
502
+
503
+ w, h = image.size
504
+
505
+ if ocr_type == 'format':
506
+ qs = 'OCR with format: '
507
+ else:
508
+ qs = 'OCR: '
509
+
510
+ if ocr_box:
511
+ bbox = eval(ocr_box)
512
+ if len(bbox) == 2:
513
+ bbox[0] = int(bbox[0]/w*1000)
514
+ bbox[1] = int(bbox[1]/h*1000)
515
+ if len(bbox) == 4:
516
+ bbox[0] = int(bbox[0]/w*1000)
517
+ bbox[1] = int(bbox[1]/h*1000)
518
+ bbox[2] = int(bbox[2]/w*1000)
519
+ bbox[3] = int(bbox[3]/h*1000)
520
+ if ocr_type == 'format':
521
+ qs = str(bbox) + ' ' + 'OCR with format: '
522
+ else:
523
+ qs = str(bbox) + ' ' + 'OCR: '
524
+
525
+ if ocr_color:
526
+ if ocr_type == 'format':
527
+ qs = '[' + ocr_color + ']' + ' ' + 'OCR with format: '
528
+ else:
529
+ qs = '[' + ocr_color + ']' + ' ' + 'OCR: '
530
+
531
+ if use_im_start_end:
532
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN*image_token_len + DEFAULT_IM_END_TOKEN + '\n' + qs
533
+ else:
534
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
535
+
536
+
537
+ conv_mpt = Conversation(
538
+ system="""<|im_start|>system
539
+ You should follow the instructions carefully and explain your answers in detail.""",
540
+ # system = None,
541
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
542
+ version="mpt",
543
+ messages=(),
544
+ offset=0,
545
+ sep_style=SeparatorStyle.MPT,
546
+ sep="<|im_end|>",
547
+ )
548
+
549
+ conv = conv_mpt.copy()
550
+ conv.append_message(conv.roles[0], qs)
551
+ conv.append_message(conv.roles[1], None)
552
+ prompt = conv.get_prompt()
553
+
554
+ if print_prompt:
555
+ print(prompt)
556
+
557
+ inputs = tokenizer([prompt])
558
+
559
+ image_tensor_1 = image_processor_high(image)
560
+
561
+ input_ids = torch.as_tensor(inputs.input_ids).cuda()
562
+
563
+ stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
564
+ keywords = [stop_str]
565
+ stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
566
+ streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
567
+
568
+ if stream_flag:
569
+ with torch.autocast("cuda", dtype=torch.bfloat16):
570
+ output_ids = self.generate(
571
+ input_ids,
572
+ images=[image_tensor_1.unsqueeze(0).half().cuda()],
573
+ do_sample=False,
574
+ num_beams = 1,
575
+ no_repeat_ngram_size = 20,
576
+ streamer=streamer,
577
+ max_new_tokens=4096,
578
+ stopping_criteria=[stopping_criteria]
579
+ )
580
+ else:
581
+ with torch.autocast("cuda", dtype=torch.bfloat16):
582
+ output_ids = self.generate(
583
+ input_ids,
584
+ images=[image_tensor_1.unsqueeze(0).half().cuda()],
585
+ do_sample=False,
586
+ num_beams = 1,
587
+ no_repeat_ngram_size = 20,
588
+ # streamer=streamer,
589
+ max_new_tokens=4096,
590
+ stopping_criteria=[stopping_criteria]
591
+ )
592
+
593
+ outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
594
+
595
+ if outputs.endswith(stop_str):
596
+ outputs = outputs[:-len(stop_str)]
597
+ outputs = outputs.strip()
598
+ response_str = outputs
599
+
600
+ if render:
601
+ print('==============rendering===============')
602
+ from .render_tools import svg_to_html, content_mmd_to_html, tik_html, translation_table
603
+
604
+ if '**kern' in outputs:
605
+ import verovio
606
+ tk = verovio.toolkit()
607
+ tk.loadData(outputs)
608
+ tk.setOptions({"pageWidth": 2100, "footer": 'none',
609
+ 'barLineWidth': 0.5, 'beamMaxSlope': 15,
610
+ 'staffLineWidth': 0.2, 'spacingStaff': 6})
611
+ tk.getPageCount()
612
+ svg = tk.renderToSVG()
613
+ svg = svg.replace("overflow=\"inherit\"", "overflow=\"visible\"")
614
+
615
+ svg_to_html(svg, save_render_file)
616
+
617
+ if ocr_type == 'format' and '**kern' not in outputs:
618
+
619
+
620
+ if '\\begin{tikzpicture}' not in outputs:
621
+ html_path_2 = save_render_file
622
+ right_num = outputs.count('\\right')
623
+ left_num = outputs.count('\left')
624
+
625
+ if right_num != left_num:
626
+ outputs = outputs.replace('\left(', '(').replace('\\right)', ')').replace('\left[', '[').replace('\\right]', ']').replace('\left{', '{').replace('\\right}', '}').replace('\left|', '|').replace('\\right|', '|').replace('\left.', '.').replace('\\right.', '.')
627
+
628
+
629
+ outputs = outputs.replace('"', '``').replace('$', '')
630
+
631
+ outputs_list = outputs.split('\n')
632
+ gt= ''
633
+ for out in outputs_list:
634
+ gt += '"' + out.replace('\\', '\\\\') + r'\n' + '"' + '+' + '\n'
635
+
636
+ gt = gt[:-2]
637
+
638
+
639
+ lines = content_mmd_to_html
640
+ lines = lines.split("const text =")
641
+ new_web = lines[0] + 'const text =' + gt + lines[1]
642
+
643
+ else:
644
+ html_path_2 = save_render_file
645
+ outputs = outputs.translate(translation_table)
646
+ outputs_list = outputs.split('\n')
647
+ gt= ''
648
+ for out in outputs_list:
649
+ if out:
650
+ if '\\begin{tikzpicture}' not in out and '\\end{tikzpicture}' not in out:
651
+ while out[-1] == ' ':
652
+ out = out[:-1]
653
+ if out is None:
654
+ break
655
+
656
+ if out:
657
+ if out[-1] != ';':
658
+ gt += out[:-1] + ';\n'
659
+ else:
660
+ gt += out + '\n'
661
+ else:
662
+ gt += out + '\n'
663
+
664
+
665
+ lines = tik_html
666
+ lines = lines.split("const text =")
667
+ new_web = lines[0] + gt + lines[1]
668
+
669
+ with open(html_path_2, 'w') as web_f_new:
670
+ web_f_new.write(new_web)
671
+ return response_str
672
+
673
+ def dynamic_preprocess(self, image, min_num=1, max_num=6, image_size=1024, use_thumbnail=True):
674
+
675
+ def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
676
+ best_ratio_diff = float('inf')
677
+ best_ratio = (1, 1)
678
+ area = width * height
679
+ for ratio in target_ratios:
680
+ target_aspect_ratio = ratio[0] / ratio[1]
681
+ ratio_diff = abs(aspect_ratio - target_aspect_ratio)
682
+ if ratio_diff < best_ratio_diff:
683
+ best_ratio_diff = ratio_diff
684
+ best_ratio = ratio
685
+ elif ratio_diff == best_ratio_diff:
686
+ if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
687
+ best_ratio = ratio
688
+ # print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
689
+ return best_ratio
690
+
691
+ orig_width, orig_height = image.size
692
+ aspect_ratio = orig_width / orig_height
693
+
694
+ # calculate the existing image aspect ratio
695
+ target_ratios = set(
696
+ (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
697
+ i * j <= max_num and i * j >= min_num)
698
+ # print(target_ratios)
699
+ target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
700
+
701
+ # find the closest aspect ratio to the target
702
+ target_aspect_ratio = find_closest_aspect_ratio(
703
+ aspect_ratio, target_ratios, orig_width, orig_height, image_size)
704
+
705
+ # print(target_aspect_ratio)
706
+ # calculate the target width and height
707
+ target_width = image_size * target_aspect_ratio[0]
708
+ target_height = image_size * target_aspect_ratio[1]
709
+ blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
710
+
711
+ # resize the image
712
+ resized_img = image.resize((target_width, target_height))
713
+ processed_images = []
714
+ for i in range(blocks):
715
+ box = (
716
+ (i % (target_width // image_size)) * image_size,
717
+ (i // (target_width // image_size)) * image_size,
718
+ ((i % (target_width // image_size)) + 1) * image_size,
719
+ ((i // (target_width // image_size)) + 1) * image_size
720
+ )
721
+ # split the image
722
+ split_img = resized_img.crop(box)
723
+ processed_images.append(split_img)
724
+ assert len(processed_images) == blocks
725
+ if use_thumbnail and len(processed_images) != 1:
726
+ thumbnail_img = image.resize((image_size, image_size))
727
+ processed_images.append(thumbnail_img)
728
+ return processed_images
729
+
730
+
731
+ def chat_crop(self, tokenizer, image_file, ocr_type, render=False, save_render_file=None, print_prompt=False, gradio_input=False, stream_flag = False):
732
+ # Model
733
+ self.disable_torch_init()
734
+ multi_page=False
735
+
736
+
737
+ image_processor_high = GOTImageEvalProcessor(image_size=1024)
738
+
739
+ use_im_start_end = True
740
+
741
+
742
+ image_token_len = 256
743
+
744
+ image_list = []
745
+
746
+ # if len(image_file_list)>1:
747
+ # multi_page = True
748
+
749
+ if multi_page:
750
+ qs = 'OCR with format across multi pages: '
751
+ # only for png files
752
+ # import glob
753
+ # from natsort import natsorted
754
+ # patches = glob.glob(image_file + '/*png')
755
+ patches = image_file
756
+ # patches = natsorted(patches)
757
+ sub_images = []
758
+ for sub_image in patches:
759
+ sub_images.append(self.load_image(sub_image))
760
+
761
+ ll = len(patches)
762
+ # print(patches)
763
+ # print("len ll: ", ll)
764
+
765
+ else:
766
+ if ocr_type == 'format':
767
+ qs = 'OCR with format upon the patch reference: '
768
+ else:
769
+ qs = 'OCR upon the patch reference: '
770
+ if gradio_input:
771
+ img = image_file.copy()
772
+ else:
773
+ img = self.load_image(image_file)
774
+ sub_images = self.dynamic_preprocess(img)
775
+ ll = len(sub_images)
776
+
777
+ for image in sub_images:
778
+ image_tensor_1 = image_processor_high(image)
779
+ image_list.append(image_tensor_1)
780
+
781
+
782
+ image_list = torch.stack(image_list)
783
+
784
+ print('====new images batch size======: \n',image_list.shape)
785
+
786
+
787
+ if use_im_start_end:
788
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_PATCH_TOKEN*image_token_len*ll + DEFAULT_IM_END_TOKEN + '\n' + qs
789
+ else:
790
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
791
+
792
+
793
+ conv_mpt = Conversation(
794
+ system="""<|im_start|>system
795
+ You should follow the instructions carefully and explain your answers in detail.""",
796
+ # system = None,
797
+ roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
798
+ version="mpt",
799
+ messages=(),
800
+ offset=0,
801
+ sep_style=SeparatorStyle.MPT,
802
+ sep="<|im_end|>",
803
+ )
804
+
805
+ conv = conv_mpt.copy()
806
+ conv.append_message(conv.roles[0], qs)
807
+ conv.append_message(conv.roles[1], None)
808
+ prompt = conv.get_prompt()
809
+
810
+ if print_prompt:
811
+ print(prompt)
812
+
813
+ inputs = tokenizer([prompt])
814
+
815
+ input_ids = torch.as_tensor(inputs.input_ids).cuda()
816
+
817
+ stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
818
+ keywords = [stop_str]
819
+ stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
820
+ streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
821
+
822
+ if stream_flag:
823
+ with torch.autocast("cuda", dtype=torch.bfloat16):
824
+ output_ids = self.generate(
825
+ input_ids,
826
+ images=[image_list.half().cuda()],
827
+ do_sample=False,
828
+ num_beams = 1,
829
+ # no_repeat_ngram_size = 20,
830
+ streamer=streamer,
831
+ max_new_tokens=4096,
832
+ stopping_criteria=[stopping_criteria]
833
+ )
834
+ else:
835
+ with torch.autocast("cuda", dtype=torch.bfloat16):
836
+ output_ids = self.generate(
837
+ input_ids,
838
+ images=[image_list.half().cuda()],
839
+ do_sample=False,
840
+ num_beams = 1,
841
+ # no_repeat_ngram_size = 20,
842
+ # streamer=streamer,
843
+ max_new_tokens=4096,
844
+ stopping_criteria=[stopping_criteria]
845
+ )
846
+
847
+ outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip()
848
+
849
+ if outputs.endswith(stop_str):
850
+ outputs = outputs[:-len(stop_str)]
851
+ outputs = outputs.strip()
852
+ response_str = outputs
853
+
854
+ if render:
855
+ print('==============rendering===============')
856
+ from .render_tools import content_mmd_to_html
857
+ html_path_2 = save_render_file
858
+ right_num = outputs.count('\\right')
859
+ left_num = outputs.count('\left')
860
+
861
+ if right_num != left_num:
862
+ outputs = outputs.replace('\left(', '(').replace('\\right)', ')').replace('\left[', '[').replace('\\right]', ']').replace('\left{', '{').replace('\\right}', '}').replace('\left|', '|').replace('\\right|', '|').replace('\left.', '.').replace('\\right.', '.')
863
+
864
+
865
+ outputs = outputs.replace('"', '``').replace('$', '')
866
+
867
+ outputs_list = outputs.split('\n')
868
+ gt= ''
869
+ for out in outputs_list:
870
+ gt += '"' + out.replace('\\', '\\\\') + r'\n' + '"' + '+' + '\n'
871
+
872
+ gt = gt[:-2]
873
+
874
+ lines = content_mmd_to_html
875
+ lines = lines.split("const text =")
876
+ new_web = lines[0] + 'const text =' + gt + lines[1]
877
+
878
+ with open(html_path_2, 'w') as web_f_new:
879
+ web_f_new.write(new_web)
880
+
881
+ return response_str
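To illustrate the multi-crop path, here is a small sketch (mine, with a placeholder image) of `dynamic_preprocess()`, which `chat_crop()` uses to tile large pages into up to six 1024 px crops plus a thumbnail before they are fed to the vision tower:

```python
from PIL import Image

# model and tokenizer loaded as in the README
page = Image.new('RGB', (2048, 1024), 'white')   # placeholder 2:1 page
crops = model.dynamic_preprocess(page)
print(len(crops))          # 3 -> two 1024x1024 tiles plus one thumbnail
print(crops[0].size)       # (1024, 1024)

# end-to-end, chat_crop() runs this tiling internally:
# res = model.chat_crop(tokenizer, 'page.jpg', ocr_type='format')
```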
qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
qwen_original.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
render_tools.py ADDED
@@ -0,0 +1,96 @@
1
+
2
+ punctuation_dict = {
3
+ ",": ",",
4
+ "。": ".",
5
+
6
+ }
7
+ translation_table = str.maketrans(punctuation_dict)
8
+
9
+ def svg_to_html(svg_content, output_filename):
10
+
11
+ html_content = f"""
12
+ <!DOCTYPE html>
13
+ <html lang="en">
14
+ <head>
15
+ <meta charset="UTF-8">
16
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
17
+ <title>SVG Embedded in HTML</title>
18
+ </head>
19
+ <body>
20
+ <svg width="2100" height="15000" xmlns="http://www.w3.org/2000/svg">
21
+ {svg_content}
22
+ </svg>
23
+ </body>
24
+ </html>
25
+ """
26
+
27
+ with open(output_filename, 'w') as file:
28
+ file.write(html_content)
29
+
30
+
31
+
32
+ content_mmd_to_html = """<!DOCTYPE html>
33
+ <html lang="en" data-lt-installed="true"><head>
34
+ <meta charset="UTF-8">
35
+ <title>Title</title>
36
+ <script>
37
+ const text =
38
+ </script>
39
+ <style>
40
+ #content {
41
+ max-width: 800px;
42
+ margin: auto;
43
+ }
44
+ </style>
45
+ <script>
46
+ let script = document.createElement('script');
47
+ script.src = "https://cdn.jsdelivr.net/npm/[email protected]/es5/bundle.js";
48
+ document.head.append(script);
49
+
50
+ script.onload = function() {
51
+ const isLoaded = window.loadMathJax();
52
+ if (isLoaded) {
53
+ console.log('Styles loaded!')
54
+ }
55
+
56
+ const el = window.document.getElementById('content-text');
57
+ if (el) {
58
+ const options = {
59
+ htmlTags: true
60
+ };
61
+ const html = window.render(text, options);
62
+ el.outerHTML = html;
63
+ }
64
+ };
65
+ </script>
66
+ </head>
67
+ <body>
68
+ <div id="content"><div id="content-text"></div></div>
69
+ </body>
70
+ </html>
71
+ """
72
+
73
+
74
+
75
+ tik_html = """
76
+ <!DOCTYPE html>
77
+
78
+ <html>
79
+
80
+ <head>
81
+ <meta charset="UTF-8">
82
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
83
+ <title>Document</title>
84
+ <link rel="stylesheet" type="text/css" href="https://tikzjax.com/v1/fonts.css">
85
+ <script src="https://tikzjax.com/v1/tikzjax.js"></script>
86
+ </head>
87
+ <body>
88
+ <script type="text/tikz">
89
+ const text =
90
+ </script>
91
+ </body>
92
+ </html>"""
93
+
94
+
95
+
96
+ # print(tik_html)
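
`render_tools.py` also ships a small `svg_to_html` helper that wraps raw SVG markup in a standalone page. A hedged usage sketch (the rectangle is placeholder content, and the output path is an assumption):

```python
# Sketch: write an HTML page that embeds a piece of SVG using the helper above.
from render_tools import svg_to_html

svg_content = '<rect x="10" y="10" width="200" height="100" fill="lightblue"/>'
svg_to_html(svg_content, "svg_preview.html")  # writes the wrapped page to disk
```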
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "pad_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ }
9
+ }
tokenisation.ipynb ADDED
@@ -0,0 +1,571 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 5,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "from tokenization_qwen import QWenTokenizer\n",
10
+ "from tokenization_qwen_sub import QWenTokenizer as QWenTokenizer_SUB\n",
11
+ "\n",
12
+ "tokenizer = QWenTokenizer(vocab_file=\"./qwen.tiktoken\")\n",
13
+ "tokenizer_sub = QWenTokenizer_SUB(vocab_file=\"./modified_qwen_sub.tiktoken\")\n",
14
+ "# tokenizer = QWenTokenizer(vocab_file=\"./modified_qwen.tiktoken\")\n",
15
+ "# tokenizer = QWenTokenizer(vocab_file=\"./modified_qwen_sub.tiktoken\")\n",
16
+ "\n",
17
+ "# print(tokenizer.tokenizer.encode(\"Hello World\", allowed_special=set(\"<|extra_40|>\")))\n"
18
+ ]
19
+ },
20
+ {
21
+ "cell_type": "code",
22
+ "execution_count": 6,
23
+ "metadata": {},
24
+ "outputs": [
25
+ {
26
+ "name": "stdout",
27
+ "output_type": "stream",
28
+ "text": [
29
+ "151860\n",
30
+ "151860\n"
31
+ ]
32
+ }
33
+ ],
34
+ "source": [
35
+ "print(len(tokenizer))\n",
36
+ "print(len(tokenizer_sub))"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "code",
41
+ "execution_count": 7,
42
+ "metadata": {},
43
+ "outputs": [],
44
+ "source": [
45
+ "layout_generation_exmaple = \"\"\"<text><loc_72><loc_55><loc_372><loc_20><text><loc_100><loc_118><loc_789><loc_42><text><loc_100><loc_167><loc_788><loc_56><text><loc_100><loc_229><loc_783><loc_56><text><loc_100><loc_292><loc_733><loc_42><text><loc_100><loc_341><loc_757><loc_29><text><loc_100><loc_500><loc_798><loc_42><text><loc_100><loc_587><loc_523><loc_15><text><loc_100><loc_705><loc_809><loc_56><text><loc_100><loc_768><loc_702><loc_29><text><loc_100><loc_803><loc_809><loc_56><text><loc_947><loc_987><loc_24><loc_17><title><loc_100><loc_563><loc_709><loc_19><table><loc_97><loc_370><loc_817><loc_115><table><loc_99><loc_607><loc_809><loc_86>\"\"\"\n",
46
+ "# layout_generation_exmaple = \"<|extra_40|>\""
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 8,
52
+ "metadata": {},
53
+ "outputs": [
54
+ {
55
+ "name": "stdout",
56
+ "output_type": "stream",
57
+ "text": [
58
+ "[151649, 27, 1074, 62, 22, 17, 1784, 1074, 62, 20, 20, 1784, 1074, 62, 18, 22, 17, 1784, 1074, 62, 17, 15, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 16, 16, 23, 1784, 1074, 62, 22, 23, 24, 1784, 1074, 62, 19, 17, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 16, 21, 22, 1784, 1074, 62, 22, 23, 23, 1784, 1074, 62, 20, 21, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 17, 17, 24, 1784, 1074, 62, 22, 23, 18, 1784, 1074, 62, 20, 21, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 17, 24, 17, 1784, 1074, 62, 22, 18, 18, 1784, 1074, 62, 19, 17, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 18, 19, 16, 1784, 1074, 62, 22, 20, 22, 1784, 1074, 62, 17, 24, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 20, 15, 15, 1784, 1074, 62, 22, 24, 23, 1784, 1074, 62, 19, 17, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 20, 23, 22, 1784, 1074, 62, 20, 17, 18, 1784, 1074, 62, 16, 20, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 22, 15, 20, 1784, 1074, 62, 23, 15, 24, 1784, 1074, 62, 20, 21, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 22, 21, 23, 1784, 1074, 62, 22, 15, 17, 1784, 1074, 62, 17, 24, 29, 151649, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 23, 15, 18, 1784, 1074, 62, 23, 15, 24, 1784, 1074, 62, 20, 21, 29, 151649, 27, 1074, 62, 24, 19, 22, 1784, 1074, 62, 24, 23, 22, 1784, 1074, 62, 17, 19, 1784, 1074, 62, 16, 22, 29, 151651, 27, 1074, 62, 16, 15, 15, 1784, 1074, 62, 20, 21, 18, 1784, 1074, 62, 22, 15, 24, 1784, 1074, 62, 16, 24, 29, 151652, 27, 1074, 62, 24, 22, 1784, 1074, 62, 18, 22, 15, 1784, 1074, 62, 23, 16, 22, 1784, 1074, 62, 16, 16, 20, 29, 151652, 27, 1074, 62, 24, 24, 1784, 1074, 62, 21, 15, 22, 1784, 1074, 62, 23, 15, 24, 1784, 1074, 62, 23, 21, 29]\n",
59
+ "[151649, 150715, 150698, 151015, 150663, 151649, 150743, 150761, 151432, 150685, 151649, 150743, 150810, 151431, 150699, 151649, 150743, 150872, 151426, 150699, 151649, 150743, 150935, 151376, 150685, 151649, 150743, 150984, 151400, 150672, 151649, 150743, 151143, 151441, 150685, 151649, 150743, 151230, 151166, 150658, 151649, 150743, 151348, 151452, 150699, 151649, 150743, 151411, 151345, 150672, 151649, 150743, 151446, 151452, 150699, 151649, 151590, 151630, 150667, 150660, 151651, 150743, 151206, 151352, 150662, 151652, 150740, 151013, 151460, 150758, 151652, 150742, 151250, 151452, 150729]\n"
60
+ ]
61
+ }
62
+ ],
63
+ "source": [
64
+ "tokens = tokenizer.tokenizer.encode(str(layout_generation_exmaple), allowed_special=\"all\")\n",
65
+ "print(tokens)\n",
66
+ "tokens_sub = tokenizer_sub.tokenizer.encode(str(layout_generation_exmaple), allowed_special=\"all\")\n",
67
+ "print(tokens_sub)"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": null,
73
+ "metadata": {},
74
+ "outputs": [],
75
+ "source": [
76
+ "# decoded = tokenizer.tokenizer.decode(tokens)\n",
77
+ "# new_decoded = tokenizer.tokenizer.decode(new_tokens)\n",
78
+ "# print(decoded)\n",
79
+ "# print(new_decoded)\n",
80
+ "for i in tokens:\n",
81
+ " decoded_token = tokenizer.tokenizer.decode([i])\n",
82
+ " print(f\"{i},{decoded_token}\\n\")\n",
83
+ " "
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": null,
89
+ "metadata": {},
90
+ "outputs": [],
91
+ "source": [
92
+ "print(len(tokenizer))"
93
+ ]
94
+ },
95
+ {
96
+ "cell_type": "code",
97
+ "execution_count": null,
98
+ "metadata": {},
99
+ "outputs": [],
100
+ "source": [
101
+ "# map the token id to the token\n",
102
+ "for i in tokens:\n",
103
+ " decoded_token = tokenizer.tokenizer.decode([i])\n",
104
+ " print(f\"{i},{decoded_token}\\n\")"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "code",
109
+ "execution_count": null,
110
+ "metadata": {},
111
+ "outputs": [],
112
+ "source": [
113
+ "import tiktoken\n",
114
+ "\n",
115
+ "tokenizer = tiktoken.model.load_tiktoken_bpe(\"./qwen.tiktoken\")"
116
+ ]
117
+ },
118
+ {
119
+ "cell_type": "code",
120
+ "execution_count": null,
121
+ "metadata": {},
122
+ "outputs": [],
123
+ "source": [
124
+ "import tiktoken\n",
125
+ "import os\n",
126
+ "\n",
127
+ "def modify_tiktoken_file(input_file, output_file):\n",
128
+ " # Read the file contents directly\n",
129
+ " with open(input_file, 'rb') as f:\n",
130
+ " original_content = f.readlines()\n",
131
+ " \n",
132
+ " # Generate the new location unique tokens\n",
133
+ " LOCATION_UNIQUE_TOKENS = tuple(f\"<loc_{i}>\" for i in range(1, 1001))\n",
134
+ " \n",
135
+ " # Prepare the new content\n",
136
+ " new_content = []\n",
137
+ " \n",
138
+ " # Keep the version header if it exists\n",
139
+ " if original_content and original_content[0].startswith(b'version:'):\n",
140
+ " new_content.append(original_content[0])\n",
141
+ " original_content = original_content[1:]\n",
142
+ " \n",
143
+ " # Keep all existing tokens except the last 1000\n",
144
+ " existing_tokens = original_content[:-1000] if len(original_content) > 1000 else []\n",
145
+ " new_content.extend(existing_tokens)\n",
146
+ " \n",
147
+ " # Add the new location tokens\n",
148
+ " for token in LOCATION_UNIQUE_TOKENS:\n",
149
+ " # Encode the token and create a rank (you might want to adjust the rank strategy)\n",
150
+ " encoded_token = token.encode('utf-8')\n",
151
+ " # Use a high rank to ensure these are at the end\n",
152
+ " new_content.append(encoded_token + b' ' + str(len(existing_tokens) + LOCATION_UNIQUE_TOKENS.index(token)).encode('utf-8') + b'\\n')\n",
153
+ " \n",
154
+ " # Save the modified tokenizer\n",
155
+ " with open(output_file, 'wb') as f:\n",
156
+ " f.writelines(new_content)\n",
157
+ " \n",
158
+ " print(f\"Modified tokenizer saved to {output_file}\")\n",
159
+ " print(f\"Total tokens in new file: {len(new_content)}\")\n",
160
+ "\n",
161
+ "# Example usage\n",
162
+ "input_tiktoken_file = './qwen.tiktoken'\n",
163
+ "output_tiktoken_file = './modified_qwen.tiktoken'\n",
164
+ "\n",
165
+ "modify_tiktoken_file(input_tiktoken_file, output_tiktoken_file)"
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "execution_count": null,
171
+ "metadata": {},
172
+ "outputs": [],
173
+ "source": [
174
+ "import base64\n",
175
+ "from typing import Dict\n",
176
+ "\n",
177
+ "def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:\n",
178
+ " with open(tiktoken_bpe_file, \"rb\") as f:\n",
179
+ " contents = f.read()\n",
180
+ " return {\n",
181
+ " base64.b64decode(token): int(rank)\n",
182
+ " for token, rank in (line.split() for line in contents.splitlines() if line)\n",
183
+ " }\n",
184
+ "\n",
185
+ "# Path to your .tiktoken file\n",
186
+ "tiktoken_bpe_file = \"./qwen.tiktoken\"\n",
187
+ "\n",
188
+ "# Load the BPE encoding\n",
189
+ "bpe_data = _load_tiktoken_bpe(tiktoken_bpe_file)\n",
190
+ "\n",
191
+ "# Example usage\n",
192
+ "# print(\"Loaded BPE Data:\", bpe_data)"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": null,
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "with open(\"./qwen.tiktoken\", 'rb') as f:\n",
202
+ " contents = f.read()\n",
203
+ "\n",
204
+ "# Parse the original vocabulary\n",
205
+ "vocab = {\n",
206
+ " base64.b64decode(token): int(rank)\n",
207
+ " for token, rank in (line.split() for line in contents.splitlines() if line)\n",
208
+ "}\n",
209
+ "\n",
210
+ "base_vocab_size = len(vocab)\n",
211
+ "print(base_vocab_size)"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "code",
216
+ "execution_count": null,
217
+ "metadata": {},
218
+ "outputs": [],
219
+ "source": [
220
+ "location_tokens = [f\"<loc_{i}>\" for i in range(1, 1001)]\n",
221
+ "print(location_tokens)"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "code",
226
+ "execution_count": null,
227
+ "metadata": {},
228
+ "outputs": [],
229
+ "source": [
230
+ "tokens_to_remove = sorted(vocab.items(), key=lambda x: x[1])[-1000:]\n",
231
+ "print(tokens_to_remove)"
232
+ ]
233
+ },
234
+ {
235
+ "cell_type": "code",
236
+ "execution_count": null,
237
+ "metadata": {},
238
+ "outputs": [],
239
+ "source": [
240
+ "import base64\n",
241
+ "import tiktoken\n",
242
+ "\n",
243
+ "def modify_tokenizer(input_file='./qwen.tiktoken', output_file='./modified_qwen.tiktoken'):\n",
244
+ " # Read the original tokenizer file\n",
245
+ " with open(input_file, 'rb') as f:\n",
246
+ " contents = f.read()\n",
247
+ " \n",
248
+ " # Parse the original vocabulary\n",
249
+ " vocab = {\n",
250
+ " base64.b64decode(token): int(rank)\n",
251
+ " for token, rank in (line.split() for line in contents.splitlines() if line)\n",
252
+ " }\n",
253
+ " \n",
254
+ " # Get the base vocabulary size (excluding special tokens)\n",
255
+ " base_vocab_size = len(vocab)\n",
256
+ " \n",
257
+ " # Create location tokens\n",
258
+ " location_tokens = [f\"<loc_{i}>\" for i in range(1, 1001)]\n",
259
+ " \n",
260
+ " # Remove the last 1000 tokens from the vocabulary\n",
261
+ " tokens_to_remove = sorted(vocab.items(), key=lambda x: x[1])[-1000:]\n",
262
+ " for token, _ in tokens_to_remove:\n",
263
+ " del vocab[token]\n",
264
+ " \n",
265
+ " # Add location tokens\n",
266
+ " for i, token in enumerate(location_tokens):\n",
267
+ " vocab[token.encode('utf-8')] = base_vocab_size - 1000 + i\n",
268
+ " \n",
269
+ " # Write the modified vocabulary to the new file\n",
270
+ " with open(output_file, 'w', encoding='utf-8') as f:\n",
271
+ " for token, rank in sorted(vocab.items(), key=lambda x: x[1]):\n",
272
+ " # Encode the token in base64\n",
273
+ " token_b64 = base64.b64encode(token).decode('utf-8')\n",
274
+ " f.write(f\"{token_b64} {rank}\\n\")\n",
275
+ " \n",
276
+ " print(f\"Modified tokenizer saved to {output_file}\")\n",
277
+ " print(f\"Added {len(location_tokens)} location tokens\")\n",
278
+ " print(f\"Final vocabulary size: {len(vocab)}\")\n",
279
+ "\n",
280
+ "\n",
281
+ "modify_tokenizer()"
282
+ ]
283
+ },
284
+ {
285
+ "cell_type": "code",
286
+ "execution_count": null,
287
+ "metadata": {},
288
+ "outputs": [],
289
+ "source": [
290
+ "import base64\n",
291
+ "import tiktoken\n",
292
+ "from typing import Dict\n",
293
+ "from tabulate import tabulate\n",
294
+ "\n",
295
+ "# Define special tokens from the original tokenizer\n",
296
+ "ENDOFTEXT = \"<|endoftext|>\"\n",
297
+ "IMSTART = \"<|im_start|>\"\n",
298
+ "IMEND = \"<|im_end|>\"\n",
299
+ "DOCUMENT_UNIQUE_TOKENS = tuple([\"<caption>\", \"<formula>\", \"<list>\", \"<text>\", \"<image>\", \"<title>\", \"<table>\", \"<LD>\", \"<TE>\", \"<MF>\", \"<IC>\", \"<OCR>\", \"<POCR>\", \"<VQA>\", \"<DVQA>\"])\n",
300
+ "LOCATION_UNIQUE_TOKENS = tuple([f\"<loc_{i}>\" for i in range(0, 1001)])\n",
301
+ "EXTRAS = tuple((f\"<|extra_{i}|>\" for i in range(len(DOCUMENT_UNIQUE_TOKENS), 205)))\n",
302
+ "\n",
303
+ "# Include location tokens in special tokens\n",
304
+ "SPECIAL_TOKENS = (ENDOFTEXT, IMSTART, IMEND) + DOCUMENT_UNIQUE_TOKENS + EXTRAS + LOCATION_UNIQUE_TOKENS\n",
305
+ "\n",
306
+ "def load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:\n",
307
+ " with open(tiktoken_bpe_file, \"rb\") as f:\n",
308
+ " contents = f.read()\n",
309
+ " return {\n",
310
+ " base64.b64decode(token): int(rank)\n",
311
+ " for token, rank in (line.split() for line in contents.splitlines() if line)\n",
312
+ " }\n",
313
+ "\n",
314
+ "def decode_token(token_bytes):\n",
315
+ " \"\"\"Attempt to decode bytes to string, fallback to base64 if not UTF-8 decodable\"\"\"\n",
316
+ " try:\n",
317
+ " return token_bytes.decode('utf-8')\n",
318
+ " except UnicodeDecodeError:\n",
319
+ " return f\"<bytes>{base64.b64encode(token_bytes).decode('utf-8')}\"\n",
320
+ "\n",
321
+ "def compare_tokenizers(old_file='./qwen.tiktoken', new_file='./modified_qwen.tiktoken'):\n",
322
+ " # Load both tokenizers\n",
323
+ " old_vocab = load_tiktoken_bpe(old_file)\n",
324
+ " new_vocab = load_tiktoken_bpe(new_file)\n",
325
+ " \n",
326
+ " # Create special tokens dictionary including location tokens\n",
327
+ " special_tokens = {\n",
328
+ " token: index\n",
329
+ " for index, token in enumerate(SPECIAL_TOKENS, start=len(new_vocab))\n",
330
+ " }\n",
331
+ " \n",
332
+ " # Add location tokens to the special tokens dictionary with their proper ranks\n",
333
+ " for i, token in enumerate(LOCATION_UNIQUE_TOKENS):\n",
334
+ " special_tokens[token] = len(new_vocab) - 1000 + i\n",
335
+ " \n",
336
+ " print(f\"Old vocabulary size: {len(old_vocab)}\")\n",
337
+ " print(f\"New vocabulary size: {len(new_vocab)}\")\n",
338
+ " print(f\"Difference in size: {len(new_vocab) - len(old_vocab)}\")\n",
339
+ " print(f\"Number of special tokens: {len(special_tokens)}\")\n",
340
+ " print(\"\\n\")\n",
341
+ " \n",
342
+ " # Find tokens that were removed\n",
343
+ " removed_tokens = set(old_vocab.keys()) - set(new_vocab.keys())\n",
344
+ " print(f\"Number of removed tokens: {len(removed_tokens)}\")\n",
345
+ " \n",
346
+ " # Find new tokens that were added\n",
347
+ " added_tokens = set(new_vocab.keys()) - set(old_vocab.keys())\n",
348
+ " print(f\"Number of added tokens: {len(added_tokens)}\")\n",
349
+ " print(\"\\n\")\n",
350
+ " \n",
351
+ " # Create comparison tables\n",
352
+ " print(\"Sample of removed tokens (last 10):\")\n",
353
+ " removed_data = []\n",
354
+ " for token in sorted(removed_tokens, key=lambda x: old_vocab[x])[-10:]:\n",
355
+ " removed_data.append([\n",
356
+ " decode_token(token),\n",
357
+ " old_vocab[token]\n",
358
+ " ])\n",
359
+ " print(tabulate(removed_data, headers=['Token', 'Rank'], tablefmt='grid'))\n",
360
+ " print(\"\\n\")\n",
361
+ " \n",
362
+ " print(\"Sample of added tokens (first 10):\")\n",
363
+ " added_data = []\n",
364
+ " for token in sorted(added_tokens, key=lambda x: new_vocab[x])[:10]:\n",
365
+ " added_data.append([\n",
366
+ " decode_token(token),\n",
367
+ " new_vocab[token]\n",
368
+ " ])\n",
369
+ " print(tabulate(added_data, headers=['Token', 'Rank'], tablefmt='grid'))\n",
370
+ " \n",
371
+ " # Test tokenization of a sample text\n",
372
+ " print(\"\\nTokenization comparison for sample text:\")\n",
373
+ " # sample_text = \"This is a test sentence with location markers <loc_1> and <loc_999>\"\n",
374
+ " sample_text = layout_generation_exmaple\n",
375
+ " \n",
376
+ " # Create encodings for both tokenizers\n",
377
+ " old_enc = tiktoken.Encoding(\n",
378
+ " \"old_qwen\",\n",
379
+ " pat_str=r\"\"\"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+\"\"\",\n",
380
+ " mergeable_ranks=old_vocab,\n",
381
+ " special_tokens=special_tokens\n",
382
+ " )\n",
383
+ " \n",
384
+ " new_enc = tiktoken.Encoding(\n",
385
+ " \"new_qwen\",\n",
386
+ " pat_str=r\"\"\"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+\"\"\",\n",
387
+ " mergeable_ranks=new_vocab,\n",
388
+ " special_tokens=special_tokens\n",
389
+ " )\n",
390
+ " \n",
391
+ " try:\n",
392
+ " old_tokens = old_enc.encode(sample_text , allowed_special=\"all\")\n",
393
+ " print(\"\\nOld tokenizer:\")\n",
394
+ " print(f\"Token IDs: {old_tokens}\")\n",
395
+ " print(f\"Decoded: {old_enc.decode(old_tokens)}\")\n",
396
+ " except Exception as e:\n",
397
+ " print(\"\\nError with old tokenizer:\", str(e))\n",
398
+ " \n",
399
+ " try:\n",
400
+ " new_tokens = new_enc.encode(sample_text , allowed_special=\"all\")\n",
401
+ " print(\"\\nNew tokenizer:\")\n",
402
+ " print(f\"Token IDs: {new_tokens}\")\n",
403
+ " print(f\"Decoded: {new_enc.decode(new_tokens)}\")\n",
404
+ " except Exception as e:\n",
405
+ " print(\"\\nError with new tokenizer:\", str(e))\n",
406
+ "\n",
407
+ "# if __name__ == \"__main__\":\n",
408
+ "compare_tokenizers()"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": 4,
414
+ "metadata": {},
415
+ "outputs": [
416
+ {
417
+ "name": "stdout",
418
+ "output_type": "stream",
419
+ "text": [
420
+ "Original vocab size: 151643\n",
421
+ "Truncated vocab size: 150643\n",
422
+ "Modified tokenizer saved to ./modified_qwen_sub.tiktoken\n",
423
+ "Added 1000 location tokens\n"
424
+ ]
425
+ }
426
+ ],
427
+ "source": [
428
+ "import base64\n",
429
+ "\n",
430
+ "def modify_qwen_tokenizer_small(input_file='./qwen.tiktoken', output_file='./modified_qwen_sub.tiktoken'):\n",
431
+ " # Read the original tokenizer file\n",
432
+ " with open(input_file, 'rb') as f:\n",
433
+ " contents = f.read()\n",
434
+ " \n",
435
+ " # Parse the original vocabulary\n",
436
+ " vocab = {\n",
437
+ " base64.b64decode(token): int(rank)\n",
438
+ " for token, rank in (line.split() for line in contents.splitlines() if line)\n",
439
+ " }\n",
440
+ " \n",
441
+ " # Sort tokens by rank and remove last 1001 tokens\n",
442
+ " sorted_tokens = sorted(vocab.items(), key=lambda x: x[1])\n",
443
+ " truncated_vocab = dict(sorted_tokens[:-1000])\n",
444
+ " base_vocab_size = len(truncated_vocab)\n",
445
+ " \n",
446
+ " print(f\"Original vocab size: {len(vocab)}\")\n",
447
+ " print(f\"Truncated vocab size: {len(truncated_vocab)}\")\n",
448
+ " \n",
449
+ " # Write the modified vocabulary with location tokens first\n",
450
+ " with open(output_file, 'w', encoding='utf-8') as f:\n",
451
+ " # First write the truncated base vocabulary\n",
452
+ " for token, rank in sorted(truncated_vocab.items(), key=lambda x: x[1]):\n",
453
+ " token_b64 = base64.b64encode(token).decode('utf-8')\n",
454
+ " f.write(f\"{token_b64} {rank}\\n\")\n",
455
+ " \n",
456
+ " print(f\"Modified tokenizer saved to {output_file}\")\n",
457
+ " print(f\"Added {1000} location tokens\")\n",
458
+ "\n",
459
+ "# if __name__ == \"__main__\":\n",
460
+ "modify_qwen_tokenizer_small()"
461
+ ]
462
+ },
463
+ {
464
+ "cell_type": "code",
465
+ "execution_count": 9,
466
+ "metadata": {},
467
+ "outputs": [
468
+ {
469
+ "name": "stdout",
470
+ "output_type": "stream",
471
+ "text": [
472
+ "Comparison saved to tokenizer_comparison.txt\n",
473
+ "\n",
474
+ "Statistics:\n",
475
+ "Old tokenizer size: 151860\n",
476
+ "New tokenizer size: 151860\n",
477
+ "\n",
478
+ "Sample of differences:\n",
479
+ "Token 150643: '∉' -> '<loc_0>'\n",
480
+ "Token 150644: '∊' -> '<loc_1>'\n",
481
+ "Token 150645: '∖' -> '<loc_2>'\n",
482
+ "Token 150646: '∜' -> '<loc_3>'\n",
483
+ "Token 150647: '∾' -> '<loc_4>'\n",
484
+ "Token 150648: '≀' -> '<loc_5>'\n",
485
+ "Token 150649: '≋' -> '<loc_6>'\n",
486
+ "Token 150650: '≌' -> '<loc_7>'\n",
487
+ "Token 150651: '≓' -> '<loc_8>'\n",
488
+ "Token 150652: '≜' -> '<loc_9>'\n"
489
+ ]
490
+ }
491
+ ],
492
+ "source": [
493
+ "from tokenization_qwen import QWenTokenizer\n",
494
+ "from tabulate import tabulate\n",
495
+ "\n",
496
+ "def compare_tokenizers(old_file=\"./qwen.tiktoken\", new_file=\"./modified_qwen_sub.tiktoken\", output_file=\"tokenizer_comparison.txt\"):\n",
497
+ " # Initialize both tokenizers\n",
498
+ " old_tokenizer = QWenTokenizer(vocab_file=old_file)\n",
499
+ " new_tokenizer = QWenTokenizer_SUB(vocab_file=new_file)\n",
500
+ " \n",
501
+ " # Get vocabulary size\n",
502
+ " vocab_size = max(len(old_tokenizer), len(new_tokenizer))\n",
503
+ " \n",
504
+ " # Prepare comparison data\n",
505
+ " comparison_data = []\n",
506
+ " for token_id in range(vocab_size):\n",
507
+ " try:\n",
508
+ " old_token = old_tokenizer.tokenizer.decode([token_id])\n",
509
+ " except:\n",
510
+ " old_token = \"N/A\"\n",
511
+ " \n",
512
+ " try:\n",
513
+ " new_token = new_tokenizer.tokenizer.decode([token_id])\n",
514
+ " except:\n",
515
+ " new_token = \"N/A\"\n",
516
+ " \n",
517
+ " comparison_data.append([token_id, old_token, new_token])\n",
518
+ " \n",
519
+ " # just write the data to a file\n",
520
+ " with open(output_file, 'w', encoding='utf-8') as f:\n",
521
+ " for row in comparison_data:\n",
522
+ " f.write(f\"{row[0]},{row[1]},{row[2]}\\n\")\n",
523
+ " \n",
524
+ "\n",
525
+ " print(f\"Comparison saved to {output_file}\")\n",
526
+ " \n",
527
+ " # Print some statistics\n",
528
+ " print(f\"\\nStatistics:\")\n",
529
+ " print(f\"Old tokenizer size: {len(old_tokenizer)}\")\n",
530
+ " print(f\"New tokenizer size: {len(new_tokenizer)}\")\n",
531
+ " \n",
532
+ " # Print sample of differences\n",
533
+ " print(\"\\nSample of differences:\")\n",
534
+ " differences = [(i, old, new) for i, old, new in comparison_data if old != new]\n",
535
+ " for i, (token_id, old, new) in enumerate(differences[:10]):\n",
536
+ " print(f\"Token {token_id}: '{old}' -> '{new}'\")\n",
537
+ "\n",
538
+ "# Run the comparison\n",
539
+ "compare_tokenizers()"
540
+ ]
541
+ },
542
+ {
543
+ "cell_type": "code",
544
+ "execution_count": null,
545
+ "metadata": {},
546
+ "outputs": [],
547
+ "source": []
548
+ }
549
+ ],
550
+ "metadata": {
551
+ "kernelspec": {
552
+ "display_name": "base",
553
+ "language": "python",
554
+ "name": "python3"
555
+ },
556
+ "language_info": {
557
+ "codemirror_mode": {
558
+ "name": "ipython",
559
+ "version": 3
560
+ },
561
+ "file_extension": ".py",
562
+ "mimetype": "text/x-python",
563
+ "name": "python",
564
+ "nbconvert_exporter": "python",
565
+ "pygments_lexer": "ipython3",
566
+ "version": "3.11.9"
567
+ }
568
+ },
569
+ "nbformat": 4,
570
+ "nbformat_minor": 2
571
+ }
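
The notebook above frees the last 1,000 BPE ranks of `qwen.tiktoken` and hands those ids over to `<loc_*>` markers (either written back into a modified `.tiktoken`, or registered as special tokens by the tokenizer class), so location markers encode as single ids while the overall tokenizer size stays at 151,860. A small hedged sanity check of the vocabulary surgery, assuming the notebook has already written `./modified_qwen_sub.tiktoken`:

```python
# Sketch: count base BPE ranks before and after the truncation performed in
# the notebook above. The numbers in the comments come from the notebook output.
def count_ranks(path):
    with open(path, "rb") as f:
        return sum(1 for line in f.read().splitlines() if line)

print(count_ranks("./qwen.tiktoken"))               # 151643 ranks in the original vocab
print(count_ranks("./modified_qwen_sub.tiktoken"))  # 150643 ranks after dropping the last 1000
```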
tokenization_qwen.py ADDED
@@ -0,0 +1,283 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Tokenization classes for QWen."""
7
+
8
+ import base64
9
+ import logging
10
+ import os
11
+ import unicodedata
12
+ from typing import Collection, Dict, List, Set, Tuple, Union
13
+
14
+ import tiktoken
15
+ from transformers import PreTrainedTokenizer, AddedToken
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
21
+
22
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
23
+ ENDOFTEXT = "<|endoftext|>"
24
+ IMSTART = "<|im_start|>"
25
+ IMEND = "<|im_end|>"
26
+ # as the default behavior is changed to allow special tokens in
27
+ # regular texts, the surface forms of special tokens need to be
28
+ # as different as possible to minimize the impact
29
+ # EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
30
+ # 8 unique categories
31
+ # caption , text , formula , list , text , image , title , table
32
+
33
+ # layout_detection <LD>
34
+ # table_extraction <TE>
35
+ # math_formula_detection <MF>
36
+ # image_captioning <IC>
37
+ # ocr <OCR>
38
+ # page_ocr <POCR>
39
+ # visual_question_answering <VQA>
40
+ # document_visual_question_answering <DVQA>
41
+
42
+ DOCUMENT_UNIQUE_TOKENS = tuple(["<caption>" , "<formula>" , "<list>" , "<text>" , "<image>" , "<title>" , "<table>" , "<LD>", "<TE>", "<MF>", "<IC>", "<OCR>" , "<POCR>" , "<VQA>" , "<DVQA>"])
43
+
44
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(len(DOCUMENT_UNIQUE_TOKENS) , 205)))
45
+
46
+ LOCATION_UNIQUE_TOKENS = tuple((f"<loc_{i}>" for i in range(1,1001)))
47
+
48
+
49
+ SPECIAL_TOKENS = LOCATION_UNIQUE_TOKENS + (
50
+ ENDOFTEXT,
51
+ IMSTART,
52
+ IMEND,
53
+ ) + DOCUMENT_UNIQUE_TOKENS + EXTRAS
54
+
55
+
56
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
57
+ with open(tiktoken_bpe_file, "rb") as f:
58
+ contents = f.read()
59
+ return {
60
+ base64.b64decode(token): int(rank)
61
+ for token, rank in (line.split() for line in contents.splitlines() if line)
62
+ }
63
+
64
+ class QWenTokenizer(PreTrainedTokenizer):
65
+ """QWen tokenizer."""
66
+
67
+ vocab_files_names = VOCAB_FILES_NAMES
68
+
69
+ def __init__(
70
+ self,
71
+ vocab_file,
72
+ errors="replace",
73
+ image_start_tag='<img>',
74
+ image_end_tag='</img>',
75
+ image_pad_tag='<imgpad>',
76
+ ref_start_tag='<ref>',
77
+ ref_end_tag='</ref>',
78
+ box_start_tag='<box>',
79
+ box_end_tag='</box>',
80
+ quad_start_tag='<quad>',
81
+ quad_end_tag='</quad>',
82
+ **kwargs,
83
+ ):
84
+ super().__init__(**kwargs)
85
+
86
+ self.image_start_tag = image_start_tag
87
+ self.image_end_tag = image_end_tag
88
+ self.image_pad_tag = image_pad_tag
89
+ self.ref_start_tag = ref_start_tag
90
+ self.ref_end_tag = ref_end_tag
91
+ self.box_start_tag = box_start_tag
92
+ self.box_end_tag = box_end_tag
93
+ self.quad_start_tag = quad_start_tag
94
+ self.quad_end_tag = quad_end_tag
95
+ self.IMAGE_ST = (
96
+ ref_start_tag, ref_end_tag,
97
+ box_start_tag, box_end_tag,
98
+ quad_start_tag, quad_end_tag,
99
+ image_start_tag, image_end_tag,
100
+ image_pad_tag
101
+ )
102
+
103
+ self.errors = errors # how to handle errors in decoding
104
+
105
+ self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]
106
+ self.special_tokens = {
107
+ token: index
108
+ for index, token in enumerate(
109
+ SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
110
+ )
111
+ }
112
+
113
+ self.img_start_id = self.special_tokens[self.image_start_tag]
114
+ self.img_end_id = self.special_tokens[self.image_end_tag]
115
+ self.img_pad_id = self.special_tokens[self.image_pad_tag]
116
+ self.ref_start_id = self.special_tokens[self.ref_start_tag]
117
+ self.ref_end_id = self.special_tokens[self.ref_end_tag]
118
+ self.box_start_id = self.special_tokens[self.box_start_tag]
119
+ self.box_end_id = self.special_tokens[self.box_end_tag]
120
+ self.quad_start_id = self.special_tokens[self.quad_start_tag]
121
+ self.quad_end_id = self.special_tokens[self.quad_end_tag]
122
+
123
+ enc = tiktoken.Encoding(
124
+ "Qwen",
125
+ pat_str=PAT_STR,
126
+ mergeable_ranks=self.mergeable_ranks,
127
+ special_tokens=self.special_tokens,
128
+ )
129
+ assert (
130
+ len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
131
+ ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
132
+
133
+ self.decoder = {
134
+ v: k for k, v in self.mergeable_ranks.items()
135
+ } # type: dict[int, bytes|str]
136
+ self.decoder.update({v: k for k, v in self.special_tokens.items()})
137
+
138
+ self.tokenizer = enc # type: tiktoken.Encoding
139
+
140
+ self.eod_id = self.tokenizer.eot_token
141
+ self.im_start_id = self.special_tokens[IMSTART]
142
+ self.im_end_id = self.special_tokens[IMEND]
143
+
144
+ def __len__(self) -> int:
145
+ return self.tokenizer.n_vocab
146
+
147
+ def get_vocab(self) -> Dict[bytes, int]:
148
+ return self.mergeable_ranks
149
+
150
+ def convert_tokens_to_ids(
151
+ self, tokens: Union[bytes, str, List[Union[bytes, str]]]
152
+ ) -> List[int]:
153
+ ids = []
154
+ if isinstance(tokens, (str, bytes)):
155
+ if tokens in self.special_tokens:
156
+ return self.special_tokens[tokens]
157
+ else:
158
+ return self.mergeable_ranks.get(tokens)
159
+ for token in tokens:
160
+ if token in self.special_tokens:
161
+ ids.append(self.special_tokens[token])
162
+ else:
163
+ ids.append(self.mergeable_ranks.get(token))
164
+ return ids
165
+
166
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
167
+ if not special_tokens and new_tokens:
168
+ raise ValueError('Adding regular tokens is not supported')
169
+ for token in new_tokens:
170
+ surface_form = token.content if isinstance(token, AddedToken) else token
171
+ if surface_form not in SPECIAL_TOKENS:
172
+ raise ValueError('Adding unknown special tokens is not supported')
173
+ return 0
174
+
175
+ def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
176
+ """
177
+ Save only the vocabulary of the tokenizer (vocabulary).
178
+
179
+ Returns:
180
+ `Tuple(str)`: Paths to the files saved.
181
+ """
182
+ file_path = os.path.join(save_directory, "qwen.tiktoken")
183
+ with open(file_path, "w", encoding="utf8") as w:
184
+ for k, v in self.mergeable_ranks.items():
185
+ line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
186
+ w.write(line)
187
+ return (file_path,)
188
+
189
+ def tokenize(
190
+ self,
191
+ text: str,
192
+ allowed_special: Union[Set, str] = "all",
193
+ disallowed_special: Union[Collection, str] = (),
194
+ **kwargs,
195
+ ) -> List[Union[bytes, str]]:
196
+ """
197
+ Converts a string in a sequence of tokens.
198
+
199
+ Args:
200
+ text (`str`):
201
+ The sequence to be encoded.
202
+ allowed_special (`Literal["all"]` or `set`):
203
+ The surface forms of the tokens to be encoded as special tokens in regular texts.
204
+ Default to "all".
205
+ disallowed_special (`Literal["all"]` or `Collection`):
206
+ The surface forms of the tokens that should not be in regular texts and trigger errors.
207
+ Default to an empty tuple.
208
+
209
+ kwargs (additional keyword arguments, *optional*):
210
+ Will be passed to the underlying model specific encode method.
211
+
212
+ Returns:
213
+ `List[bytes|str]`: The list of tokens.
214
+ """
215
+ tokens = []
216
+ text = unicodedata.normalize("NFC", text)
217
+
218
+ # this implementation takes a detour: text -> token id -> token surface forms
219
+ for t in self.tokenizer.encode(
220
+ text, allowed_special=allowed_special, disallowed_special=disallowed_special
221
+ ):
222
+ tokens.append(self.decoder[t])
223
+ return tokens
224
+
225
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
226
+ """
227
+ Converts a sequence of tokens in a single string.
228
+ """
229
+ text = ""
230
+ temp = b""
231
+ for t in tokens:
232
+ if isinstance(t, str):
233
+ if temp:
234
+ text += temp.decode("utf-8", errors=self.errors)
235
+ temp = b""
236
+ text += t
237
+ elif isinstance(t, bytes):
238
+ temp += t
239
+ else:
240
+ raise TypeError("token should only be of type types or str")
241
+ if temp:
242
+ text += temp.decode("utf-8", errors=self.errors)
243
+ return text
244
+
245
+ @property
246
+ def vocab_size(self):
247
+ return self.tokenizer.n_vocab
248
+
249
+ def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
250
+ """Converts an id to a token, special tokens included"""
251
+ if index in self.decoder:
252
+ return self.decoder[index]
253
+ raise ValueError("unknown ids")
254
+
255
+ def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
256
+ """Converts a token to an id using the vocab, special tokens included"""
257
+ if token in self.special_tokens:
258
+ return self.special_tokens[token]
259
+ if token in self.mergeable_ranks:
260
+ return self.mergeable_ranks[token]
261
+ raise ValueError("unknown token")
262
+
263
+ def _tokenize(self, text: str, **kwargs):
264
+ """
265
+ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
266
+ vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
267
+
268
+ Do NOT take care of added tokens.
269
+ """
270
+ raise NotImplementedError
271
+
272
+ def _decode(
273
+ self,
274
+ token_ids: Union[int, List[int]],
275
+ skip_special_tokens: bool = False,
276
+ errors: str = None,
277
+ **kwargs,
278
+ ) -> str:
279
+ if isinstance(token_ids, int):
280
+ token_ids = [token_ids]
281
+ if skip_special_tokens:
282
+ token_ids = [i for i in token_ids if i < self.eod_id]
283
+ return self.tokenizer.decode(token_ids, errors=errors or self.errors)
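
In this modified `tokenization_qwen.py`, `LOCATION_UNIQUE_TOKENS` is prepended to `SPECIAL_TOKENS`, so the `<loc_*>` ids occupy the first slots after the base BPE ranks. A hedged sketch of reading those ids back, assuming `qwen.tiktoken` sits in the working directory:

```python
# Sketch: inspect where the location tokens land in the id space of the
# modified QWenTokenizer defined above.
from tokenization_qwen import QWenTokenizer

tok = QWenTokenizer(vocab_file="./qwen.tiktoken")
base = len(tok.mergeable_ranks)                             # number of base BPE ranks
print(tok.special_tokens["<loc_1>"] == base)                # True: first special token id
print(tok.special_tokens["<loc_1000>"] == base + 999)       # True: end of the location block
print(tok.special_tokens["<|endoftext|>"] == base + 1000)   # True: follows the location block
```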
tokenization_qwen_original.py ADDED
@@ -0,0 +1,283 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Tokenization classes for QWen."""
7
+
8
+ import base64
9
+ import logging
10
+ import os
11
+ import unicodedata
12
+ from typing import Collection, Dict, List, Set, Tuple, Union
13
+
14
+ import tiktoken
15
+ from transformers import PreTrainedTokenizer, AddedToken
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
21
+
22
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
23
+ ENDOFTEXT = "<|endoftext|>"
24
+ IMSTART = "<|im_start|>"
25
+ IMEND = "<|im_end|>"
26
+ # as the default behavior is changed to allow special tokens in
27
+ # regular texts, the surface forms of special tokens need to be
28
+ # as different as possible to minimize the impact
29
+ # EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
30
+ # 8 unique categories
31
+ # caption , text , formula , list , text , image , title , table
32
+
33
+ # layout_detection <LD>
34
+ # table_extraction <TE>
35
+ # math_formula_detection <MF>
36
+ # image_captioning <IC>
37
+ # ocr <OCR>
38
+ # page_ocr <POCR>
39
+ # visual_question_answering <VQA>
40
+ # document_visual_question_answering <DVQA>
41
+
42
+ DOCUMENT_UNIQUE_TOKENS = tuple(["<caption>" , "<formula>" , "<list>" , "<text>" , "<image>" , "<title>" , "<table>" , "<LD>", "<TE>", "<MF>", "<IC>", "<OCR>" , "<POCR>" , "<VQA>" , "<DVQA>"])
43
+
44
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(len(DOCUMENT_UNIQUE_TOKENS) , 205)))
45
+
46
+ LOCATION_UNIQUE_TOKENS = tuple((f"<loc_{i}>" for i in range(0,1000)))
47
+
48
+
49
+ SPECIAL_TOKENS = (
50
+ ENDOFTEXT,
51
+ IMSTART,
52
+ IMEND,
53
+ ) + DOCUMENT_UNIQUE_TOKENS + EXTRAS
54
+
55
+
56
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
57
+ with open(tiktoken_bpe_file, "rb") as f:
58
+ contents = f.read()
59
+ return {
60
+ base64.b64decode(token): int(rank)
61
+ for token, rank in (line.split() for line in contents.splitlines() if line)
62
+ }
63
+
64
+ class QWenTokenizer(PreTrainedTokenizer):
65
+ """QWen tokenizer."""
66
+
67
+ vocab_files_names = VOCAB_FILES_NAMES
68
+
69
+ def __init__(
70
+ self,
71
+ vocab_file,
72
+ errors="replace",
73
+ image_start_tag='<img>',
74
+ image_end_tag='</img>',
75
+ image_pad_tag='<imgpad>',
76
+ ref_start_tag='<ref>',
77
+ ref_end_tag='</ref>',
78
+ box_start_tag='<box>',
79
+ box_end_tag='</box>',
80
+ quad_start_tag='<quad>',
81
+ quad_end_tag='</quad>',
82
+ **kwargs,
83
+ ):
84
+ super().__init__(**kwargs)
85
+
86
+ self.image_start_tag = image_start_tag
87
+ self.image_end_tag = image_end_tag
88
+ self.image_pad_tag = image_pad_tag
89
+ self.ref_start_tag = ref_start_tag
90
+ self.ref_end_tag = ref_end_tag
91
+ self.box_start_tag = box_start_tag
92
+ self.box_end_tag = box_end_tag
93
+ self.quad_start_tag = quad_start_tag
94
+ self.quad_end_tag = quad_end_tag
95
+ self.IMAGE_ST = (
96
+ ref_start_tag, ref_end_tag,
97
+ box_start_tag, box_end_tag,
98
+ quad_start_tag, quad_end_tag,
99
+ image_start_tag, image_end_tag,
100
+ image_pad_tag
101
+ )
102
+
103
+ self.errors = errors # how to handle errors in decoding
104
+
105
+ self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]
106
+ self.special_tokens = {
107
+ token: index
108
+ for index, token in enumerate(
109
+ SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
110
+ )
111
+ }
112
+
113
+ self.img_start_id = self.special_tokens[self.image_start_tag]
114
+ self.img_end_id = self.special_tokens[self.image_end_tag]
115
+ self.img_pad_id = self.special_tokens[self.image_pad_tag]
116
+ self.ref_start_id = self.special_tokens[self.ref_start_tag]
117
+ self.ref_end_id = self.special_tokens[self.ref_end_tag]
118
+ self.box_start_id = self.special_tokens[self.box_start_tag]
119
+ self.box_end_id = self.special_tokens[self.box_end_tag]
120
+ self.quad_start_id = self.special_tokens[self.quad_start_tag]
121
+ self.quad_end_id = self.special_tokens[self.quad_end_tag]
122
+
123
+ enc = tiktoken.Encoding(
124
+ "Qwen",
125
+ pat_str=PAT_STR,
126
+ mergeable_ranks=self.mergeable_ranks,
127
+ special_tokens=self.special_tokens,
128
+ )
129
+ assert (
130
+ len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
131
+ ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
132
+
133
+ self.decoder = {
134
+ v: k for k, v in self.mergeable_ranks.items()
135
+ } # type: dict[int, bytes|str]
136
+ self.decoder.update({v: k for k, v in self.special_tokens.items()})
137
+
138
+ self.tokenizer = enc # type: tiktoken.Encoding
139
+
140
+ self.eod_id = self.tokenizer.eot_token
141
+ self.im_start_id = self.special_tokens[IMSTART]
142
+ self.im_end_id = self.special_tokens[IMEND]
143
+
144
+ def __len__(self) -> int:
145
+ return self.tokenizer.n_vocab
146
+
147
+ def get_vocab(self) -> Dict[bytes, int]:
148
+ return self.mergeable_ranks
149
+
150
+ def convert_tokens_to_ids(
151
+ self, tokens: Union[bytes, str, List[Union[bytes, str]]]
152
+ ) -> List[int]:
153
+ ids = []
154
+ if isinstance(tokens, (str, bytes)):
155
+ if tokens in self.special_tokens:
156
+ return self.special_tokens[tokens]
157
+ else:
158
+ return self.mergeable_ranks.get(tokens)
159
+ for token in tokens:
160
+ if token in self.special_tokens:
161
+ ids.append(self.special_tokens[token])
162
+ else:
163
+ ids.append(self.mergeable_ranks.get(token))
164
+ return ids
165
+
166
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
167
+ if not special_tokens and new_tokens:
168
+ raise ValueError('Adding regular tokens is not supported')
169
+ for token in new_tokens:
170
+ surface_form = token.content if isinstance(token, AddedToken) else token
171
+ if surface_form not in SPECIAL_TOKENS:
172
+ raise ValueError('Adding unknown special tokens is not supported')
173
+ return 0
174
+
175
+ def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
176
+ """
177
+ Save only the vocabulary of the tokenizer (vocabulary).
178
+
179
+ Returns:
180
+ `Tuple(str)`: Paths to the files saved.
181
+ """
182
+ file_path = os.path.join(save_directory, "qwen.tiktoken")
183
+ with open(file_path, "w", encoding="utf8") as w:
184
+ for k, v in self.mergeable_ranks.items():
185
+ line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
186
+ w.write(line)
187
+ return (file_path,)
188
+
189
+ def tokenize(
190
+ self,
191
+ text: str,
192
+ allowed_special: Union[Set, str] = "all",
193
+ disallowed_special: Union[Collection, str] = (),
194
+ **kwargs,
195
+ ) -> List[Union[bytes, str]]:
196
+ """
197
+ Converts a string in a sequence of tokens.
198
+
199
+ Args:
200
+ text (`str`):
201
+ The sequence to be encoded.
202
+ allowed_special (`Literal["all"]` or `set`):
203
+ The surface forms of the tokens to be encoded as special tokens in regular texts.
204
+ Default to "all".
205
+ disallowed_special (`Literal["all"]` or `Collection`):
206
+ The surface forms of the tokens that should not be in regular texts and trigger errors.
207
+ Default to an empty tuple.
208
+
209
+ kwargs (additional keyword arguments, *optional*):
210
+ Will be passed to the underlying model specific encode method.
211
+
212
+ Returns:
213
+ `List[bytes|str]`: The list of tokens.
214
+ """
215
+ tokens = []
216
+ text = unicodedata.normalize("NFC", text)
217
+
218
+ # this implementation takes a detour: text -> token id -> token surface forms
219
+ for t in self.tokenizer.encode(
220
+ text, allowed_special=allowed_special, disallowed_special=disallowed_special
221
+ ):
222
+ tokens.append(self.decoder[t])
223
+ return tokens
224
+
225
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
226
+ """
227
+ Converts a sequence of tokens in a single string.
228
+ """
229
+ text = ""
230
+ temp = b""
231
+ for t in tokens:
232
+ if isinstance(t, str):
233
+ if temp:
234
+ text += temp.decode("utf-8", errors=self.errors)
235
+ temp = b""
236
+ text += t
237
+ elif isinstance(t, bytes):
238
+ temp += t
239
+ else:
240
+ raise TypeError("token should only be of type types or str")
241
+ if temp:
242
+ text += temp.decode("utf-8", errors=self.errors)
243
+ return text
244
+
245
+ @property
246
+ def vocab_size(self):
247
+ return self.tokenizer.n_vocab
248
+
249
+ def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
250
+ """Converts an id to a token, special tokens included"""
251
+ if index in self.decoder:
252
+ return self.decoder[index]
253
+ raise ValueError("unknown ids")
254
+
255
+ def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
256
+ """Converts a token to an id using the vocab, special tokens included"""
257
+ if token in self.special_tokens:
258
+ return self.special_tokens[token]
259
+ if token in self.mergeable_ranks:
260
+ return self.mergeable_ranks[token]
261
+ raise ValueError("unknown token")
262
+
263
+ def _tokenize(self, text: str, **kwargs):
264
+ """
265
+ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
266
+ vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
267
+
268
+ Do NOT take care of added tokens.
269
+ """
270
+ raise NotImplementedError
271
+
272
+ def _decode(
273
+ self,
274
+ token_ids: Union[int, List[int]],
275
+ skip_special_tokens: bool = False,
276
+ errors: str = None,
277
+ **kwargs,
278
+ ) -> str:
279
+ if isinstance(token_ids, int):
280
+ token_ids = [token_ids]
281
+ if skip_special_tokens:
282
+ token_ids = [i for i in token_ids if i < self.eod_id]
283
+ return self.tokenizer.decode(token_ids, errors=errors or self.errors)
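
`tokenization_qwen_original.py` is kept for reference; the visible difference from the modified module is that its `SPECIAL_TOKENS` tuple contains no `<loc_*>` entries (its `LOCATION_UNIQUE_TOKENS` is defined but unused). A quick hedged comparison of the two modules:

```python
# Sketch: diff the special-token surface forms of the two tokenizer modules above.
import tokenization_qwen as modified
import tokenization_qwen_original as original

only_in_modified = set(modified.SPECIAL_TOKENS) - set(original.SPECIAL_TOKENS)
print(len(only_in_modified))         # 1000: the <loc_*> markers added in the modified file
print(sorted(only_in_modified)[:3])  # e.g. ['<loc_1>', '<loc_10>', '<loc_100>'] (string sort)
```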
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "added_tokens_decoder": {},
3
+ "auto_map": {
4
+ "AutoTokenizer": [
5
+ "tokenization_qwen.QWenTokenizer",
6
+ null
7
+ ]
8
+ },
9
+ "clean_up_tokenization_spaces": true,
10
+ "model_max_length": 8000,
11
+ "pad_token": "<|endoftext|>",
12
+ "padding_side": "right",
13
+ "tokenizer_class": "QWenTokenizer"
14
+ }
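
Finally, `tokenizer_config.json` wires the custom class into `AutoTokenizer` through `auto_map`, pins the pad token to `<|endoftext|>`, and caps `model_max_length` at 8000. A hedged sketch of loading the tokenizer from a local checkout of this repository (the `./` path is an assumption):

```python
# Sketch: let AutoTokenizer follow the auto_map entry above to
# tokenization_qwen.QWenTokenizer in a local copy of this repo.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./", trust_remote_code=True)
print(type(tok).__name__)      # QWenTokenizer
print(tok.pad_token)           # <|endoftext|>, as set in the config above
print(tok.model_max_length)    # 8000
```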