linzheng committed
Commit 9ae9789 · verified · 1 Parent(s): cdac6db

Upload processor

preprocessor_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "auto_map": {
-    "AutoImageProcessor": "image_processing_evabyte.EvaByteImageProcessor"
+    "AutoImageProcessor": "image_processing_evabyte.EvaByteImageProcessor",
+    "AutoProcessor": "processing_evabyte.EvaByteProcessor"
   },
   "do_convert_rgb": true,
   "do_resize": true,
@@ -9,6 +10,7 @@
   "jpeg_restart_marker_blocks": 1,
   "jpeg_streamtype": 2,
   "jpeg_subsampling": "4:2:0",
+  "processor_class": "EvaByteProcessor",
   "resample": 1,
   "size": {
     "longest_edge": 384
processing_evabyte.py ADDED
@@ -0,0 +1,287 @@
+# coding=utf-8
+"""
+Processor class for EvaByte.
+"""
+import base64
+import os
+from io import BytesIO
+from typing import List, Optional, Union
+
+import requests
+import PIL
+from PIL import Image
+
+from transformers.feature_extraction_utils import BatchFeature
+from transformers.image_utils import ImageInput, is_valid_image
+from transformers.processing_utils import ProcessorMixin
+from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
+from transformers.utils import TensorType, to_py_obj
+
+
+def fetch_image(image: Union[str, "PIL.Image.Image"]) -> Image.Image:
+    """Resolve a PIL image, http(s) URL, local path, or base64 data URI to a PIL.Image."""
+    image_obj = None
+    if isinstance(image, Image.Image):
+        image_obj = image
+    elif image.startswith("http://") or image.startswith("https://"):
+        image_obj = Image.open(BytesIO(requests.get(image, timeout=None).content))
+    elif os.path.isfile(image):
+        image_obj = Image.open(image)
+    elif image.startswith("data:image/"):
+        # Try to load as base64
+        image = image.split(",")[1]
+        try:
+            b64 = base64.decodebytes(image.encode())
+            image_obj = Image.open(BytesIO(b64))
+        except Exception as e:
+            raise ValueError(
+                f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, "
+                f"a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
+            )
+    else:
+        image_obj = Image.open(image)
+    if image_obj is None:
+        raise ValueError(
+            f"Unrecognized image input; supported inputs are a local path, an http(s) URL, "
+            f"a base64 data URI, or a PIL.Image, got {image}"
+        )
+
+    return image_obj
+
+def is_url(val) -> bool:
+    return isinstance(val, str) and val.startswith("http")
+
+
+def is_file(val) -> bool:
+    return isinstance(val, str) and os.path.isfile(val)
+
+
+def is_image_or_image_url(elem):
+    return is_url(elem) or is_valid_image(elem) or is_file(elem)
+
+
+vl_chat_template = """
+{{- bos_token }}
+{%- if messages[0]['role'] == 'system' %}
+    {%- set system_message = messages[0]['content'] %}
+    {%- set messages = messages[1:] %}
+{%- else %}
+    {%- set system_message = "" %}
+{%- endif %}
+
+{{- '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>' }}
+
+{%- for message in messages %}
+    {%- if (message['role'] != 'user') and (message['role'] != 'assistant') %}
+        {{- raise_exception('Conversation roles must be user or assistant') }}
+    {%- endif %}
+
+    {%- if message['content'] is string %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }}
+    {%- else %}
+        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}
+        {%- for content in message['content'] %}
+            {%- if content['type'] == 'image' %}
+                {{- '<image_placeholder>\n' }}
+            {%- elif content['type'] == 'text' %}
+                {{- content['text'] }}
+            {%- endif %}
+        {%- endfor %}
+        {{- '<|eot_id|>' }}
+    {%- endif %}
+{%- endfor %}
+
+{%- if add_generation_prompt %}
+    {{- '<|start_header_id|>' + 'assistant' + '<|end_header_id|>\n\n' }}
+{%- endif %}
+"""
+
+
+class EvaByteProcessor(ProcessorMixin):
+    r"""
+    Constructs an EvaByte processor, which wraps an EvaByte image processor and an EvaByte tokenizer into a single
+    processor.
+
+    [`EvaByteProcessor`] offers all the functionalities of [`EvaByteImageProcessor`] and [`EvaByteTokenizer`]. See
+    [`~EvaByteProcessor.__call__`] and [`~EvaByteProcessor.decode`] for more information.
+
+    Args:
+        image_processor ([`EvaByteImageProcessor`], *optional*):
+            The image processor is a required input.
+        tokenizer ([`EvaByteTokenizer`], *optional*):
+            The tokenizer is a required input.
+    """
+
+    attributes = ["image_processor", "tokenizer"]
+    image_processor_class = "AutoImageProcessor"
+    tokenizer_class = "AutoTokenizer"
+
+    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+        if image_processor is None:
+            raise ValueError("You need to specify an `image_processor`.")
+        if tokenizer is None:
+            raise ValueError("You need to specify a `tokenizer`.")
+
+        super().__init__(image_processor, tokenizer)
+        self.t2v_token_id = self.tokenizer.convert_tokens_to_ids("<t2v_token>")
+        self.v2t_token_id = self.tokenizer.convert_tokens_to_ids("<v2t_token>")
+        self.image_placeholder = "<image_placeholder>"
+        self.vl_chat_template = vl_chat_template
+
+    def __call__(
+        self,
+        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+        images: ImageInput = None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        strip_ending_sentinel: bool = False,
+        encode_only: bool = False,
+        **kwargs,
+    ) -> Union[BatchFeature, List[List[int]]]:
+        # processing pipeline:
+        # 1. read images or videos from paths
+        # 2. use image_processor to convert images / videos to byte streams
+        if images is not None:
+            if isinstance(images, bytes):
+                image_bytes_list = [[images]]
+            elif isinstance(images, list) and isinstance(images[0], bytes):
+                image_bytes_list = [images]
+            elif isinstance(images, list) and isinstance(images[0], list) and isinstance(images[0][0], bytes):
+                image_bytes_list = images
+            else:
+                if is_image_or_image_url(images):
+                    images = [[images]]
+                elif isinstance(images, list) and is_image_or_image_url(images[0]):
+                    images = [images]
+                elif not (
+                    isinstance(images, list)
+                    and isinstance(images[0], list)
+                    and is_image_or_image_url(images[0][0])
+                ):
+                    raise ValueError(
+                        "Invalid input images. Please provide a single image, a list of images, or a list of lists of images."
+                    )
+                # Load images if they are URLs or file paths
+                images = [[fetch_image(im) if is_url(im) or is_file(im) else im for im in sample] for sample in images]
+                image_bytes_list = self.image_processor(images=images, **kwargs)
+        else:
+            image_bytes_list = None
+
+        if not isinstance(text, list):
+            text = [text]
+        assert len(text) == 1, "Only support batch size 1 for now"
+        if image_bytes_list is None:
+            # text-only input: no image byte streams to interleave
+            image_bytes_list = [[] for _ in text]
+        assert len(text) == len(image_bytes_list), "text and image_bytes_list must have the same length"
+        # TODO: invoke SequenceFeatureExtractor to get batched inputs
+
+        # 3. tokenize the text and put images / videos byte streams into the placeholders
+        # surrounded by special tokens like "<image>" and "</image>"
+        batch_input_ids = []
+        if not encode_only:
+            batch_attention_mask = []
+        else:
+            batch_attention_mask = None
+
+        for t, image_bytes in zip(text, image_bytes_list):
+            text_splits = t.split(self.image_placeholder)
+            if len(text_splits) != len(image_bytes) + 1:
+                raise ValueError(
+                    f"The number of image placeholders should be equal to the number of images, "
+                    f"but got {len(text_splits) - 1} placeholders and {len(image_bytes)} images"
+                )
+
+            input_ids = [self.tokenizer.bos_token_id]
+            for i, text_part in enumerate(text_splits):
+                # each text part must be non-empty because we added markers around placeholders
+                split_tokens = self.tokenizer.encode(text_part, add_special_tokens=False)
+                input_ids.extend(split_tokens)
+                # Add image bytes after each text part except the last one
+                if i < len(image_bytes):
+                    input_ids.append(self.t2v_token_id)
+                    input_ids.extend([b + self.tokenizer.offset for b in image_bytes[i]])
+                    input_ids.append(self.v2t_token_id)
+
+            if strip_ending_sentinel and (input_ids[-1] in [self.t2v_token_id, self.v2t_token_id]):
+                input_ids = input_ids[:-1]
+
+            batch_input_ids.append(input_ids)
+            if not encode_only:
+                batch_attention_mask.append([1] * len(input_ids))
+
+        if not encode_only:
+            # 4. return batch of features
+            inputs = BatchFeature({
+                "input_ids": batch_input_ids,
+                "attention_mask": batch_attention_mask,
+            }, tensor_type=return_tensors)
+            return inputs
+            # # Pad sequences
+            # padded_inputs = self.tokenizer.pad(
+            #     {"input_ids": batch_input_ids},
+            #     padding=True,
+            #     return_attention_mask=True,
+            #     return_tensors=return_tensors,
+            # )
+            # return BatchFeature(data=padded_inputs)
+        else:
+            return batch_input_ids
+
+    def image_tokens_to_bytes(self, image_token_ids, jpeg_quality=None):
+        image_bytes = bytes([token_id - self.tokenizer.offset for token_id in image_token_ids])
+        image_bytes = self.image_processor.jpeg_merge_qtables(image_bytes, jpeg_quality)
+        return image_bytes
+
+    def batch_decode(self, sequences, **kwargs):
+        """
+        Applies [`~EvaByteProcessor.decode`] to each sequence. Returns a tuple of
+        (decoded_texts, images_per_sequence); please refer to the docstring of `decode` for more information.
+        """
+        rets = [self.decode(seq, **kwargs) for seq in sequences]
+        return tuple(map(list, zip(*rets)))
+
+    def decode(self, token_ids, **kwargs):
+        """
+        Decodes a sequence of input_ids, handling image tokens separately.
+        Returns a tuple of (decoded_text, images), where images is a list of bytes.
+        """
+        if kwargs and "jpeg_quality" in kwargs:
+            kwargs = kwargs.copy()
+            jpeg_quality = kwargs.pop("jpeg_quality")
+        else:
+            jpeg_quality = None
+
+        token_ids = to_py_obj(token_ids)
+        # Find indices of t2v_token_id and v2t_token_id
+        t2v_indices = [i for i, token_id in enumerate(token_ids) if token_id == self.t2v_token_id]
+        v2t_indices = [i for i, token_id in enumerate(token_ids) if token_id == self.v2t_token_id]
+
+        # Check for correct pairing of t2v and v2t tokens
+        if len(t2v_indices) != len(v2t_indices):
+            raise ValueError(
+                f"Mismatched number of t2v and v2t tokens in token_ids: "
+                f"{len(t2v_indices)} t2v vs. {len(v2t_indices)} v2t"
+            )
+
+        # Ensure t2v and v2t tokens are in the correct order
+        for t2v_idx, v2t_idx in zip(t2v_indices, v2t_indices):
+            if t2v_idx >= v2t_idx:
+                raise ValueError("Found t2v_token_id after v2t_token_id in token_ids")
+
+        images = []
+        decoded_text = ""
+
+        # Initialize the start index
+        start = 0
+        # Iterate over pairs of t2v and v2t indices
+        for t2v_idx, v2t_idx in zip(t2v_indices, v2t_indices):
+            # Decode text tokens before the image
+            text_token_ids = token_ids[start:t2v_idx]
+            if len(text_token_ids) > 0:
+                decoded_text += self.tokenizer.decode(text_token_ids, **kwargs)
+
+            # Insert image placeholder
+            decoded_text += self.image_placeholder
+
+            # Extract image tokens and convert them to bytes
+            image_token_ids = token_ids[t2v_idx + 1 : v2t_idx]
+            image_bytes = self.image_tokens_to_bytes(image_token_ids, jpeg_quality)
+            images.append(image_bytes)
+
+            # Update the start index to the token after v2t_token_id
+            start = v2t_idx + 1
+
+        # Decode any remaining text tokens after the last image
+        if start < len(token_ids):
+            text_token_ids = token_ids[start:]
+            decoded_text += self.tokenizer.decode(text_token_ids, **kwargs)
+
+        return decoded_text, images
+
+    @property
+    def model_input_names(self):
+        tokenizer_input_names = self.tokenizer.model_input_names
+        image_processor_input_names = self.image_processor.model_input_names
+        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
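
For context, a minimal usage sketch of the processor added above (the repo id and image path are placeholders): __call__ splits the prompt on <image_placeholder>, byte-tokenizes each text part, and splices each image's JPEG byte stream between the <t2v_token> / <v2t_token> sentinels; decode reverses the process.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<repo_id>", trust_remote_code=True)

prompt = "Describe this image: <image_placeholder>\n"
# "cat.jpg" is a placeholder local path; http(s) URLs and PIL images work too.
inputs = processor(text=prompt, images="cat.jpg", return_tensors="pt")

# decode() returns the text with placeholders restored plus the raw JPEG bytes.
text, images = processor.decode(inputs["input_ids"][0])
with open("roundtrip.jpg", "wb") as f:
    f.write(images[0])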
processor_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "auto_map": {
+    "AutoProcessor": "processing_evabyte.EvaByteProcessor"
+  },
+  "processor_class": "EvaByteProcessor"
+}
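
processor_config.json registers the same mapping at the processor level, so AutoProcessor.from_pretrained resolves EvaByteProcessor directly. The byte-to-token mapping the processor relies on is a plain offset shift; a toy round-trip illustration, assuming an offset of 64 (the actual value is read from tokenizer.offset):

offset = 64  # assumed for illustration; the processor uses tokenizer.offset

jpeg_bytes = b"\xff\xd8\xff\xe0"                  # first bytes of a JPEG stream
token_ids = [b + offset for b in jpeg_bytes]      # __call__: byte -> token id
recovered = bytes(i - offset for i in token_ids)  # image_tokens_to_bytes: id -> byte
assert recovered == jpeg_bytes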
tokenizer_config.json CHANGED
@@ -575,6 +575,7 @@
     "<extra_id_63>"
   ],
   "auto_map": {
+    "AutoProcessor": "processing_evabyte.EvaByteProcessor",
     "AutoTokenizer": [
       "tokenization_evabyte.EvaByteTokenizer",
       null
@@ -588,6 +589,7 @@
   "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
+  "processor_class": "EvaByteProcessor",
   "sep_token": "<eos>",
   "tokenizer_class": "EvaByteTokenizer",
   "unk_token": "<unk>"