TCMVince committed on
Commit
9e06b74
1 Parent(s): 903d9fd

Upload tokenizer

bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27573c473b962c1d7d7ef15dd6b5e0dcba5a4201a709ad0798bfb918b68e5bfc
+ size 771488
flaubert2_tokenizer.py ADDED
@@ -0,0 +1,457 @@
+ # Largely inspired from https://github.com/king-menin/yttm_transformers_tokenizer/blob/master/tokenization_yttm.py
+
+ from collections import OrderedDict
+ from dataclasses import dataclass, field  # needed for the AddedToken fallback below when `tokenizers` is unavailable
+ from fairseq.data import Dictionary
+
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.dynamic_module_utils import custom_object_save
+ from transformers.utils import (
+     is_tokenizers_available,
+     logging,
+ )
+
+ from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
+
+ import copy
+ import os
+ import shutil
+ import stanza
+ import youtokentome as yttm
+ import json
+
+
+ logger = logging.get_logger(__name__)
+
+ # Slow tokenizers used to be saved in three separate files
+ SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
+ ADDED_TOKENS_FILE = "added_tokens.json"
+ TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
+
+ if is_tokenizers_available():
+     from tokenizers import AddedToken
+     from tokenizers import Encoding as EncodingFast
+ else:
+
+     @dataclass(frozen=True, eq=True)
+     class AddedToken:
+         """
+         AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining
+         the way it should behave.
+         """
+
+         content: str = field(default_factory=str)
+         single_word: bool = False
+         lstrip: bool = False
+         rstrip: bool = False
+         normalized: bool = True
+
+         def __getstate__(self):
+             return self.__dict__
+
+     @dataclass
+     class EncodingFast:
+         """This is a dummy class: without the `tokenizers` library we don't have these objects anyway."""
+
+         pass
+
+
+ class BertDictionary(Dictionary):
+     """Dictionary for BERT tasks,
+     extended from fairseq's Dictionary by adding support for cls and mask symbols."""
+     def __init__(
+         self,
+         pad='[PAD]',
+         unk='[UNK]',
+         cls='[CLS]',
+         mask='[MASK]',
+         sep='[SEP]'
+     ):
+         super().__init__(pad=pad, unk=unk)
+         (
+             self.cls_word,
+             self.mask_word,
+             self.sep_word,
+         ) = cls, mask, sep
+
+         self.is_end = None
+         self.nspecial = len(self.symbols)
+
+     def mask(self):
+         """Helper to get the index of the mask symbol"""
+         idx = self.index(self.mask_word)
+         return idx
+
+     def is_end_word(self, idx):
+         if self.is_end is None:
+             self.is_end = [self.symbols[i].endswith("</w>") for i in range(len(self))]
+         return self.is_end[idx]
+
+
+ class FB2Tokenizer(PreTrainedTokenizer):
+     """
+     FB2Tokenizer, a YouTokenToMe (YTTM) BPE tokenizer. Peculiarities:
+
+     - Byte-level Byte-Pair-Encoding
+     - Requires a space to start the input string => the encoding methods should be called with the
+       ``add_prefix_space`` flag set to ``True``.
+       Otherwise, this tokenizer's ``encode`` and ``decode`` methods will not preserve
+       the absence of a space at the beginning of a string:
+
+     ::
+
+         tokenizer.decode(tokenizer.encode("Hello", add_special_tokens=False))
+
+     This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users
+     should refer to the superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (:obj:`str`):
+             Path to the vocabulary file.
+         bpe_model (:obj:`str`):
+             Path to the YouTokenToMe BPE model file.
+         unk_token (:obj:`string`, `optional`, defaults to `[UNK]`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+             this token instead.
+         bos_token (:obj:`string`, `optional`, defaults to `<s>`):
+             The beginning of sequence token.
+         eos_token (:obj:`string`, `optional`, defaults to `</s>`):
+             The end of sequence token.
+         pad_token (:obj:`string`, `optional`, defaults to `[PAD]`):
+             The padding token.
+         model_max_length (:obj:`int`, `optional`, defaults to 512):
+             The maximum length in number of tokens for the inputs to the transformer model. When the tokenizer is
+             loaded with `from_pretrained`, this will be set to the value stored for the associated model.
+     """
+     vocab_files_names = {"vocab_file": "vocab.txt", "bpe_model": "bpe.model"}
+
+     def __init__(
+         self,
+         vocab_file,
+         bpe_model,
+         unk_token="[UNK]",
+         bos_token="<s>",
+         cls_token="<s>",
+         eos_token="</s>",
+         pad_token="[PAD]",
+         mask_token="[MASK]",
+         sep_token="</s>",
+         model_max_length=512,
+         **kwargs
+     ):
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             cls_token=cls_token,
+             sep_token=sep_token,
+             mask_token=mask_token,
+             model_max_length=model_max_length,
+             **kwargs
+         )
+         # no default special tokens - you can update this value if you add special tokens
+         # self.max_len_single_sentence = model_max_length - 2
+         # no default special tokens - you can update this value if you add special tokens
+         # self.max_len_sentences_pair = model_max_length - 2
+         vocab_file = str(vocab_file)
+         self.vocab_file = str(vocab_file)
+         self.bpe_model_path = str(bpe_model)
+
+         self.vocab_files_names = {'vocab_file': 'vocab.txt', 'bpe_model': 'bpe.model'}
+
+         try:
+             import stanza
+             import youtokentome as yttm
+             import fairseq
+         except ImportError:
+             raise ImportError("You need to install stanza, youtokentome and fairseq to use this tokenizer")
+
+         if os.path.isfile(bpe_model):
+             self.bpe = yttm.BPE(bpe_model, n_threads=-1)
+         else:
+             raise OSError("bpe_model should be a path to a model file")
+
+         # French pre-tokenization (before BPE) is done with a stanza pipeline
+         self.nlp = stanza.Pipeline(lang='fr',
+                                    processors='tokenize',
+                                    tokenize_no_ssplit=True,
+                                    use_gpu=True, tokenize_batch_size=128, verbose=False)
+
+         self.vocab_file = vocab_file
+         self.cache = {}
+         self.dictionary = BertDictionary.load(vocab_file)
+         self.dictionary.add_symbol(mask_token)
+
+         self.vocab = OrderedDict([(key, val) for val, key in enumerate(self.dictionary.symbols)])
+
+         self.encoder = self.vocab
+         self.decoder = {k: v for k, v in enumerate(self.dictionary.symbols)}
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self.vocab)
+
+     def get_vocab(self):
+         return dict(self.vocab)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save only the vocabulary of the tokenizer (vocabulary + added tokens).
+
+         This method won't save the configuration and special token mappings of the tokenizer. Use
+         [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+             filename_prefix (`str`, *optional*):
+                 An optional prefix to add to the names of the saved files.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             raise OSError(f"Provided path ({save_directory}) should be a directory")
+
+         # Copy the BPE model and the fairseq dictionary next to the other tokenizer files
+         bpe_save_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "bpe.model")
+         if os.path.abspath(self.bpe_model_path) != os.path.abspath(bpe_save_file):
+             shutil.copyfile(self.bpe_model_path, bpe_save_file)
+         self.bpe_model_path = bpe_save_file
+
+         vocab_save_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
+         if os.path.abspath(self.vocab_file) != os.path.abspath(vocab_save_file):
+             shutil.copyfile(self.vocab_file, vocab_save_file)
+         self.vocab_file = vocab_save_file
+
+         return bpe_save_file, vocab_save_file
+
+     def replace_brackets(self, sentence):
+         """Replace round brackets in a stanza sentence by their PTB-style escapes (-LRB- / -RRB-)."""
+         # Sentences are capped at 10000 tokens.
+         sent = [None] * 10000
+         for j, tok in enumerate(sentence.tokens):
+             if j > len(sent) - 1:
+                 break
+             tok = tok.text
+             if tok == "(":
+                 tok = "-LRB-"
+             elif tok == ")":
+                 tok = "-RRB-"
+
+             sent[j] = tok
+
+         return sent[:len(sentence.tokens)]
+
+     def _tokenize(self, text: str, **kwargs):
+         """Converts a string into a sequence of tokens (strings), using the tokenizer.
+         Splits into words for word-based vocabularies or sub-words for sub-word-based vocabularies (BPE).
+         """
+         sent = self.nlp([stanza.Document([], text=text)])[0].sentences[0]
+         sent = ' '.join(self.replace_brackets(sent))
+
+         bpe = self.bpe.encode([sent], output_type=yttm.OutputType.SUBWORD)[0]
+         return bpe
+
+
+     def tokenize(self, text: Union[List[str], str], add_special_tokens=True, **kwargs):
+
+         if isinstance(text, list):
+             return list(map(
+                 lambda x: self.tokenize(x, add_special_tokens=add_special_tokens, **kwargs),
+                 text
+             ))
+         res = self._tokenize(text)
+         if add_special_tokens:
+             res = [self.bos_token] + res + [self.eos_token]
+         return res
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.decoder.get(index)
+
+     def convert_tokens_to_string(self, tokens: List[str]):
+         """Converts a sequence of tokens (strings) into a single string."""
+         if tokens[0] == self.bos_token:
+             tokens = tokens[1:]
+         if tokens[-1] == self.eos_token:
+             tokens = tokens[:-1]
+         return self.bpe.decode(list(map(self.bpe.subword_to_id, tokens)))[0]
+
+     # @classmethod
+     # def from_pretrained(self, cls, **kwargs):
+     #     """Load from file. Actually only calls __init__"""
+     #     return cls(**kwargs)
+
+     def save_pretrained(
+         self,
+         save_directory: Union[str, os.PathLike],
+         legacy_format: Optional[bool] = None,
+         filename_prefix: Optional[str] = None,
+         push_to_hub: bool = False,
+         **kwargs,
+     ) -> Tuple[str]:
+         """
+         Save the full tokenizer state.
+
+         This method makes sure the full tokenizer can then be re-loaded using the
+         [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.
+
+         Warning: this won't save modifications you may have applied to the tokenizer after the instantiation (for
+         instance, modifying `tokenizer.do_lower_case` after creation).
+
+         Args:
+             save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
+             legacy_format (`bool`, *optional*):
+                 Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
+                 format as well as in legacy format if it exists, i.e. with tokenizer-specific vocabulary and a separate
+                 added_tokens file.
+
+                 If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
+                 "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
+                 loaded in the corresponding "slow" tokenizer.
+
+                 If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a
+                 `ValueError` is raised.
+             filename_prefix (`str`, *optional*):
+                 A prefix to add to the names of the files saved by the tokenizer.
+             push_to_hub (`bool`, *optional*, defaults to `False`):
+                 Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+                 repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                 namespace).
+             kwargs:
+                 Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+
+         Returns:
+             A tuple of `str`: The files saved.
+         """
+         if os.path.isfile(save_directory):
+             logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+             return
+
+         os.makedirs(save_directory, exist_ok=True)
+
+         if push_to_hub:
+             commit_message = kwargs.pop("commit_message", None)
+             repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
+             repo_id, token = self._create_repo(repo_id, **kwargs)
+             files_timestamps = self._get_files_timestamps(save_directory)
+
+         special_tokens_map_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
+         )
+         tokenizer_config_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
+         )
+
+         tokenizer_config = copy.deepcopy(self.init_kwargs)
+
+         # TODO: Ensure the modified attributes (those are also in the __init__ kwargs) will give identical tokenizers
+         # target_keys = self.init_kwargs.keys()
+         target_keys = ["model_max_length"]
+         for k in target_keys:
+             if hasattr(self, k):
+                 tokenizer_config[k] = getattr(self, k)
+
+         if len(self.init_inputs) > 0:
+             tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
+         for file_id in self.vocab_files_names.keys():
+             tokenizer_config.pop(file_id, None)
+
+         # Sanitize AddedTokens
+         def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
+             if isinstance(obj, AddedToken):
+                 out = obj.__getstate__()
+                 if add_type_field:
+                     out["__type"] = "AddedToken"
+                 return out
+             elif isinstance(obj, (list, tuple)):
+                 return list(convert_added_tokens(o, add_type_field=add_type_field) for o in obj)
+             elif isinstance(obj, dict):
+                 return {k: convert_added_tokens(v, add_type_field=add_type_field) for k, v in obj.items()}
+             return obj
+
+         # add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
+         tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
+
+         # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
+         tokenizer_class = self.__class__.__name__
+         # Remove the Fast at the end unless we have a special `PreTrainedTokenizerFast`
+         if tokenizer_class.endswith("Fast") and tokenizer_class != "PreTrainedTokenizerFast":
+             tokenizer_class = tokenizer_class[:-4]
+         tokenizer_config["tokenizer_class"] = tokenizer_class
+
+         if getattr(self, "_auto_map", None) is not None:
+             tokenizer_config["auto_map"] = self._auto_map
+         if getattr(self, "_processor_class", None) is not None:
+             tokenizer_config["processor_class"] = self._processor_class
+
+         # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
+         # loaded from the Hub.
+         if self._auto_class is not None:
+             custom_object_save(self, save_directory, config=tokenizer_config)
+
+         # tokenizer_config["vocab_file"] = "vocab.txt"
+         # tokenizer_config["bpe_model"] = "bpe.model"
+         with open(tokenizer_config_file, "w", encoding="utf-8") as f:
+             out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+             f.write(out_str)
+         logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
+
+         # Sanitize AddedTokens in special_tokens_map
+         write_dict = convert_added_tokens(self.special_tokens_map_extended, add_type_field=False)
+         with open(special_tokens_map_file, "w", encoding="utf-8") as f:
+             out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+             f.write(out_str)
+         logger.info(f"Special tokens file saved in {special_tokens_map_file}")
+
+         file_names = (tokenizer_config_file, special_tokens_map_file)
+         save_files = self._save_pretrained(
+             save_directory=save_directory,
+             file_names=file_names,
+             legacy_format=legacy_format,
+             filename_prefix=filename_prefix,
+         )
+
+         if push_to_hub:
+             self._upload_modified_files(
+                 save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token
+             )
+
+         return save_files
+
+     def _save_pretrained(
+         self,
+         save_directory: Union[str, os.PathLike],
+         file_names: Tuple[str],
+         legacy_format: Optional[bool] = None,
+         filename_prefix: Optional[str] = None,
+     ) -> Tuple[str]:
+         """
+         Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
+
+         Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using
+         the specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`].
+         """
+         if legacy_format is False:
+             raise ValueError(
+                 "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non-legacy format."
+             )
+
+         save_directory = str(save_directory)
+
+         added_tokens_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
+         )
+         added_vocab = self.get_added_vocab()
+         if added_vocab:
+             with open(added_tokens_file, "w", encoding="utf-8") as f:
+                 out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
+                 f.write(out_str)
+                 logger.info(f"added tokens file saved in {added_tokens_file}")
+         vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
+
+         return file_names + vocab_files + (added_tokens_file,)
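Note: the class above only pre-tokenizes French text with stanza and then applies the YouTokenToMe BPE model, so it can be exercised directly from the two files added in this commit. The sketch below is illustrative only: the file names match this commit, but it assumes the files sit in the working directory, that stanza's French models have already been downloaded (stanza.download('fr')), and a transformers version contemporary to this code.

# Illustrative sketch, not part of the commit.
from flaubert2_tokenizer import FB2Tokenizer

tok = FB2Tokenizer(vocab_file="vocab.txt", bpe_model="bpe.model")

tokens = tok.tokenize("Bonjour le monde !")   # ['<s>', ..., '</s>'] with add_special_tokens=True (default)
ids = tok.convert_tokens_to_ids(tokens)       # inherited from PreTrainedTokenizer
print(tok.convert_tokens_to_string(tokens))   # decodes the BPE pieces back to text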
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "</s>",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "auto_map": {
+     "AutoTokenizer": [
+       "flaubert2_tokenizer.FB2Tokenizer",
+       null
+     ]
+   },
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "</s>",
+   "tokenizer_class": "FB2Tokenizer",
+   "unk_token": "[UNK]"
+ }
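The auto_map entry above is what lets AutoTokenizer resolve the custom FB2Tokenizer class from flaubert2_tokenizer.py. A minimal loading sketch, assuming the files from this commit are available in a local folder or Hub repo (the identifier below is a placeholder) and that custom code execution is explicitly opted into:

# Sketch only; "path/to/this/repo" is a placeholder, not a real identifier.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "path/to/this/repo",
    trust_remote_code=True,  # required so the FB2Tokenizer code in this commit is imported
)
print(tok("Bonjour le monde !")["input_ids"])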
vocab.txt ADDED
The diff for this file is too large to render. See raw diff