tokenizer_config.json
{
  "added_tokens_decoder": {
    "0": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "33252": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "33253": {
      "content": "<ASL>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "33254": {
      "content": "<EL>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "33255": {
      "content": "<SL>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "33256": {
      "content": "<MSL>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<ASL>",
    "<EL>",
    "<SL>",
    "<MSL>"
  ],
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "model_max_length": 512,
  "pad_token": "<pad>",
  "separate_vocabs": false,
  "source_lang": "es",
  "sp_model_kwargs": {},
  "target_lang": "es",
  "tokenizer_class": "MarianTokenizer",
  "unk_token": "<unk>"
}
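
For context, a minimal sketch of how this config is consumed at load time, assuming the file sits in a local checkpoint directory (the name "checkpoint-500" here is hypothetical) that also contains the MarianTokenizer vocabulary files (source.spm, target.spm, vocab.json):

from transformers import AutoTokenizer

# Load from the checkpoint directory; tokenizer_config.json tells
# transformers to instantiate MarianTokenizer with the special tokens above.
tokenizer = AutoTokenizer.from_pretrained("checkpoint-500")

# The four language tags are registered as added special tokens, so each
# maps to a single id instead of being split by the SentencePiece model.
print(tokenizer.convert_tokens_to_ids(["<ASL>", "<EL>", "<SL>", "<MSL>"]))
# Expected per added_tokens_decoder: [33253, 33254, 33255, 33256]

# eos/pad/unk are taken directly from this config.
print(tokenizer.eos_token, tokenizer.pad_token, tokenizer.unk_token)
# </s> <pad> <unk>

Because "special": true is set on each entry, these tags are also skipped when decoding with skip_special_tokens=True, which is the usual behavior wanted for control tokens like these.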