gpt2-imdb-pos / tokenizer_config.json
{
"add_bos_token": false,
"add_prefix_space": false,
"bos_token": {
"__type": "AddedToken",
"content": "<|endoftext|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"eos_token": {
"__type": "AddedToken",
"content": "<|endoftext|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
},
"errors": "replace",
"max_len": 1024,
"max_length": 1024,
"model_max_length": 1024,
"name_or_path": "lvwerra/gpt2-imdb",
"pad_token": null,
"special_tokens_map_file": "/root/.cache/huggingface/hub/models--lvwerra--gpt2-imdb/snapshots/f1bfd819c6bee6c18fa5f95bfe88d9198839a435/special_tokens_map.json",
"tokenizer_class": "GPT2Tokenizer",
"unk_token": {
"__type": "AddedToken",
"content": "<|endoftext|>",
"lstrip": false,
"normalized": true,
"rstrip": false,
"single_word": false
}
}
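
For context, a minimal sketch of loading a tokenizer described by this config with the transformers library. The repo name comes from the name_or_path field above; the example sentences and the choice to reuse eos_token as the padding token (pad_token is null in this config) are illustrative assumptions, not part of the file.

from transformers import AutoTokenizer

# name_or_path recorded in the config above; the fine-tuned repo that
# ships this file would load the same way.
tokenizer = AutoTokenizer.from_pretrained("lvwerra/gpt2-imdb")

# pad_token is null in this config, so padded batching needs one;
# reusing eos_token (<|endoftext|>) is the common choice for GPT-2.
tokenizer.pad_token = tokenizer.eos_token

# Tokenize a small batch, padding the shorter sequence to the longer one.
batch = tokenizer(["great movie", "a truly terrible film"], padding=True)
print(batch["input_ids"])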