Sindhu committed
Commit 5fdfbea
Parent: 775995e

add tokenizer

sentencepiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67ce12ea4550e57af39217a75686a61695e34edbb1c9892f82f0b861d73a4482
+ size 4697711
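
The three "+" lines above are a Git LFS pointer, not the SentencePiece model itself: the oid and size identify a 4,697,711-byte blob stored out of band. A minimal sketch of fetching the real file with huggingface_hub, using a hypothetical repo id since the commit page does not name the repository:

from huggingface_hub import hf_hub_download

# Hypothetical repo id: the commit page does not name the repository.
REPO_ID = "Sindhu/rembert-squad2"

# hf_hub_download resolves the LFS pointer server-side and returns a
# local path to the actual sentencepiece.model blob.
model_path = hf_hub_download(repo_id=REPO_ID, filename="sentencepiece.model")
print(model_path)
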
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "remove_space": true, "keep_accents": true, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 256, "special_tokens_map_file": "/root/.cache/huggingface/transformers/d0d5cf448e7367ce69a8cbb48980c788a66b736ec136a0d3061fd26b5c1b25f0.f886166424e457f0fc75f92e81205faabe843b2dbbbef6b25f9d8ec69f64bc7d", "name_or_path": "../input/rembert-finetuning-squad2-epoch3/rembert_squad2/checkpoint-6852", "tokenizer_class": "RemBertTokenizer"}