isenbek committed on
Commit 317e813
1 Parent(s): a89d5b2

Upload tokenizer

special_tokens_map.json CHANGED
@@ -2,14 +2,14 @@
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
@@ -17,7 +17,7 @@
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   }
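The only change here flips `normalized` to `false` on the three special tokens, so `<s>`, `</s>`, and `<unk>` are matched against the raw input text rather than its normalized form (the standard fix for Llama-style tokenizers). A minimal sketch of checking the effect, with the repo id as a placeholder:

    from transformers import AutoTokenizer

    # Placeholder repo id; point this at the actual repository.
    tok = AutoTokenizer.from_pretrained("isenbek/llama-tokenizer")

    # With "normalized": false, a literal "<s>" in the text is matched
    # as the BOS special token instead of being rewritten by the normalizer.
    ids = tok("<s>hello", add_special_tokens=False)["input_ids"]
    print(ids[0] == tok.bos_token_id)  # expected: True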
tokenizer.json CHANGED
@@ -18,7 +18,7 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
     },
     {
@@ -27,17 +27,8 @@
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": true,
+      "normalized": false,
       "special": true
-    },
-    {
-      "id": 32000,
-      "content": "<pad>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": true,
-      "special": false
     }
   ],
   "normalizer": {
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
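The committed file is only a Git LFS pointer; the 499,723-byte SentencePiece model itself lives in LFS storage, content-addressed by the SHA-256 above. A quick integrity check on a downloaded copy (local path assumed):

    import hashlib

    EXPECTED = "9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347"

    # The LFS pointer's oid is the SHA-256 of the real file's bytes.
    with open("tokenizer.model", "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()

    print(digest == EXPECTED)  # True if the download matches the pointer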
tokenizer_config.json CHANGED
@@ -3,7 +3,7 @@
     "__type": "AddedToken",
     "content": "<s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
@@ -12,21 +12,23 @@
     "__type": "AddedToken",
     "content": "</s>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": null,
+  "padding_side": "right",
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": {
     "__type": "AddedToken",
     "content": "<unk>",
     "lstrip": false,
-    "normalized": true,
+    "normalized": false,
     "rstrip": false,
     "single_word": false
-  }
+  },
+  "use_default_system_prompt": true
 }
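Two settings are new here: `padding_side: "right"` appends pad tokens after the real tokens (the usual choice for training), and `use_default_system_prompt: true` lets `LlamaTokenizer` fall back to the model's built-in system prompt when a chat is formatted without one. A sketch of both, assuming a recent transformers release and with the repo id again a placeholder:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("isenbek/llama-tokenizer")  # placeholder
    print(tok.padding_side)  # expected: "right"

    # With use_default_system_prompt enabled, the Llama chat template can
    # inject the stock system prompt when the conversation supplies none.
    messages = [{"role": "user", "content": "Hello!"}]
    prompt = tok.apply_chat_template(messages, tokenize=False)
    print(prompt)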