Text Generation
Transformers
PyTorch
Safetensors
English
hf_olmo
conversational
custom_code
hamishivi committed
Commit b0d8450
1 Parent(s): a6b68ac

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "_name_or_path": "/net/nfs.cirrascale/allennlp/hamishi/checkpoints/olmo_7b_finetune",
+ "activation_type": "swiglu",
+ "alibi": false,
+ "alibi_bias_max": 8.0,
+ "architectures": [
+ "OLMoForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "attention_layer_norm": false,
+ "attention_layer_norm_with_affine": false,
+ "auto_map": {
+ "AutoConfig": "configuration_olmo.OLMoConfig",
+ "AutoModelForCausalLM": "modeling_olmo.OLMoForCausalLM",
+ "AutoTokenizer": [
+ "tokenization_olmo_fast.OLMoTokenizerFast",
+ "tokenization_olmo_fast.OLMoTokenizerFast"
+ ]
+ },
+ "bias_for_layer_norm": false,
+ "block_group_size": 1,
+ "block_type": "sequential",
+ "d_model": 4096,
+ "embedding_dropout": 0.0,
+ "embedding_size": 50304,
+ "eos_token_id": 50279,
+ "flash_attention": true,
+ "include_bias": false,
+ "init_cutoff_factor": null,
+ "init_device": "meta",
+ "init_fn": "mitchell",
+ "init_std": 0.02,
+ "layer_norm_type": "default",
+ "layer_norm_with_affine": false,
+ "max_sequence_length": 2048,
+ "mlp_hidden_size": 22016,
+ "mlp_ratio": 4,
+ "model_type": "olmo",
+ "multi_query_attention": false,
+ "n_heads": 32,
+ "n_layers": 32,
+ "pad_token_id": 1,
+ "precision": "amp_bf16",
+ "residual_dropout": 0.0,
+ "rope": true,
+ "rope_full_precision": true,
+ "scale_logits": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.35.0.dev0",
+ "use_cache": true,
+ "vocab_size": 50280,
+ "weight_tying": false
+ }
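
The config above registers custom OLMo classes through "auto_map", so the standard transformers Auto* loaders only resolve them with trust_remote_code=True and the hf_olmo package installed. A minimal loading sketch, not part of this commit; the model path below is a placeholder for wherever this checkpoint lives:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/olmo_7b_finetune"  # placeholder, substitute the actual repo id or local directory

# trust_remote_code lets transformers use the OLMo classes named in "auto_map"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    trust_remote_code=True,
)

inputs = tokenizer("Language modeling is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
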
configuration_olmo.py ADDED
@@ -0,0 +1,3 @@
+ from hf_olmo import OLMoConfig
+
+
modeling_olmo.py ADDED
@@ -0,0 +1,3 @@
+ from hf_olmo import OLMoForCausalLM
+
+
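
configuration_olmo.py and modeling_olmo.py are thin shims that re-export the hf_olmo implementations named in "auto_map". If hf_olmo is installed, the same classes can also be used directly, bypassing the Auto* machinery. A sketch under that assumption (checkpoint directory is a placeholder):

from hf_olmo import OLMoConfig, OLMoForCausalLM
from hf_olmo.tokenization_olmo_fast import OLMoTokenizerFast

checkpoint_dir = "path/to/olmo_7b_finetune"  # placeholder local directory

# Inspect the architecture fields from config.json, then load weights and tokenizer
config = OLMoConfig.from_pretrained(checkpoint_dir)
print(config.d_model, config.n_layers, config.n_heads)  # 4096 32 32 per config.json

model = OLMoForCausalLM.from_pretrained(checkpoint_dir)
tokenizer = OLMoTokenizerFast.from_pretrained(checkpoint_dir)
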
pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c01e8e5db3174103ebbf4435dfb9c746e6fb2eb890966429e4a9e182ee3c277
+ size 9945777322
pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3ae639bb44aca919f0c8f0966824d2f4dfe2eca5719eba0d5d70a02ed8cfbec
+ size 3830460110
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,137 @@
+ {
+ "metadata": {
+ "total_size": 13776191488
+ },
+ "weight_map": {
+ "model.transformer.blocks.0.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.0.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.0.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.0.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.1.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.1.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.1.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.1.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.10.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.10.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.10.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.10.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.11.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.11.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.11.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.11.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.12.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.12.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.12.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.12.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.13.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.13.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.13.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.13.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.14.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.14.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.14.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.14.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.15.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.15.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.15.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.15.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.16.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.16.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.16.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.16.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.17.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.17.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.17.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.17.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.18.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.18.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.18.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.18.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.19.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.19.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.19.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.19.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.2.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.2.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.2.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.2.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.20.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.20.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.20.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.20.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.21.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.21.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.21.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.21.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.22.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.22.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.22.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.22.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.23.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.23.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.23.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.23.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.24.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.24.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.24.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.24.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.25.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.25.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.25.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.25.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.26.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.26.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.26.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.26.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.27.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.27.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.27.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.27.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.28.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.28.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.28.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.28.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.29.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.29.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.29.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.29.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.3.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.3.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.3.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.3.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.30.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.30.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.30.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.30.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.31.att_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.31.attn_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.31.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.31.ff_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.blocks.4.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.4.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.4.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.4.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.5.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.5.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.5.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.5.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.6.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.6.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.6.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.6.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.7.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.7.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.7.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.7.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.8.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.8.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.8.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.8.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.9.att_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.9.attn_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.9.ff_out.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.blocks.9.ff_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "model.transformer.ff_out.weight": "pytorch_model-00002-of-00002.bin",
+ "model.transformer.wte.weight": "pytorch_model-00001-of-00002.bin"
+ }
+ }
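
The index above maps every parameter tensor to one of the two shard files, and "total_size" is the combined byte size of all tensors (about 13.8 GB, roughly matching the two LFS pointers above). A small inspection sketch, assuming the index file has been downloaded to a local directory (placeholder path):

import json
from collections import Counter

# Placeholder path; point this at the downloaded checkpoint directory
with open("path/to/olmo_7b_finetune/pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 13776191488 bytes
print(Counter(index["weight_map"].values()))  # number of tensors stored in each shard file
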
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|padding|>"
+ }
+
tokenization_olmo_fast.py ADDED
@@ -0,0 +1,2 @@
+ from hf_olmo.tokenization_olmo_fast.py import OLMoTokenizerFast
+
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,242 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "|||IP_ADDRESS|||",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "1": {
+ "content": "<|padding|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "50254": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50255": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50256": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50257": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50258": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50259": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50260": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50261": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50262": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50263": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50264": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50265": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50266": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50267": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50268": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50269": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50270": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50271": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50272": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50273": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50274": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50275": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50276": {
+ "content": " ",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50277": {
+ "content": "|||EMAIL_ADDRESS|||",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50278": {
+ "content": "|||PHONE_NUMBER|||",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50279": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|endoftext|>",
+ "max_length": null,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<|padding|>",
+ "tokenizer_class": "OLMoTokenizer",
+ "truncation": "right",
+ "auto_map": {
+ "AutoConfig": "configuration_olmo.OLMoConfig",
+ "AutoTokenizer": [
+ "tokenization_olmo_fast.OLMoTokenizerFast",
+ "tokenization_olmo_fast.OLMoTokenizerFast"
+ ]
+ }
+ }
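
The tokenizer config above defines <|endoftext|> (id 50279) as the EOS token and <|padding|> (id 1) as the padding token, matching "eos_token_id" and "pad_token_id" in config.json. A quick check of that mapping, assuming the tokenizer is loaded from this checkpoint with trust_remote_code (placeholder path):

from transformers import AutoTokenizer

# Placeholder path; substitute the actual repo id or local directory
tokenizer = AutoTokenizer.from_pretrained("path/to/olmo_7b_finetune", trust_remote_code=True)

print(tokenizer.eos_token, tokenizer.eos_token_id)  # <|endoftext|> 50279
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <|padding|> 1

# The EOS string is an added special token, so it encodes to the single id 50279
ids = tokenizer("Hello world" + tokenizer.eos_token)["input_ids"]
print(ids[-1] == tokenizer.eos_token_id)  # True
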