liuzihan.111 committed on
Commit
5fddcb4
1 Parent(s): 6bdfb7c
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "/home/tiger/xglm-7.5B",
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "XGLMForCausalLM"
+   ],
+   "attention_dropout": 0.1,
+   "attention_heads": 32,
+   "bos_token_id": 0,
+   "d_model": 4096,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "eos_token_id": 2,
+   "ffn_dim": 16384,
+   "init_std": 0.02,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "xglm",
+   "num_layers": 32,
+   "pad_token_id": 1,
+   "scale_embedding": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.26.0",
+   "use_cache": true,
+   "vocab_size": 256008
+ }
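For reference, a minimal sketch of loading this checkpoint with transformers (>= 4.26) once the LFS weight shards below have been fetched; the local path is a placeholder, and `AutoModelForCausalLM` resolves to `XGLMForCausalLM` via the `architectures` field above.

```python
# Sketch: load this sharded fp16 XGLM-7.5B checkpoint ("path/to/checkpoint" is
# a placeholder for a local clone of this repo with the LFS files pulled).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "path/to/checkpoint"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)

inputs = tokenizer("Hello, world!", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```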
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.26.0"
+ }
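These defaults were exported from the model config itself (`_from_model_config: true`). A small sketch of reading them back, with a placeholder path:

```python
# Sketch: inspect the exported generation defaults (transformers >= 4.26).
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("path/to/checkpoint")  # placeholder path
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id)  # 0 2 1
```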
latest ADDED
@@ -0,0 +1 @@
+ global_step1750
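DeepSpeed writes this `latest` tag file so that tooling can locate the newest checkpoint folder without guessing. A sketch of how the `zero_to_fp32.py` script added at the end of this commit resolves it when no tag is passed explicitly (behavior assumed from the script's published version):

```python
# Sketch: resolve the DeepSpeed checkpoint directory from the 'latest' tag file.
import os

checkpoint_dir = "path/to/checkpoint"  # placeholder for a local clone
with open(os.path.join(checkpoint_dir, "latest")) as fd:
    tag = fd.read().strip()            # -> "global_step1750"
ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
print(ds_checkpoint_dir)
```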
pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46ebe857e3def375d976e7bf7aa0184f0ffb48bb75d1379ae5bed2e722c904cf
+ size 14985585491
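Both weight shards are committed as Git LFS pointer files like the one above: three lines naming the spec, the SHA-256 object id, and the byte size, while the ~15 GB payload itself lives in LFS storage and is fetched by `git lfs pull`. A sketch of verifying a downloaded shard against its pointer:

```python
# Sketch: verify a downloaded LFS shard against the oid/size in its pointer file.
import hashlib

def sha256_of(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# expected value copied from the pointer above
assert sha256_of("pytorch_model-00001-of-00002.bin") == \
    "46ebe857e3def375d976e7bf7aa0184f0ffb48bb75d1379ae5bed2e722c904cf"
```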
pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d34f9a7a45565eb9d2023ea4d8286a08e9bf0abeb7f1fbecf58bac732825583
+ size 14985570451
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,523 @@
+ {
+   "metadata": {
+     "total_size": 17082761216
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+     "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.0.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.19.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.20.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.21.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.22.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.23.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.3.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.30.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.fc1.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.fc1.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.fc2.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.fc2.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.4.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.4.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.5.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.6.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.7.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.8.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.fc1.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.fc1.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.fc2.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.fc2.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.final_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.final_layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.k_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.out_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.q_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.v_proj.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn_layer_norm.bias": "pytorch_model-00001-of-00002.bin",
+     "model.layers.9.self_attn_layer_norm.weight": "pytorch_model-00001-of-00002.bin"
+   }
+ }
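The index maps every parameter tensor to one of the two shards, which lets transformers load the checkpoint one file at a time. A quick inspection sketch (the path is a placeholder for a local clone; note that `metadata.total_size` counts tensor bytes and need not match the shard file sizes on disk):

```python
# Sketch: inspect the shard map of this checkpoint.
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])        # 17082761216
print(Counter(index["weight_map"].values()))  # 315 tensors in shard 1, 201 in shard 2
```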
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f932da7b85ba4bfa0e2191778ac486383227f4bc24acddfa6a467f62aaf179a5
+ size 14503
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c0db463e104ee4ec1a143e979e1c6fc02877c9ae063b39b870d27aad5e972ef
+ size 14503
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba7329df058a936f9d17bee005ddf0c9bc1c3c72d391adc550428e46bf1f9817
+ size 14503
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d6907a947cf16e57246ac8c2924608004a4544d57378504e318ff5cce033e25
+ size 14503
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c49dc7e82c10227af764e518924cf2f9d50c00462750d184fa74697bba65eef8
+ size 4920706
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "additional_special_tokens": [
+     "<madeupword0>",
+     "<madeupword1>",
+     "<madeupword2>",
+     "<madeupword3>",
+     "<madeupword4>",
+     "<madeupword5>",
+     "<madeupword6>"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "additional_special_tokens": [
+     "<madeupword0>",
+     "<madeupword1>",
+     "<madeupword2>",
+     "<madeupword3>",
+     "<madeupword4>",
+     "<madeupword5>",
+     "<madeupword6>"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "name_or_path": "/home/tiger/xglm-7.5B",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": "hf_models/xglm-564M/special_tokens_map.json",
+   "tokenizer_class": "XGLMTokenizer",
+   "unk_token": "<unk>"
+ }
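Note that the huge `model_max_length` is the library's "effectively unlimited" sentinel, so the practical context window comes from `max_position_embeddings` (2048) in config.json. A small loading sketch, with a placeholder path:

```python
# Sketch: load the tokenizer from a local clone of this repo (placeholder path).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint")  # -> XGLMTokenizer
ids = tok("Hello, world!")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # SentencePiece pieces from a 256008-token vocab
```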
trainer_state.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9625962596259626,
+   "global_step": 1750,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.28,
+       "learning_rate": 5e-06,
+       "loss": 2.368,
+       "step": 500
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 5e-06,
+       "loss": 2.2031,
+       "step": 1000
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 5e-06,
+       "loss": 2.1764,
+       "step": 1500
+     }
+   ],
+   "max_steps": 2000,
+   "num_train_epochs": 2,
+   "total_flos": 4.42314986480468e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
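The bookkeeping here is internally consistent: step 1750 at epoch 0.9626 implies exactly 1818 optimizer steps per epoch, so the `max_steps` cap of 2000 ends training at about 1.10 epochs even though `num_train_epochs` is 2. A tiny check:

```python
# Sketch: sanity-check the step/epoch bookkeeping in trainer_state.json.
steps_per_epoch = 1750 / 0.9625962596259626   # -> 1818.0
print(round(steps_per_epoch))                 # 1818 optimizer steps per epoch
print(2000 / steps_per_epoch)                 # max_steps caps training at ~1.10 epochs
```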
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:adaa50d69e8814e989b34f82ba977ee68c0e4f7ade826683984162512fc121d2
+ size 4527
zero_to_fp32.py ADDED
@@ -0,0 +1,482 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets
4
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
5
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
6
+ # application.
7
+ #
8
+ # example: python zero_to_fp32.py . pytorch_model.bin
9
+
10
+ import argparse
11
+ import torch
12
+ import glob
13
+ import math
14
+ import os
15
+ import re
16
+ from collections import OrderedDict
17
+
18
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
19
+ # DeepSpeed data structures it has to be available in the current python environment.
20
+ from deepspeed.utils import logger
21
+ from deepspeed.checkpoint.constants import (DS_VERSION,
22
+ OPTIMIZER_STATE_DICT,
23
+ SINGLE_PARTITION_OF_FP32_GROUPS,
24
+ FP32_FLAT_GROUPS,
25
+ ZERO_STAGE,
26
+ PARTITION_COUNT,
27
+ PARAM_SHAPES,
28
+ BUFFER_NAMES)
29
+
30
+ debug = 0
31
+
32
+ # load to cpu
33
+ device = torch.device('cpu')
34
+
35
+
36
+ def atoi(text):
37
+ return int(text) if text.isdigit() else text
38
+
39
+
40
+ def natural_keys(text):
41
+ '''
42
+ alist.sort(key=natural_keys) sorts in human order
43
+ http://nedbatchelder.com/blog/200712/human_sorting.html
44
+ (See Toothy's implementation in the comments)
45
+ '''
46
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
47
+
48
+
49
+ def get_model_state_file(checkpoint_dir, zero_stage):
50
+ if not os.path.isdir(checkpoint_dir):
51
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
52
+
53
+ # there should be only one file
54
+ if zero_stage == 2:
55
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
56
+ elif zero_stage == 3:
57
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
58
+
59
+ if not os.path.exists(file):
60
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
61
+
62
+ return file
63
+
64
+
65
+ def get_optim_files(checkpoint_dir):
66
+ # XXX: need to test that this simple glob rule works for multi-node setup too
67
+ optim_files = sorted(glob.glob(os.path.join(checkpoint_dir,
68
+ "*_optim_states.pt")),
69
+ key=natural_keys)
70
+
71
+ if len(optim_files) == 0:
72
+ raise FileNotFoundError(
73
+ f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")
74
+
75
+ return optim_files
76
+
77
+
78
+ def parse_model_state(file):
79
+ state_dict = torch.load(file, map_location=device)
80
+
81
+ if BUFFER_NAMES not in state_dict:
82
+ raise ValueError(f"{file} is not a model state checkpoint")
83
+ buffer_names = state_dict[BUFFER_NAMES]
84
+ if debug:
85
+ print("Found buffers:", buffer_names)
86
+
87
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
88
+ buffers = {
89
+ k: v.float()
90
+ for k,
91
+ v in state_dict["module"].items() if k in buffer_names
92
+ }
93
+ param_shapes = state_dict[PARAM_SHAPES]
94
+
95
+ ds_version = state_dict.get(DS_VERSION, None)
96
+
97
+ return buffers, param_shapes, ds_version
98
+
99
+
100
+ def parse_optim_states(files, ds_checkpoint_dir):
101
+
102
+ total_files = len(files)
103
+ state_dicts = []
104
+ for f in files:
105
+ state_dicts.append(torch.load(f, map_location=device))
106
+
107
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
108
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
109
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
110
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
111
+
112
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
113
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
114
+ # use the max of the partition_count to get the dp world_size.
115
+
116
+ if type(world_size) is list:
117
+ world_size = max(world_size)
118
+
119
+ if world_size != total_files:
120
+ raise ValueError(
121
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
122
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
123
+ )
124
+
125
+ # the groups are named differently in each stage
126
+ if zero_stage == 2:
127
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
128
+ elif zero_stage == 3:
129
+ fp32_groups_key = FP32_FLAT_GROUPS
130
+ else:
131
+ raise ValueError(f"unknown zero stage {zero_stage}")
132
+
133
+ if zero_stage == 2:
134
+ fp32_flat_groups = [
135
+ state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key]
136
+ for i in range(len(state_dicts))
137
+ ]
138
+ elif zero_stage == 3:
139
+ # if there is more than one param group, there will be multiple flattened tensors - one
140
+ # flattened tensor per group - for simplicity merge them into a single tensor
141
+ #
142
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
143
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
144
+
145
+ fp32_flat_groups = [
146
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key],
147
+ 0) for i in range(len(state_dicts))
148
+ ]
149
+
150
+ return zero_stage, world_size, fp32_flat_groups
151
+
152
+
153
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
154
+ """
155
+ Returns fp32 state_dict reconstructed from ds checkpoint
156
+
157
+ Args:
158
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
159
+
160
+ """
161
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
162
+
163
+ optim_files = get_optim_files(ds_checkpoint_dir)
164
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
165
+ print(
166
+ f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
167
+
168
+ model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
169
+ buffers, param_shapes, ds_version = parse_model_state(model_file)
170
+ print(f'Parsing checkpoint created by deepspeed=={ds_version}')
171
+
172
+ if zero_stage == 2:
173
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size,
174
+ param_shapes,
175
+ fp32_flat_groups,
176
+ buffers)
177
+ elif zero_stage == 3:
178
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size,
179
+ param_shapes,
180
+ fp32_flat_groups,
181
+ buffers)
182
+
183
+
184
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol:
+     #
+     # Each rank holds a contiguous slice of every flattened param group; concatenating the
+     # per-rank slices in rank order recovers the full flat fp32 buffer of each group, which
+     # is then cut back into individual params using param_shapes.
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(
+                     f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum([
+         full_single_fp32_vector.numel()
+         for full_single_fp32_vector in merged_single_partition_of_fp32_groups
+     ])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum(
+             [sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to
+     # support an out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(
+                     f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} "
+                 )
+             state_dict[name] = full_single_fp32_vector.narrow(
+                 0,
+                 offset,
+                 unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
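+         # e.g. with world_size == 4 (align_to == 8): zero2_align(13) == 16 and
+         # zero2_align(16) == 16 (the numbers here are illustrative only)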
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(
+                 f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
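+ # e.g. (illustrative numbers): unpartitioned_numel == 10 and world_size == 4 give
+ # partitioned_numel == 3 and padding_numel == 2, i.e. every rank stores 3 elements
+ # and the last 2 elements of the final rank's slice are padding.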
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size,
+                                                param_shapes,
+                                                fp32_flat_groups,
+                                                buffers):
+
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at the boundary of
+     # each param, re-consolidating each param, while dealing with padding if any
+
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     print(f"Have {avail_numel} numels to process.")
+     print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     state_dict = OrderedDict()
+
+     # buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to
+     # support an out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
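+         # Gather this param: take its slice (partitioned_numel elements) from every
+         # rank's flat tensor, concatenate the slices in rank order, drop the trailing
+         # padding, and reshape the flat result back to the param's original shape.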
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0,
+                                              offset,
+                                              partitioned_numel)
+                   for i in range(world_size)),
+             0).narrow(0,
+                       0,
+                       unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(
+             f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(
+         f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
+     )
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded
+     with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for
+     example via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided,
+           will attempt to read the tag from the file named ``latest`` in the checkpoint folder,
+           e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory,
+     in which case you may need to use the offline approach via the ``zero_to_fp32.py`` script that
+     is saved with the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can
+     be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without
+     DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the
+           tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided,
+           will attempt to read the tag from the file named ``latest`` in the checkpoint folder,
+           e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
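+ # Example (paths are illustrative): with a checkpoint folder whose ``latest`` file
+ # names the tag global_step14, the following writes one consolidated fp32 file:
+ #   convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12",
+ #                                              "path/checkpoint-12/pytorch_model.bin")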
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on the cpu
+     2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the
+           tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided,
+           will attempt to read the tag from the file named ``latest`` in the checkpoint folder,
+           e.g., ``global_step14``
+
+     Returns:
+         - ``model``: the modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion instead. You will find
+     it conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
+     context of the same application, i.e. you will need to re-initialize the deepspeed engine,
+     since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "checkpoint_dir",
+         type=str,
+         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help=
+         "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
+     )
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
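+
+ # Example CLI invocation (paths are illustrative):
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin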