lautel committed
Commit d8efa0f (verified)
Parent: abe00a2

Upload 5 files
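(Not part of the commit; a minimal sketch of fetching one of the files uploaded here at exactly this revision with huggingface_hub. The repo id is a hypothetical placeholder, since the commit view does not show it.)

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="user/repo",               # hypothetical placeholder, not shown in this commit view
    filename="tokenizer_config.json",  # any of the five files uploaded here
    revision="d8efa0f",                # the commit above
)
print(path)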
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
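(Not part of the commit; a minimal sketch, assuming the file above is in the current directory, showing how the special-token entries read back: bos/eos/unk are AddedToken-style dicts, while pad_token is stored as a bare string.)

import json

with open("special_tokens_map.json") as f:
    special = json.load(f)

print(special["bos_token"]["content"])  # "<s>"
print(special["eos_token"]["content"])  # "</s>"
print(special["pad_token"])             # "<unk>" (plain string, reused for padding)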
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
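(Not part of the commit; tokenizer.model is stored via Git LFS, so a checkout without LFS leaves this three-line pointer in place of the 493443-byte SentencePiece model. A minimal sketch to detect that situation:)

from pathlib import Path

head = Path("tokenizer.model").read_bytes()[:64]
if head.startswith(b"version https://git-lfs.github.com/spec/v1"):
    # Only the pointer was checked out; fetch the real blob, e.g. with `git lfs pull`.
    print("tokenizer.model is still an LFS pointer, not the SentencePiece model")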
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": false,
+   "model_max_length": 124,
+   "pad_token": "<unk>",
+   "padding": true,
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation": true,
+   "truncation_side": "left",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
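(Not part of the commit; a minimal sketch, assuming the tokenizer files above are saved to a local directory "./checkpoint" — a hypothetical path — and that transformers is installed. It renders the Mistral-style [INST] chat_template defined in this config, which folds an optional system message into the first user turn.)

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# Expected rendering: "<s> [INST] You are a helpful assistant.\n\nHello! [/INST]"
print(tok.apply_chat_template(messages, tokenize=False))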
trainer_state.json ADDED
@@ -0,0 +1,355 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.9997314859567156,
+   "eval_steps": 500,
+   "global_step": 2327,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0214811234627571,
+       "grad_norm": 11.80153053831896,
+       "learning_rate": 1.1933174224343676e-06,
+       "loss": 5.7433,
+       "step": 50
+     },
+     {
+       "epoch": 0.0429622469255142,
+       "grad_norm": 3.9560565670689787,
+       "learning_rate": 2.386634844868735e-06,
+       "loss": 4.6773,
+       "step": 100
+     },
+     {
+       "epoch": 0.0644433703882713,
+       "grad_norm": 3.1697190737531535,
+       "learning_rate": 3.579952267303103e-06,
+       "loss": 4.1765,
+       "step": 150
+     },
+     {
+       "epoch": 0.0859244938510284,
+       "grad_norm": 3.0629881164511814,
+       "learning_rate": 4.77326968973747e-06,
+       "loss": 3.9862,
+       "step": 200
+     },
+     {
+       "epoch": 0.1074056173137855,
+       "grad_norm": 3.782319031089094,
+       "learning_rate": 5.966587112171838e-06,
+       "loss": 3.8765,
+       "step": 250
+     },
+     {
+       "epoch": 0.1288867407765426,
+       "grad_norm": 4.981876775635735,
+       "learning_rate": 7.159904534606206e-06,
+       "loss": 3.7555,
+       "step": 300
+     },
+     {
+       "epoch": 0.1503678642392997,
+       "grad_norm": 5.086535704279024,
+       "learning_rate": 8.353221957040574e-06,
+       "loss": 3.6723,
+       "step": 350
+     },
+     {
+       "epoch": 0.1718489877020568,
+       "grad_norm": 5.687053039301101,
+       "learning_rate": 9.54653937947494e-06,
+       "loss": 3.6078,
+       "step": 400
+     },
+     {
+       "epoch": 0.1933301111648139,
+       "grad_norm": 5.528308193865339,
+       "learning_rate": 9.999870719949443e-06,
+       "loss": 3.5469,
+       "step": 450
+     },
+     {
+       "epoch": 0.214811234627571,
+       "grad_norm": 5.706889809403439,
+       "learning_rate": 9.999117393223764e-06,
+       "loss": 3.4902,
+       "step": 500
+     },
+     {
+       "epoch": 0.23629235809032811,
+       "grad_norm": 8.153143367841682,
+       "learning_rate": 9.99769155693599e-06,
+       "loss": 3.4188,
+       "step": 550
+     },
+     {
+       "epoch": 0.2577734815530852,
+       "grad_norm": 5.845809899861177,
+       "learning_rate": 9.995593402897697e-06,
+       "loss": 3.3961,
+       "step": 600
+     },
+     {
+       "epoch": 0.2792546050158423,
+       "grad_norm": 6.193786755519171,
+       "learning_rate": 9.992823213364433e-06,
+       "loss": 3.3581,
+       "step": 650
+     },
+     {
+       "epoch": 0.3007357284785994,
+       "grad_norm": 6.287910360630115,
+       "learning_rate": 9.989381360997774e-06,
+       "loss": 3.3181,
+       "step": 700
+     },
+     {
+       "epoch": 0.3222168519413565,
+       "grad_norm": 6.7245258610858025,
+       "learning_rate": 9.985268308815177e-06,
+       "loss": 3.2873,
+       "step": 750
+     },
+     {
+       "epoch": 0.3436979754041136,
+       "grad_norm": 6.2374846893994125,
+       "learning_rate": 9.980484610127696e-06,
+       "loss": 3.2627,
+       "step": 800
+     },
+     {
+       "epoch": 0.3651790988668707,
+       "grad_norm": 6.157634090577404,
+       "learning_rate": 9.975030908465552e-06,
+       "loss": 3.2385,
+       "step": 850
+     },
+     {
+       "epoch": 0.3866602223296278,
+       "grad_norm": 6.055313983623131,
+       "learning_rate": 9.968907937491556e-06,
+       "loss": 3.2332,
+       "step": 900
+     },
+     {
+       "epoch": 0.4081413457923849,
+       "grad_norm": 5.643811929136974,
+       "learning_rate": 9.962116520902414e-06,
+       "loss": 3.2023,
+       "step": 950
+     },
+     {
+       "epoch": 0.429622469255142,
+       "grad_norm": 5.926620348550728,
+       "learning_rate": 9.954657572317915e-06,
+       "loss": 3.1881,
+       "step": 1000
+     },
+     {
+       "epoch": 0.4511035927178991,
+       "grad_norm": 6.701182779844167,
+       "learning_rate": 9.946532095158037e-06,
+       "loss": 3.1679,
+       "step": 1050
+     },
+     {
+       "epoch": 0.47258471618065623,
+       "grad_norm": 6.36759463453718,
+       "learning_rate": 9.937741182507946e-06,
+       "loss": 3.1442,
+       "step": 1100
+     },
+     {
+       "epoch": 0.49406583964341333,
+       "grad_norm": 7.096154490236862,
+       "learning_rate": 9.928286016970965e-06,
+       "loss": 3.155,
+       "step": 1150
+     },
+     {
+       "epoch": 0.5155469631061704,
+       "grad_norm": 6.088872047795586,
+       "learning_rate": 9.918167870509469e-06,
+       "loss": 3.1275,
+       "step": 1200
+     },
+     {
+       "epoch": 0.5370280865689275,
+       "grad_norm": 6.761966156468945,
+       "learning_rate": 9.907388104273776e-06,
+       "loss": 3.095,
+       "step": 1250
+     },
+     {
+       "epoch": 0.5585092100316846,
+       "grad_norm": 6.21960112723293,
+       "learning_rate": 9.895948168419045e-06,
+       "loss": 3.0861,
+       "step": 1300
+     },
+     {
+       "epoch": 0.5799903334944417,
+       "grad_norm": 6.396312085808871,
+       "learning_rate": 9.883849601910185e-06,
+       "loss": 3.1002,
+       "step": 1350
+     },
+     {
+       "epoch": 0.6014714569571988,
+       "grad_norm": 6.84214950380231,
+       "learning_rate": 9.871094032314833e-06,
+       "loss": 3.1037,
+       "step": 1400
+     },
+     {
+       "epoch": 0.6229525804199559,
+       "grad_norm": 6.454543297291967,
+       "learning_rate": 9.857683175584394e-06,
+       "loss": 3.0833,
+       "step": 1450
+     },
+     {
+       "epoch": 0.644433703882713,
+       "grad_norm": 6.794657141475511,
+       "learning_rate": 9.843618835823208e-06,
+       "loss": 3.0685,
+       "step": 1500
+     },
+     {
+       "epoch": 0.6659148273454701,
+       "grad_norm": 6.573147319811302,
+       "learning_rate": 9.82890290504585e-06,
+       "loss": 3.047,
+       "step": 1550
+     },
+     {
+       "epoch": 0.6873959508082272,
+       "grad_norm": 6.666311208864922,
+       "learning_rate": 9.813537362922608e-06,
+       "loss": 3.0326,
+       "step": 1600
+     },
+     {
+       "epoch": 0.7088770742709843,
+       "grad_norm": 6.490840785958152,
+       "learning_rate": 9.797524276513162e-06,
+       "loss": 3.0562,
+       "step": 1650
+     },
+     {
+       "epoch": 0.7303581977337414,
+       "grad_norm": 7.096203367165056,
+       "learning_rate": 9.780865799988516e-06,
+       "loss": 3.031,
+       "step": 1700
+     },
+     {
+       "epoch": 0.7518393211964985,
+       "grad_norm": 6.752554004118515,
+       "learning_rate": 9.763564174341203e-06,
+       "loss": 3.0211,
+       "step": 1750
+     },
+     {
+       "epoch": 0.7733204446592556,
+       "grad_norm": 6.267973471607311,
+       "learning_rate": 9.745621727083817e-06,
+       "loss": 3.017,
+       "step": 1800
+     },
+     {
+       "epoch": 0.7948015681220127,
+       "grad_norm": 6.4153741524585435,
+       "learning_rate": 9.727040871935898e-06,
+       "loss": 3.0248,
+       "step": 1850
+     },
+     {
+       "epoch": 0.8162826915847698,
+       "grad_norm": 6.44854230007588,
+       "learning_rate": 9.707824108499233e-06,
+       "loss": 2.974,
+       "step": 1900
+     },
+     {
+       "epoch": 0.837763815047527,
+       "grad_norm": 6.434347899473903,
+       "learning_rate": 9.68797402192159e-06,
+       "loss": 2.9643,
+       "step": 1950
+     },
+     {
+       "epoch": 0.859244938510284,
+       "grad_norm": 6.623547979694668,
+       "learning_rate": 9.667493282548945e-06,
+       "loss": 2.9832,
+       "step": 2000
+     },
+     {
+       "epoch": 0.8807260619730412,
+       "grad_norm": 7.423714925680245,
+       "learning_rate": 9.646384645566263e-06,
+       "loss": 2.9494,
+       "step": 2050
+     },
+     {
+       "epoch": 0.9022071854357983,
+       "grad_norm": 6.539828516530332,
+       "learning_rate": 9.624650950626846e-06,
+       "loss": 2.9489,
+       "step": 2100
+     },
+     {
+       "epoch": 0.9236883088985554,
+       "grad_norm": 6.884977149689985,
+       "learning_rate": 9.602295121470328e-06,
+       "loss": 2.9446,
+       "step": 2150
+     },
+     {
+       "epoch": 0.9451694323613125,
+       "grad_norm": 7.167446447983352,
+       "learning_rate": 9.57932016552936e-06,
+       "loss": 2.9367,
+       "step": 2200
+     },
+     {
+       "epoch": 0.9666505558240696,
+       "grad_norm": 6.06922872483934,
+       "learning_rate": 9.555729173525035e-06,
+       "loss": 2.9252,
+       "step": 2250
+     },
+     {
+       "epoch": 0.9881316792868267,
+       "grad_norm": 6.386687065769196,
+       "learning_rate": 9.531525319051097e-06,
+       "loss": 2.9342,
+       "step": 2300
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 13962,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 6,
+   "save_steps": 1.0,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
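(Not part of the commit; a minimal sketch reading the state above — log_history holds one record per 50 optimizer steps, so the training-loss curve for this partial first epoch can be pulled out directly.)

import json

with open("trainer_state.json") as f:
    state = json.load(f)

points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(f"logged {len(points)} points up to global_step {state['global_step']}")
print("loss went from", points[0][1], "to", points[-1][1])  # 5.7433 -> 2.9342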
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11fa96dc09f08ee109815f437a4e1a59635a72fc349dbd2021b2ccdd7a930d96
+ size 6648
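(Not part of the commit; training_args.bin is the pickled TrainingArguments object that the transformers Trainer saves alongside checkpoints, stored here as a 6648-byte LFS blob. A minimal sketch, assuming the LFS blob has been pulled and PyTorch plus transformers are installed; recent torch versions default to weights_only=True, so full unpickling must be allowed explicitly.)

import torch

# TrainingArguments is a pickled dataclass, not a tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.learning_rate, args.num_train_epochs)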