aadityap committed on
Commit 480bbfe
1 Parent(s): aec1dfa

Model save

Files changed (4):
  1. README.md +66 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +408 -0
README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ base_model: barc0/Llama-3.1-ARC-Potpourri-Transduction-8B
+ library_name: peft
+ license: llama3.1
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: problem0_model_aug_10
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # problem0_model_aug_10
+
+ This model is a fine-tuned version of [barc0/Llama-3.1-ARC-Potpourri-Transduction-8B](https://huggingface.co/barc0/Llama-3.1-ARC-Potpourri-Transduction-8B) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1473
+
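Since the card declares `library_name: peft`, this repo presumably carries a LoRA-style adapter rather than full model weights. A minimal loading sketch, assuming the adapter is published as `aadityap/problem0_model_aug_10` (a hypothetical repo id inferred from the committer and the model name):

```python
# Minimal PEFT loading sketch. The adapter repo id is an assumption
# inferred from the committer and model name, not stated in the card.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "barc0/Llama-3.1-ARC-Potpourri-Transduction-8B"
adapter_id = "aadityap/problem0_model_aug_10"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the adapter
```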
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 2
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
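As a rough reconstruction, these settings map onto `transformers.TrainingArguments` as sketched below; the actual training script is not part of this commit, so treat the values as illustrative:

```python
# Hedged sketch mapping the listed hyperparameters onto
# transformers.TrainingArguments; the real training script is unknown.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="problem0_model_aug_10",
    learning_rate=1e-4,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    gradient_accumulation_steps=2,  # total train batch size: 1 x 2 = 2
    optim="adamw_torch",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=2,
)
```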
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.2436        | 1.0   | 25   | 0.1446          |
+ | 0.12          | 2.0   | 50   | 0.1473          |
+
+
+ ### Framework versions
+
+ - PEFT 0.13.2
+ - Transformers 4.47.0.dev0
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 643887562752.0,
+   "train_loss": 0.10991490356624127,
+   "train_runtime": 185.8623,
+   "train_samples": 50,
+   "train_samples_per_second": 0.538,
+   "train_steps_per_second": 0.269
+ }
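These figures are mutually consistent, assuming `train_samples` counts examples per epoch: 50 samples × 2 epochs = 100 samples, and 100 / 185.8623 s ≈ 0.538 samples/s; with an effective batch size of 2, that is 50 optimizer steps, and 50 / 185.8623 s ≈ 0.269 steps/s.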
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.0,
+   "total_flos": 643887562752.0,
+   "train_loss": 0.10991490356624127,
+   "train_runtime": 185.8623,
+   "train_samples": 50,
+   "train_samples_per_second": 0.538,
+   "train_steps_per_second": 0.269
+ }
trainer_state.json ADDED
@@ -0,0 +1,408 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 50,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "grad_norm": 0.1475104469860361,
+       "learning_rate": 2e-05,
+       "loss": 0.0999,
+       "step": 1
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.17336762271557374,
+       "learning_rate": 4e-05,
+       "loss": 0.1105,
+       "step": 2
+     },
+     {
+       "epoch": 0.12,
+       "grad_norm": 0.1625647711677745,
+       "learning_rate": 6e-05,
+       "loss": 0.1518,
+       "step": 3
+     },
+     {
+       "epoch": 0.16,
+       "grad_norm": 0.2835973362154201,
+       "learning_rate": 8e-05,
+       "loss": 0.412,
+       "step": 4
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 0.102899700816259,
+       "learning_rate": 0.0001,
+       "loss": 0.1253,
+       "step": 5
+     },
+     {
+       "epoch": 0.24,
+       "grad_norm": 0.10525052737112932,
+       "learning_rate": 9.987820251299122e-05,
+       "loss": 0.1777,
+       "step": 6
+     },
+     {
+       "epoch": 0.28,
+       "grad_norm": 0.0841421968850327,
+       "learning_rate": 9.951340343707852e-05,
+       "loss": 0.1138,
+       "step": 7
+     },
+     {
+       "epoch": 0.32,
+       "grad_norm": 0.21097849799520993,
+       "learning_rate": 9.890738003669029e-05,
+       "loss": 0.3093,
+       "step": 8
+     },
+     {
+       "epoch": 0.36,
+       "grad_norm": 0.13129190430279525,
+       "learning_rate": 9.806308479691595e-05,
+       "loss": 0.1067,
+       "step": 9
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 0.22029292639589196,
+       "learning_rate": 9.698463103929542e-05,
+       "loss": 0.2229,
+       "step": 10
+     },
+     {
+       "epoch": 0.44,
+       "grad_norm": 0.2605739248411656,
+       "learning_rate": 9.567727288213005e-05,
+       "loss": 0.3048,
+       "step": 11
+     },
+     {
+       "epoch": 0.48,
+       "grad_norm": 0.09516786744052268,
+       "learning_rate": 9.414737964294636e-05,
+       "loss": 0.0444,
+       "step": 12
+     },
+     {
+       "epoch": 0.52,
+       "grad_norm": 0.11035350979508174,
+       "learning_rate": 9.24024048078213e-05,
+       "loss": 0.0919,
+       "step": 13
+     },
+     {
+       "epoch": 0.56,
+       "grad_norm": 0.1860366786152161,
+       "learning_rate": 9.045084971874738e-05,
+       "loss": 0.1798,
+       "step": 14
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 0.2051102247541834,
+       "learning_rate": 8.83022221559489e-05,
+       "loss": 0.1682,
+       "step": 15
+     },
+     {
+       "epoch": 0.64,
+       "grad_norm": 0.2120620056755024,
+       "learning_rate": 8.596699001693255e-05,
+       "loss": 0.2017,
+       "step": 16
+     },
+     {
+       "epoch": 0.68,
+       "grad_norm": 0.12492795843072409,
+       "learning_rate": 8.345653031794292e-05,
+       "loss": 0.0715,
+       "step": 17
+     },
+     {
+       "epoch": 0.72,
+       "grad_norm": 0.09372961310287645,
+       "learning_rate": 8.07830737662829e-05,
+       "loss": 0.063,
+       "step": 18
+     },
+     {
+       "epoch": 0.76,
+       "grad_norm": 0.09404028728202063,
+       "learning_rate": 7.795964517353735e-05,
+       "loss": 0.1209,
+       "step": 19
+     },
+     {
+       "epoch": 0.8,
+       "grad_norm": 0.11155850164626473,
+       "learning_rate": 7.500000000000001e-05,
+       "loss": 0.048,
+       "step": 20
+     },
+     {
+       "epoch": 0.84,
+       "grad_norm": 0.14193160043170325,
+       "learning_rate": 7.191855733945387e-05,
+       "loss": 0.1016,
+       "step": 21
+     },
+     {
+       "epoch": 0.88,
+       "grad_norm": 0.12622860771717062,
+       "learning_rate": 6.873032967079561e-05,
+       "loss": 0.0836,
+       "step": 22
+     },
+     {
+       "epoch": 0.92,
+       "grad_norm": 0.19822006343379234,
+       "learning_rate": 6.545084971874738e-05,
+       "loss": 0.1943,
+       "step": 23
+     },
+     {
+       "epoch": 0.96,
+       "grad_norm": 0.17907254526794228,
+       "learning_rate": 6.209609477998338e-05,
+       "loss": 0.1258,
+       "step": 24
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 0.35070363993403475,
+       "learning_rate": 5.868240888334653e-05,
+       "loss": 0.2436,
+       "step": 25
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.1445501148700714,
+       "eval_runtime": 0.4011,
+       "eval_samples_per_second": 2.493,
+       "eval_steps_per_second": 2.493,
+       "step": 25
+     },
+     {
+       "epoch": 1.04,
+       "grad_norm": 0.331374058760647,
+       "learning_rate": 5.522642316338268e-05,
+       "loss": 0.1818,
+       "step": 26
+     },
+     {
+       "epoch": 1.08,
+       "grad_norm": 0.10212151784996519,
+       "learning_rate": 5.174497483512506e-05,
+       "loss": 0.0545,
+       "step": 27
+     },
+     {
+       "epoch": 1.12,
+       "grad_norm": 0.12127508733134722,
+       "learning_rate": 4.825502516487497e-05,
+       "loss": 0.0662,
+       "step": 28
+     },
+     {
+       "epoch": 1.16,
+       "grad_norm": 0.10472782126959304,
+       "learning_rate": 4.477357683661734e-05,
+       "loss": 0.0712,
+       "step": 29
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 0.09739912535877197,
+       "learning_rate": 4.131759111665349e-05,
+       "loss": 0.0716,
+       "step": 30
+     },
+     {
+       "epoch": 1.24,
+       "grad_norm": 0.07818016052332849,
+       "learning_rate": 3.790390522001662e-05,
+       "loss": 0.0428,
+       "step": 31
+     },
+     {
+       "epoch": 1.28,
+       "grad_norm": 0.1309525888712825,
+       "learning_rate": 3.4549150281252636e-05,
+       "loss": 0.0632,
+       "step": 32
+     },
+     {
+       "epoch": 1.32,
+       "grad_norm": 0.05268163632164765,
+       "learning_rate": 3.12696703292044e-05,
+       "loss": 0.0308,
+       "step": 33
+     },
+     {
+       "epoch": 1.3599999999999999,
+       "grad_norm": 0.07825640200592207,
+       "learning_rate": 2.8081442660546125e-05,
+       "loss": 0.0439,
+       "step": 34
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 0.0654461820686998,
+       "learning_rate": 2.500000000000001e-05,
+       "loss": 0.0337,
+       "step": 35
+     },
+     {
+       "epoch": 1.44,
+       "grad_norm": 0.11019625858497928,
+       "learning_rate": 2.2040354826462668e-05,
+       "loss": 0.0732,
+       "step": 36
+     },
+     {
+       "epoch": 1.48,
+       "grad_norm": 0.0589572493253463,
+       "learning_rate": 1.9216926233717085e-05,
+       "loss": 0.0342,
+       "step": 37
+     },
+     {
+       "epoch": 1.52,
+       "grad_norm": 0.06589353609148312,
+       "learning_rate": 1.6543469682057106e-05,
+       "loss": 0.0325,
+       "step": 38
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.14358958467272426,
+       "learning_rate": 1.4033009983067452e-05,
+       "loss": 0.0941,
+       "step": 39
+     },
+     {
+       "epoch": 1.6,
+       "grad_norm": 0.10816135111790075,
+       "learning_rate": 1.1697777844051105e-05,
+       "loss": 0.0577,
+       "step": 40
+     },
+     {
+       "epoch": 1.6400000000000001,
+       "grad_norm": 0.06366691648082863,
+       "learning_rate": 9.549150281252633e-06,
+       "loss": 0.0333,
+       "step": 41
+     },
+     {
+       "epoch": 1.6800000000000002,
+       "grad_norm": 0.15879304612419673,
+       "learning_rate": 7.597595192178702e-06,
+       "loss": 0.0666,
+       "step": 42
+     },
+     {
+       "epoch": 1.72,
+       "grad_norm": 0.0765495019619371,
+       "learning_rate": 5.852620357053651e-06,
+       "loss": 0.029,
+       "step": 43
+     },
+     {
+       "epoch": 1.76,
+       "grad_norm": 0.1515518653937463,
+       "learning_rate": 4.322727117869951e-06,
+       "loss": 0.1331,
+       "step": 44
+     },
+     {
+       "epoch": 1.8,
+       "grad_norm": 0.08525038364295008,
+       "learning_rate": 3.0153689607045845e-06,
+       "loss": 0.0489,
+       "step": 45
+     },
+     {
+       "epoch": 1.8399999999999999,
+       "grad_norm": 0.09069389566818667,
+       "learning_rate": 1.9369152030840556e-06,
+       "loss": 0.0417,
+       "step": 46
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.15581325970621174,
+       "learning_rate": 1.0926199633097157e-06,
+       "loss": 0.1167,
+       "step": 47
+     },
+     {
+       "epoch": 1.92,
+       "grad_norm": 0.07551809989198507,
+       "learning_rate": 4.865965629214819e-07,
+       "loss": 0.0324,
+       "step": 48
+     },
+     {
+       "epoch": 1.96,
+       "grad_norm": 0.06320999636101195,
+       "learning_rate": 1.2179748700879012e-07,
+       "loss": 0.0494,
+       "step": 49
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 0.13783812798955924,
+       "learning_rate": 0.0,
+       "loss": 0.12,
+       "step": 50
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.14731274545192719,
+       "eval_runtime": 1.4061,
+       "eval_samples_per_second": 0.711,
+       "eval_steps_per_second": 0.711,
+       "step": 50
+     },
+     {
+       "epoch": 2.0,
+       "step": 50,
+       "total_flos": 643887562752.0,
+       "train_loss": 0.10991490356624127,
+       "train_runtime": 185.8623,
+       "train_samples_per_second": 0.538,
+       "train_steps_per_second": 0.269
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 50,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 643887562752.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
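The README's results table can be recovered from `log_history` by filtering for entries that carry `eval_loss`. A small sketch, assuming the file is read locally (it normally sits in the checkpoint directory):

```python
# Extract per-epoch evaluation losses from trainer_state.json;
# the path is an assumption, adjust to your checkpoint directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # evaluation records, one per epoch here
        print(f"epoch {entry['epoch']:.1f}  step {entry['step']}  "
              f"eval_loss {entry['eval_loss']:.4f}")
```

This reproduces the validation-loss column of the training results table: 0.1446 at step 25 and 0.1473 at step 50.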