{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 13.285714285714286,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 11.114676475524902,
      "learning_rate": 2e-05,
      "loss": 4.9155,
      "step": 1
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 10.886661529541016,
      "learning_rate": 4e-05,
      "loss": 5.2012,
      "step": 2
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 10.0223388671875,
      "learning_rate": 6e-05,
      "loss": 4.4341,
      "step": 3
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 8.667222023010254,
      "learning_rate": 8e-05,
      "loss": 4.6939,
      "step": 4
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 6.8060832023620605,
      "learning_rate": 0.0001,
      "loss": 3.9619,
      "step": 5
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 10.650321006774902,
      "learning_rate": 0.00012,
      "loss": 6.2246,
      "step": 6
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 5.177021026611328,
      "learning_rate": 0.00014,
      "loss": 3.1182,
      "step": 7
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 4.4564313888549805,
      "learning_rate": 0.00016,
      "loss": 3.4089,
      "step": 8
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 3.8821651935577393,
      "learning_rate": 0.00018,
      "loss": 2.8449,
      "step": 9
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 4.001584529876709,
      "learning_rate": 0.0002,
      "loss": 2.541,
      "step": 10
    },
    {
      "epoch": 2.0476190476190474,
      "grad_norm": 5.516446590423584,
      "learning_rate": 0.0001998629534754574,
      "loss": 3.0792,
      "step": 11
    },
    {
      "epoch": 2.238095238095238,
      "grad_norm": 3.4709484577178955,
      "learning_rate": 0.00019945218953682734,
      "loss": 2.7096,
      "step": 12
    },
    {
      "epoch": 2.4285714285714284,
      "grad_norm": 2.045745372772217,
      "learning_rate": 0.00019876883405951377,
      "loss": 1.6279,
      "step": 13
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 2.5490376949310303,
      "learning_rate": 0.00019781476007338058,
      "loss": 1.9507,
      "step": 14
    },
    {
      "epoch": 2.8095238095238093,
      "grad_norm": 2.9036448001861572,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.9104,
      "step": 15
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.653842449188232,
      "learning_rate": 0.00019510565162951537,
      "loss": 3.1175,
      "step": 16
    },
    {
      "epoch": 3.1904761904761907,
      "grad_norm": 2.397826671600342,
      "learning_rate": 0.00019335804264972018,
      "loss": 1.7851,
      "step": 17
    },
    {
      "epoch": 3.380952380952381,
      "grad_norm": 2.003856897354126,
      "learning_rate": 0.0001913545457642601,
      "loss": 1.3112,
      "step": 18
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 1.5474413633346558,
      "learning_rate": 0.0001891006524188368,
      "loss": 1.4914,
      "step": 19
    },
    {
      "epoch": 3.761904761904762,
      "grad_norm": 1.5208486318588257,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.2772,
      "step": 20
    },
    {
      "epoch": 3.9523809523809526,
      "grad_norm": 1.8079063892364502,
      "learning_rate": 0.00018386705679454242,
      "loss": 1.3938,
      "step": 21
    },
    {
      "epoch": 4.142857142857143,
      "grad_norm": 3.1666009426116943,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.5114,
      "step": 22
    },
    {
      "epoch": 4.333333333333333,
      "grad_norm": 1.6823382377624512,
      "learning_rate": 0.0001777145961456971,
      "loss": 1.4207,
      "step": 23
    },
    {
      "epoch": 4.523809523809524,
      "grad_norm": 1.3504416942596436,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.9105,
      "step": 24
    },
    {
      "epoch": 4.714285714285714,
      "grad_norm": 1.3855412006378174,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.0832,
      "step": 25
    },
    {
      "epoch": 4.904761904761905,
      "grad_norm": 1.378572940826416,
      "learning_rate": 0.00016691306063588583,
      "loss": 1.1635,
      "step": 26
    },
    {
      "epoch": 5.095238095238095,
      "grad_norm": 2.8164830207824707,
      "learning_rate": 0.00016293203910498376,
      "loss": 1.4419,
      "step": 27
    },
    {
      "epoch": 5.285714285714286,
      "grad_norm": 1.147581696510315,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.6387,
      "step": 28
    },
    {
      "epoch": 5.476190476190476,
      "grad_norm": 1.5747478008270264,
      "learning_rate": 0.00015446390350150273,
      "loss": 1.0435,
      "step": 29
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 1.3611525297164917,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.9718,
      "step": 30
    },
    {
      "epoch": 5.857142857142857,
      "grad_norm": 1.1871294975280762,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.7409,
      "step": 31
    },
    {
      "epoch": 6.0476190476190474,
      "grad_norm": 4.008199214935303,
      "learning_rate": 0.00014067366430758004,
      "loss": 1.6548,
      "step": 32
    },
    {
      "epoch": 6.238095238095238,
      "grad_norm": 1.111243486404419,
      "learning_rate": 0.00013583679495453,
      "loss": 0.5545,
      "step": 33
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 1.1197227239608765,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.5296,
      "step": 34
    },
    {
      "epoch": 6.619047619047619,
      "grad_norm": 1.1618750095367432,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.5923,
      "step": 35
    },
    {
      "epoch": 6.809523809523809,
      "grad_norm": 1.0321106910705566,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.6033,
      "step": 36
    },
    {
      "epoch": 7.0,
      "grad_norm": 3.302760601043701,
      "learning_rate": 0.0001156434465040231,
      "loss": 1.269,
      "step": 37
    },
    {
      "epoch": 7.190476190476191,
      "grad_norm": 1.3132500648498535,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.5142,
      "step": 38
    },
    {
      "epoch": 7.380952380952381,
      "grad_norm": 0.8678253293037415,
      "learning_rate": 0.0001052335956242944,
      "loss": 0.4287,
      "step": 39
    },
    {
      "epoch": 7.571428571428571,
      "grad_norm": 1.0537868738174438,
      "learning_rate": 0.0001,
      "loss": 0.5592,
      "step": 40
    },
    {
      "epoch": 7.761904761904762,
      "grad_norm": 0.9256157279014587,
      "learning_rate": 9.476640437570562e-05,
      "loss": 0.3902,
      "step": 41
    },
    {
      "epoch": 7.9523809523809526,
      "grad_norm": 0.938805341720581,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.442,
      "step": 42
    },
    {
      "epoch": 8.142857142857142,
      "grad_norm": 1.98072350025177,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.6278,
      "step": 43
    },
    {
      "epoch": 8.333333333333334,
      "grad_norm": 0.8120888471603394,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.2643,
      "step": 44
    },
    {
      "epoch": 8.523809523809524,
      "grad_norm": 0.8413763642311096,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.3042,
      "step": 45
    },
    {
      "epoch": 8.714285714285714,
      "grad_norm": 0.7817604541778564,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.3137,
      "step": 46
    },
    {
      "epoch": 8.904761904761905,
      "grad_norm": 0.7885234355926514,
      "learning_rate": 6.416320504546997e-05,
      "loss": 0.2808,
      "step": 47
    },
    {
      "epoch": 9.095238095238095,
      "grad_norm": 1.5161521434783936,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.5427,
      "step": 48
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 0.5991838574409485,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.2195,
      "step": 49
    },
    {
      "epoch": 9.476190476190476,
      "grad_norm": 0.7848747968673706,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.2632,
      "step": 50
    },
    {
      "epoch": 9.666666666666666,
      "grad_norm": 0.6151169538497925,
      "learning_rate": 4.5536096498497295e-05,
      "loss": 0.2032,
      "step": 51
    },
    {
      "epoch": 9.857142857142858,
      "grad_norm": 1.1136269569396973,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.2819,
      "step": 52
    },
    {
      "epoch": 10.095238095238095,
      "grad_norm": 0.659062922000885,
      "learning_rate": 3.7067960895016275e-05,
      "loss": 0.1995,
      "step": 53
    },
    {
      "epoch": 10.285714285714286,
      "grad_norm": 0.6866421699523926,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.18,
      "step": 54
    },
    {
      "epoch": 10.476190476190476,
      "grad_norm": 0.6459683775901794,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.2022,
      "step": 55
    },
    {
      "epoch": 10.666666666666666,
      "grad_norm": 0.6483227014541626,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.1876,
      "step": 56
    },
    {
      "epoch": 10.857142857142858,
      "grad_norm": 0.5956599116325378,
      "learning_rate": 2.2285403854302912e-05,
      "loss": 0.1969,
      "step": 57
    },
    {
      "epoch": 11.047619047619047,
      "grad_norm": 2.297029733657837,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.3517,
      "step": 58
    },
    {
      "epoch": 11.238095238095237,
      "grad_norm": 0.5542204976081848,
      "learning_rate": 1.6132943205457606e-05,
      "loss": 0.1701,
      "step": 59
    },
    {
      "epoch": 11.428571428571429,
      "grad_norm": 0.5855220556259155,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.1588,
      "step": 60
    },
    {
      "epoch": 11.619047619047619,
      "grad_norm": 0.6266547441482544,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.1762,
      "step": 61
    },
    {
      "epoch": 11.80952380952381,
      "grad_norm": 0.5979227423667908,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.1604,
      "step": 62
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.2808066606521606,
      "learning_rate": 6.6419573502798374e-06,
      "loss": 0.2592,
      "step": 63
    },
    {
      "epoch": 12.19047619047619,
      "grad_norm": 0.5678392052650452,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.1635,
      "step": 64
    },
    {
      "epoch": 12.380952380952381,
      "grad_norm": 0.746486246585846,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.1914,
      "step": 65
    },
    {
      "epoch": 12.571428571428571,
      "grad_norm": 0.6837660074234009,
      "learning_rate": 2.1852399266194314e-06,
      "loss": 0.1632,
      "step": 66
    },
    {
      "epoch": 12.761904761904763,
      "grad_norm": 0.6320339441299438,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.1514,
      "step": 67
    },
    {
      "epoch": 12.952380952380953,
      "grad_norm": 0.6253515481948853,
      "learning_rate": 5.478104631726711e-07,
      "loss": 0.1538,
      "step": 68
    },
    {
      "epoch": 13.095238095238095,
      "grad_norm": 0.8237464427947998,
      "learning_rate": 1.3704652454261668e-07,
      "loss": 0.1993,
      "step": 69
    },
    {
      "epoch": 13.285714285714286,
      "grad_norm": 0.6406182646751404,
      "learning_rate": 0.0,
      "loss": 0.1828,
      "step": 70
    }
  ],
  "logging_steps": 1,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 62994788646912.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}