{
"best_metric": 1.2911922931671143,
"best_model_checkpoint": "data/Llama-31-8B_task-1_60-samples_config-4/checkpoint-103",
"epoch": 42.78260869565217,
"eval_steps": 500,
"global_step": 123,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.34782608695652173,
"grad_norm": 1.8420466184616089,
"learning_rate": 3.3333333333333335e-07,
"loss": 2.0791,
"step": 1
},
{
"epoch": 0.6956521739130435,
"grad_norm": 1.7365580797195435,
"learning_rate": 6.666666666666667e-07,
"loss": 2.1668,
"step": 2
},
{
"epoch": 0.6956521739130435,
"eval_loss": 2.0786845684051514,
"eval_runtime": 4.8257,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 2
},
{
"epoch": 1.391304347826087,
"grad_norm": 1.8509169816970825,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.1494,
"step": 4
},
{
"epoch": 1.7391304347826086,
"eval_loss": 2.073704481124878,
"eval_runtime": 4.8307,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 5
},
{
"epoch": 2.0869565217391304,
"grad_norm": 1.5950208902359009,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.0264,
"step": 6
},
{
"epoch": 2.782608695652174,
"grad_norm": 1.7936512231826782,
"learning_rate": 2.666666666666667e-06,
"loss": 2.127,
"step": 8
},
{
"epoch": 2.782608695652174,
"eval_loss": 2.063272714614868,
"eval_runtime": 4.8304,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 8
},
{
"epoch": 3.4782608695652173,
"grad_norm": 1.7478910684585571,
"learning_rate": 3.3333333333333333e-06,
"loss": 2.023,
"step": 10
},
{
"epoch": 3.8260869565217392,
"eval_loss": 2.0469565391540527,
"eval_runtime": 4.8249,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 11
},
{
"epoch": 4.173913043478261,
"grad_norm": 1.7332440614700317,
"learning_rate": 4.000000000000001e-06,
"loss": 2.1049,
"step": 12
},
{
"epoch": 4.869565217391305,
"grad_norm": 1.879156470298767,
"learning_rate": 4.666666666666667e-06,
"loss": 2.0881,
"step": 14
},
{
"epoch": 4.869565217391305,
"eval_loss": 2.021301507949829,
"eval_runtime": 4.8272,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 14
},
{
"epoch": 5.565217391304348,
"grad_norm": 1.6294264793395996,
"learning_rate": 5.333333333333334e-06,
"loss": 2.0378,
"step": 16
},
{
"epoch": 5.913043478260869,
"eval_loss": 1.9882330894470215,
"eval_runtime": 4.8259,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 17
},
{
"epoch": 6.260869565217392,
"grad_norm": 1.750487208366394,
"learning_rate": 6e-06,
"loss": 1.9557,
"step": 18
},
{
"epoch": 6.956521739130435,
"grad_norm": 1.8384217023849487,
"learning_rate": 6.666666666666667e-06,
"loss": 2.0073,
"step": 20
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.9450572729110718,
"eval_runtime": 4.8247,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 20
},
{
"epoch": 7.6521739130434785,
"grad_norm": 1.9144703149795532,
"learning_rate": 7.333333333333333e-06,
"loss": 1.9467,
"step": 22
},
{
"epoch": 8.0,
"eval_loss": 1.8880866765975952,
"eval_runtime": 4.8205,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 23
},
{
"epoch": 8.347826086956522,
"grad_norm": 1.8363193273544312,
"learning_rate": 8.000000000000001e-06,
"loss": 1.9133,
"step": 24
},
{
"epoch": 8.695652173913043,
"eval_loss": 1.8427385091781616,
"eval_runtime": 4.8315,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 25
},
{
"epoch": 9.043478260869565,
"grad_norm": 1.9099247455596924,
"learning_rate": 8.666666666666668e-06,
"loss": 1.8557,
"step": 26
},
{
"epoch": 9.73913043478261,
"grad_norm": 1.7712645530700684,
"learning_rate": 9.333333333333334e-06,
"loss": 1.8018,
"step": 28
},
{
"epoch": 9.73913043478261,
"eval_loss": 1.765236496925354,
"eval_runtime": 4.8337,
"eval_samples_per_second": 2.483,
"eval_steps_per_second": 2.483,
"step": 28
},
{
"epoch": 10.434782608695652,
"grad_norm": 1.6165164709091187,
"learning_rate": 1e-05,
"loss": 1.691,
"step": 30
},
{
"epoch": 10.782608695652174,
"eval_loss": 1.6855782270431519,
"eval_runtime": 4.8292,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 31
},
{
"epoch": 11.130434782608695,
"grad_norm": 1.27354097366333,
"learning_rate": 9.99864620589731e-06,
"loss": 1.738,
"step": 32
},
{
"epoch": 11.826086956521738,
"grad_norm": 1.100017786026001,
"learning_rate": 9.994585556692624e-06,
"loss": 1.6135,
"step": 34
},
{
"epoch": 11.826086956521738,
"eval_loss": 1.6189905405044556,
"eval_runtime": 4.8275,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 34
},
{
"epoch": 12.521739130434783,
"grad_norm": 0.9115222096443176,
"learning_rate": 9.987820251299121e-06,
"loss": 1.5471,
"step": 36
},
{
"epoch": 12.869565217391305,
"eval_loss": 1.5723379850387573,
"eval_runtime": 4.8274,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 37
},
{
"epoch": 13.217391304347826,
"grad_norm": 0.6941347122192383,
"learning_rate": 9.978353953249023e-06,
"loss": 1.5992,
"step": 38
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.6947245597839355,
"learning_rate": 9.966191788709716e-06,
"loss": 1.5155,
"step": 40
},
{
"epoch": 13.91304347826087,
"eval_loss": 1.5439869165420532,
"eval_runtime": 4.8269,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 40
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.659094512462616,
"learning_rate": 9.951340343707852e-06,
"loss": 1.4371,
"step": 42
},
{
"epoch": 14.956521739130435,
"eval_loss": 1.5234659910202026,
"eval_runtime": 4.826,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 43
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.6309556365013123,
"learning_rate": 9.933807660562898e-06,
"loss": 1.4931,
"step": 44
},
{
"epoch": 16.0,
"grad_norm": 0.6867908239364624,
"learning_rate": 9.913603233532067e-06,
"loss": 1.4825,
"step": 46
},
{
"epoch": 16.0,
"eval_loss": 1.5018547773361206,
"eval_runtime": 4.825,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 46
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.6625152230262756,
"learning_rate": 9.890738003669029e-06,
"loss": 1.4532,
"step": 48
},
{
"epoch": 16.695652173913043,
"eval_loss": 1.489387035369873,
"eval_runtime": 4.8314,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 48
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.61110919713974,
"learning_rate": 9.86522435289912e-06,
"loss": 1.4277,
"step": 50
},
{
"epoch": 17.73913043478261,
"eval_loss": 1.469335913658142,
"eval_runtime": 4.8333,
"eval_samples_per_second": 2.483,
"eval_steps_per_second": 2.483,
"step": 51
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.5363492369651794,
"learning_rate": 9.83707609731432e-06,
"loss": 1.4107,
"step": 52
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.5772045850753784,
"learning_rate": 9.806308479691595e-06,
"loss": 1.366,
"step": 54
},
{
"epoch": 18.782608695652176,
"eval_loss": 1.45041024684906,
"eval_runtime": 4.8269,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 54
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.5792605876922607,
"learning_rate": 9.77293816123866e-06,
"loss": 1.4417,
"step": 56
},
{
"epoch": 19.82608695652174,
"eval_loss": 1.433027744293213,
"eval_runtime": 4.8255,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 57
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.5746141076087952,
"learning_rate": 9.736983212571646e-06,
"loss": 1.3183,
"step": 58
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.5542544722557068,
"learning_rate": 9.698463103929542e-06,
"loss": 1.3645,
"step": 60
},
{
"epoch": 20.869565217391305,
"eval_loss": 1.4169892072677612,
"eval_runtime": 4.8302,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 60
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.5561607480049133,
"learning_rate": 9.657398694630713e-06,
"loss": 1.3153,
"step": 62
},
{
"epoch": 21.91304347826087,
"eval_loss": 1.4028769731521606,
"eval_runtime": 4.8257,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 63
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.5926358699798584,
"learning_rate": 9.613812221777212e-06,
"loss": 1.3394,
"step": 64
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.5176757574081421,
"learning_rate": 9.567727288213005e-06,
"loss": 1.3036,
"step": 66
},
{
"epoch": 22.956521739130434,
"eval_loss": 1.3847342729568481,
"eval_runtime": 4.825,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 66
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.5416279435157776,
"learning_rate": 9.519168849742603e-06,
"loss": 1.2775,
"step": 68
},
{
"epoch": 24.0,
"eval_loss": 1.3691829442977905,
"eval_runtime": 4.8201,
"eval_samples_per_second": 2.49,
"eval_steps_per_second": 2.49,
"step": 69
},
{
"epoch": 24.347826086956523,
"grad_norm": 0.4943753778934479,
"learning_rate": 9.468163201617063e-06,
"loss": 1.2726,
"step": 70
},
{
"epoch": 24.695652173913043,
"eval_loss": 1.3620725870132446,
"eval_runtime": 4.8293,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 71
},
{
"epoch": 25.043478260869566,
"grad_norm": 0.5579866766929626,
"learning_rate": 9.414737964294636e-06,
"loss": 1.2557,
"step": 72
},
{
"epoch": 25.73913043478261,
"grad_norm": 0.530767560005188,
"learning_rate": 9.358922068483813e-06,
"loss": 1.2949,
"step": 74
},
{
"epoch": 25.73913043478261,
"eval_loss": 1.3509678840637207,
"eval_runtime": 4.8304,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 74
},
{
"epoch": 26.434782608695652,
"grad_norm": 0.5218483805656433,
"learning_rate": 9.30074573947683e-06,
"loss": 1.1424,
"step": 76
},
{
"epoch": 26.782608695652176,
"eval_loss": 1.3406327962875366,
"eval_runtime": 4.837,
"eval_samples_per_second": 2.481,
"eval_steps_per_second": 2.481,
"step": 77
},
{
"epoch": 27.130434782608695,
"grad_norm": 0.583118736743927,
"learning_rate": 9.24024048078213e-06,
"loss": 1.2227,
"step": 78
},
{
"epoch": 27.82608695652174,
"grad_norm": 0.5403833389282227,
"learning_rate": 9.177439057064684e-06,
"loss": 1.2489,
"step": 80
},
{
"epoch": 27.82608695652174,
"eval_loss": 1.3327490091323853,
"eval_runtime": 4.8298,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 80
},
{
"epoch": 28.52173913043478,
"grad_norm": 0.5446699857711792,
"learning_rate": 9.112375476403313e-06,
"loss": 1.1662,
"step": 82
},
{
"epoch": 28.869565217391305,
"eval_loss": 1.3224714994430542,
"eval_runtime": 4.8305,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 83
},
{
"epoch": 29.217391304347824,
"grad_norm": 0.5531767010688782,
"learning_rate": 9.045084971874738e-06,
"loss": 1.14,
"step": 84
},
{
"epoch": 29.91304347826087,
"grad_norm": 0.5570783615112305,
"learning_rate": 8.97560398247424e-06,
"loss": 1.1614,
"step": 86
},
{
"epoch": 29.91304347826087,
"eval_loss": 1.314386248588562,
"eval_runtime": 4.8261,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 86
},
{
"epoch": 30.608695652173914,
"grad_norm": 0.5725581645965576,
"learning_rate": 8.903970133383297e-06,
"loss": 1.146,
"step": 88
},
{
"epoch": 30.956521739130434,
"eval_loss": 1.3093677759170532,
"eval_runtime": 4.8256,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 89
},
{
"epoch": 31.304347826086957,
"grad_norm": 0.5645802617073059,
"learning_rate": 8.83022221559489e-06,
"loss": 1.1311,
"step": 90
},
{
"epoch": 32.0,
"grad_norm": 0.5483241081237793,
"learning_rate": 8.754400164907496e-06,
"loss": 1.1177,
"step": 92
},
{
"epoch": 32.0,
"eval_loss": 1.3024587631225586,
"eval_runtime": 4.8305,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 2.484,
"step": 92
},
{
"epoch": 32.69565217391305,
"grad_norm": 0.5573391914367676,
"learning_rate": 8.676545040299145e-06,
"loss": 1.0748,
"step": 94
},
{
"epoch": 32.69565217391305,
"eval_loss": 1.2985492944717407,
"eval_runtime": 4.8339,
"eval_samples_per_second": 2.482,
"eval_steps_per_second": 2.482,
"step": 94
},
{
"epoch": 33.391304347826086,
"grad_norm": 0.6121187806129456,
"learning_rate": 8.596699001693257e-06,
"loss": 1.118,
"step": 96
},
{
"epoch": 33.73913043478261,
"eval_loss": 1.2956913709640503,
"eval_runtime": 4.8239,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 97
},
{
"epoch": 34.08695652173913,
"grad_norm": 0.6799437999725342,
"learning_rate": 8.51490528712831e-06,
"loss": 1.0753,
"step": 98
},
{
"epoch": 34.78260869565217,
"grad_norm": 0.5860521793365479,
"learning_rate": 8.43120818934367e-06,
"loss": 1.0599,
"step": 100
},
{
"epoch": 34.78260869565217,
"eval_loss": 1.29244863986969,
"eval_runtime": 4.8276,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 100
},
{
"epoch": 35.47826086956522,
"grad_norm": 0.5923620462417603,
"learning_rate": 8.345653031794292e-06,
"loss": 1.0607,
"step": 102
},
{
"epoch": 35.82608695652174,
"eval_loss": 1.2911922931671143,
"eval_runtime": 4.8238,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 2.488,
"step": 103
},
{
"epoch": 36.17391304347826,
"grad_norm": 0.6971337199211121,
"learning_rate": 8.258286144107277e-06,
"loss": 1.0059,
"step": 104
},
{
"epoch": 36.869565217391305,
"grad_norm": 0.6799646615982056,
"learning_rate": 8.16915483699355e-06,
"loss": 1.0041,
"step": 106
},
{
"epoch": 36.869565217391305,
"eval_loss": 1.2954756021499634,
"eval_runtime": 4.8252,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 106
},
{
"epoch": 37.56521739130435,
"grad_norm": 0.6722318530082703,
"learning_rate": 8.078307376628292e-06,
"loss": 1.0132,
"step": 108
},
{
"epoch": 37.91304347826087,
"eval_loss": 1.2979621887207031,
"eval_runtime": 4.8341,
"eval_samples_per_second": 2.482,
"eval_steps_per_second": 2.482,
"step": 109
},
{
"epoch": 38.26086956521739,
"grad_norm": 0.6754195094108582,
"learning_rate": 7.985792958513932e-06,
"loss": 0.9641,
"step": 110
},
{
"epoch": 38.95652173913044,
"grad_norm": 0.7231629490852356,
"learning_rate": 7.891661680839932e-06,
"loss": 1.0062,
"step": 112
},
{
"epoch": 38.95652173913044,
"eval_loss": 1.3067742586135864,
"eval_runtime": 4.8268,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 112
},
{
"epoch": 39.65217391304348,
"grad_norm": 0.802635669708252,
"learning_rate": 7.795964517353734e-06,
"loss": 0.9466,
"step": 114
},
{
"epoch": 40.0,
"eval_loss": 1.3117856979370117,
"eval_runtime": 4.8215,
"eval_samples_per_second": 2.489,
"eval_steps_per_second": 2.489,
"step": 115
},
{
"epoch": 40.34782608695652,
"grad_norm": 0.8345409631729126,
"learning_rate": 7.698753289757565e-06,
"loss": 0.9728,
"step": 116
},
{
"epoch": 40.69565217391305,
"eval_loss": 1.3146535158157349,
"eval_runtime": 4.8262,
"eval_samples_per_second": 2.486,
"eval_steps_per_second": 2.486,
"step": 117
},
{
"epoch": 41.04347826086956,
"grad_norm": 0.7516780495643616,
"learning_rate": 7.600080639646077e-06,
"loss": 0.8733,
"step": 118
},
{
"epoch": 41.73913043478261,
"grad_norm": 0.778958261013031,
"learning_rate": 7.500000000000001e-06,
"loss": 0.882,
"step": 120
},
{
"epoch": 41.73913043478261,
"eval_loss": 1.319496989250183,
"eval_runtime": 4.8294,
"eval_samples_per_second": 2.485,
"eval_steps_per_second": 2.485,
"step": 120
},
{
"epoch": 42.43478260869565,
"grad_norm": 0.8342879414558411,
"learning_rate": 7.398565566251232e-06,
"loss": 0.9193,
"step": 122
},
{
"epoch": 42.78260869565217,
"eval_loss": 1.3276245594024658,
"eval_runtime": 4.8249,
"eval_samples_per_second": 2.487,
"eval_steps_per_second": 2.487,
"step": 123
},
{
"epoch": 42.78260869565217,
"step": 123,
"total_flos": 2.9607479084056576e+16,
"train_loss": 1.4191040304618152,
"train_runtime": 2316.2574,
"train_samples_per_second": 2.979,
"train_steps_per_second": 0.13
}
],
"logging_steps": 2,
"max_steps": 300,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.9607479084056576e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}