{
  "best_metric": 0.9736120104789734,
  "best_model_checkpoint": "data/Gemma-2-2B_task-1_120-samples_config-1_auto/checkpoint-55",
  "epoch": 12.0,
  "eval_steps": 500,
  "global_step": 132,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 1.896773099899292,
      "learning_rate": 1.818181818181818e-06,
      "loss": 1.6148,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 1.7315363883972168,
      "learning_rate": 3.636363636363636e-06,
      "loss": 1.5745,
      "step": 2
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 1.9249202013015747,
      "learning_rate": 7.272727272727272e-06,
      "loss": 1.6592,
      "step": 4
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 1.9421212673187256,
      "learning_rate": 1.0909090909090909e-05,
      "loss": 1.6111,
      "step": 6
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 1.8695214986801147,
      "learning_rate": 1.4545454545454545e-05,
      "loss": 1.5167,
      "step": 8
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 2.2962398529052734,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 1.7508,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.5551661252975464,
      "eval_runtime": 2.1687,
      "eval_samples_per_second": 11.067,
      "eval_steps_per_second": 11.067,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.9042454957962036,
      "learning_rate": 2.1818181818181818e-05,
      "loss": 1.543,
      "step": 12
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 1.5851277112960815,
      "learning_rate": 2.5454545454545454e-05,
      "loss": 1.3843,
      "step": 14
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.3642412424087524,
      "learning_rate": 2.909090909090909e-05,
      "loss": 1.279,
      "step": 16
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 1.492651343345642,
      "learning_rate": 3.272727272727273e-05,
      "loss": 1.3709,
      "step": 18
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 1.7590340375900269,
      "learning_rate": 3.6363636363636364e-05,
      "loss": 1.2589,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.9888437390327454,
      "learning_rate": 4e-05,
      "loss": 1.066,
      "step": 22
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.1328343152999878,
      "eval_runtime": 2.1687,
      "eval_samples_per_second": 11.066,
      "eval_steps_per_second": 11.066,
      "step": 22
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 1.20571768283844,
      "learning_rate": 4.3636363636363636e-05,
      "loss": 1.0086,
      "step": 24
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 0.9041438698768616,
      "learning_rate": 4.7272727272727275e-05,
      "loss": 1.0831,
      "step": 26
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.9103597402572632,
      "learning_rate": 5.090909090909091e-05,
      "loss": 1.071,
      "step": 28
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.870693564414978,
      "learning_rate": 5.4545454545454546e-05,
      "loss": 1.0293,
      "step": 30
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.8961207866668701,
      "learning_rate": 5.818181818181818e-05,
      "loss": 0.9348,
      "step": 32
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.0277460813522339,
      "eval_runtime": 2.1645,
      "eval_samples_per_second": 11.088,
      "eval_steps_per_second": 11.088,
      "step": 33
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 0.8866666555404663,
      "learning_rate": 6.181818181818182e-05,
      "loss": 0.9195,
      "step": 34
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.8282307982444763,
      "learning_rate": 6.545454545454546e-05,
      "loss": 0.8315,
      "step": 36
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 0.9073108434677124,
      "learning_rate": 6.90909090909091e-05,
      "loss": 0.9055,
      "step": 38
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.8472995758056641,
      "learning_rate": 7.272727272727273e-05,
      "loss": 0.8648,
      "step": 40
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 0.9500349164009094,
      "learning_rate": 7.636363636363637e-05,
      "loss": 0.8957,
      "step": 42
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.039790153503418,
      "learning_rate": 8e-05,
      "loss": 0.8176,
      "step": 44
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.9743786454200745,
      "eval_runtime": 2.1442,
      "eval_samples_per_second": 11.193,
      "eval_steps_per_second": 11.193,
      "step": 44
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 1.096146821975708,
      "learning_rate": 8.363636363636364e-05,
      "loss": 0.7854,
      "step": 46
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 1.0144972801208496,
      "learning_rate": 8.727272727272727e-05,
      "loss": 0.795,
      "step": 48
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 1.1709052324295044,
      "learning_rate": 9.090909090909092e-05,
      "loss": 0.7459,
      "step": 50
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 1.1173423528671265,
      "learning_rate": 9.454545454545455e-05,
      "loss": 0.7177,
      "step": 52
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 1.1534667015075684,
      "learning_rate": 9.818181818181818e-05,
      "loss": 0.6379,
      "step": 54
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.9736120104789734,
      "eval_runtime": 2.173,
      "eval_samples_per_second": 11.045,
      "eval_steps_per_second": 11.045,
      "step": 55
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 1.1661216020584106,
      "learning_rate": 9.999899300364532e-05,
      "loss": 0.6415,
      "step": 56
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 1.0937227010726929,
      "learning_rate": 9.99909372761763e-05,
      "loss": 0.5881,
      "step": 58
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 1.1940053701400757,
      "learning_rate": 9.997482711915927e-05,
      "loss": 0.5414,
      "step": 60
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 1.3874033689498901,
      "learning_rate": 9.99506651282272e-05,
      "loss": 0.5359,
      "step": 62
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 1.7549492120742798,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.5691,
      "step": 64
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.616335153579712,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.497,
      "step": 66
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.0944799184799194,
      "eval_runtime": 2.1138,
      "eval_samples_per_second": 11.354,
      "eval_steps_per_second": 11.354,
      "step": 66
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 1.2271863222122192,
      "learning_rate": 9.982991356370404e-05,
      "loss": 0.3866,
      "step": 68
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 1.521119475364685,
      "learning_rate": 9.977359612865423e-05,
      "loss": 0.427,
      "step": 70
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 1.6232653856277466,
      "learning_rate": 9.970925928158274e-05,
      "loss": 0.407,
      "step": 72
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 1.5563262701034546,
      "learning_rate": 9.963691338830044e-05,
      "loss": 0.2937,
      "step": 74
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 1.8535608053207397,
      "learning_rate": 9.955657010501806e-05,
      "loss": 0.3367,
      "step": 76
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.3431849479675293,
      "eval_runtime": 2.2799,
      "eval_samples_per_second": 10.527,
      "eval_steps_per_second": 10.527,
      "step": 77
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 1.7326850891113281,
      "learning_rate": 9.946824237646824e-05,
      "loss": 0.291,
      "step": 78
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 1.5598558187484741,
      "learning_rate": 9.937194443381972e-05,
      "loss": 0.2737,
      "step": 80
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 1.730252742767334,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.1833,
      "step": 82
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 1.9764121770858765,
      "learning_rate": 9.915550124911866e-05,
      "loss": 0.1923,
      "step": 84
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 2.5331475734710693,
      "learning_rate": 9.903539087991462e-05,
      "loss": 0.2406,
      "step": 86
    },
    {
      "epoch": 8.0,
      "grad_norm": 2.1329357624053955,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.2147,
      "step": 88
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.6289167404174805,
      "eval_runtime": 2.1403,
      "eval_samples_per_second": 11.213,
      "eval_steps_per_second": 11.213,
      "step": 88
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 1.5905722379684448,
      "learning_rate": 9.877148934427037e-05,
      "loss": 0.1366,
      "step": 90
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 1.624558925628662,
      "learning_rate": 9.862774069706346e-05,
      "loss": 0.111,
      "step": 92
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 1.9237786531448364,
      "learning_rate": 9.847615725553456e-05,
      "loss": 0.1213,
      "step": 94
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 1.8648244142532349,
      "learning_rate": 9.831676344247342e-05,
      "loss": 0.1816,
      "step": 96
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 3.522146224975586,
      "learning_rate": 9.814958493905963e-05,
      "loss": 0.1552,
      "step": 98
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.8935288190841675,
      "eval_runtime": 2.1658,
      "eval_samples_per_second": 11.082,
      "eval_steps_per_second": 11.082,
      "step": 99
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 1.4460781812667847,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.0982,
      "step": 100
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 1.1425362825393677,
      "learning_rate": 9.779198285281325e-05,
      "loss": 0.0741,
      "step": 102
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 1.5075608491897583,
      "learning_rate": 9.760161688604008e-05,
      "loss": 0.0819,
      "step": 104
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 1.9946374893188477,
      "learning_rate": 9.740358145174998e-05,
      "loss": 0.0879,
      "step": 106
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 2.043844223022461,
      "learning_rate": 9.719790845697533e-05,
      "loss": 0.0921,
      "step": 108
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.4709323644638062,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0668,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.018200635910034,
      "eval_runtime": 2.1785,
      "eval_samples_per_second": 11.017,
      "eval_steps_per_second": 11.017,
      "step": 110
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 1.0680981874465942,
      "learning_rate": 9.676378356149734e-05,
      "loss": 0.0426,
      "step": 112
    },
    {
      "epoch": 10.363636363636363,
      "grad_norm": 0.9540239572525024,
      "learning_rate": 9.653540160603956e-05,
      "loss": 0.047,
      "step": 114
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 1.3698723316192627,
      "learning_rate": 9.629952196931901e-05,
      "loss": 0.0501,
      "step": 116
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 1.75531005859375,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.0535,
      "step": 118
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 1.7834439277648926,
      "learning_rate": 9.580542287160348e-05,
      "loss": 0.0425,
      "step": 120
    },
    {
      "epoch": 11.0,
      "eval_loss": 2.3609249591827393,
      "eval_runtime": 2.1793,
      "eval_samples_per_second": 11.013,
      "eval_steps_per_second": 11.013,
      "step": 121
    },
    {
      "epoch": 11.090909090909092,
      "grad_norm": 1.7064998149871826,
      "learning_rate": 9.554728301876526e-05,
      "loss": 0.0459,
      "step": 122
    },
    {
      "epoch": 11.272727272727273,
      "grad_norm": 1.7905004024505615,
      "learning_rate": 9.528180468815155e-05,
      "loss": 0.0385,
      "step": 124
    },
    {
      "epoch": 11.454545454545455,
      "grad_norm": 1.7331182956695557,
      "learning_rate": 9.50090306530454e-05,
      "loss": 0.0615,
      "step": 126
    },
    {
      "epoch": 11.636363636363637,
      "grad_norm": 1.467869758605957,
      "learning_rate": 9.472900486219769e-05,
      "loss": 0.0444,
      "step": 128
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 1.434717059135437,
      "learning_rate": 9.444177243274618e-05,
      "loss": 0.04,
      "step": 130
    },
    {
      "epoch": 12.0,
      "grad_norm": 1.624537467956543,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0507,
      "step": 132
    },
    {
      "epoch": 12.0,
      "eval_loss": 2.3114547729492188,
      "eval_runtime": 2.3476,
      "eval_samples_per_second": 10.223,
      "eval_steps_per_second": 10.223,
      "step": 132
    },
    {
      "epoch": 12.0,
      "step": 132,
      "total_flos": 4581273894912000.0,
      "train_loss": 0.5821334374221888,
      "train_runtime": 471.3358,
      "train_samples_per_second": 9.335,
      "train_steps_per_second": 1.167
    }
  ],
  "logging_steps": 2,
  "max_steps": 550,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4581273894912000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}