{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.006003482019571352,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.003482019571351e-05,
"grad_norm": 9.387470245361328,
"learning_rate": 1e-05,
"loss": 2.8454,
"step": 1
},
{
"epoch": 6.003482019571351e-05,
"eval_loss": NaN,
"eval_runtime": 1747.2967,
"eval_samples_per_second": 8.028,
"eval_steps_per_second": 1.004,
"step": 1
},
{
"epoch": 0.00012006964039142703,
"grad_norm": 8.35964584350586,
"learning_rate": 2e-05,
"loss": 1.8863,
"step": 2
},
{
"epoch": 0.00018010446058714055,
"grad_norm": 6.206863880157471,
"learning_rate": 3e-05,
"loss": 1.2171,
"step": 3
},
{
"epoch": 0.00024013928078285405,
"grad_norm": 4.522266864776611,
"learning_rate": 4e-05,
"loss": 0.9352,
"step": 4
},
{
"epoch": 0.0003001741009785676,
"grad_norm": 5.3215155601501465,
"learning_rate": 5e-05,
"loss": 2.12,
"step": 5
},
{
"epoch": 0.0003602089211742811,
"grad_norm": 10.670658111572266,
"learning_rate": 6e-05,
"loss": 1.4522,
"step": 6
},
{
"epoch": 0.0004202437413699946,
"grad_norm": 14.341257095336914,
"learning_rate": 7e-05,
"loss": 2.9383,
"step": 7
},
{
"epoch": 0.0004802785615657081,
"grad_norm": 13.476455688476562,
"learning_rate": 8e-05,
"loss": 1.5796,
"step": 8
},
{
"epoch": 0.0005403133817614216,
"grad_norm": 6.012520790100098,
"learning_rate": 9e-05,
"loss": 0.7204,
"step": 9
},
{
"epoch": 0.0005403133817614216,
"eval_loss": NaN,
"eval_runtime": 1748.6263,
"eval_samples_per_second": 8.022,
"eval_steps_per_second": 1.003,
"step": 9
},
{
"epoch": 0.0006003482019571352,
"grad_norm": 4.6742472648620605,
"learning_rate": 0.0001,
"loss": 0.9866,
"step": 10
},
{
"epoch": 0.0006603830221528486,
"grad_norm": 0.0,
"learning_rate": 9.99695413509548e-05,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.0007204178423485622,
"grad_norm": 6.591522216796875,
"learning_rate": 9.987820251299122e-05,
"loss": 0.7643,
"step": 12
},
{
"epoch": 0.0007804526625442757,
"grad_norm": 5.335558891296387,
"learning_rate": 9.972609476841367e-05,
"loss": 0.2543,
"step": 13
},
{
"epoch": 0.0008404874827399892,
"grad_norm": 3.498502731323242,
"learning_rate": 9.951340343707852e-05,
"loss": 0.1858,
"step": 14
},
{
"epoch": 0.0009005223029357027,
"grad_norm": 0.0,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.0009605571231314162,
"grad_norm": 1.4111747741699219,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0441,
"step": 16
},
{
"epoch": 0.0010205919433271297,
"grad_norm": 0.43236783146858215,
"learning_rate": 9.851478631379982e-05,
"loss": 0.0093,
"step": 17
},
{
"epoch": 0.0010806267635228432,
"grad_norm": 0.7553333640098572,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0163,
"step": 18
},
{
"epoch": 0.0010806267635228432,
"eval_loss": NaN,
"eval_runtime": 1747.826,
"eval_samples_per_second": 8.025,
"eval_steps_per_second": 1.004,
"step": 18
},
{
"epoch": 0.0011406615837185568,
"grad_norm": 3.277437925338745,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0186,
"step": 19
},
{
"epoch": 0.0012006964039142703,
"grad_norm": 0.09917615354061127,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0016,
"step": 20
},
{
"epoch": 0.0012607312241099838,
"grad_norm": 1.3171261548995972,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0108,
"step": 21
},
{
"epoch": 0.0013207660443056973,
"grad_norm": 0.7155139446258545,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0045,
"step": 22
},
{
"epoch": 0.0013808008645014107,
"grad_norm": 0.26169103384017944,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0033,
"step": 23
},
{
"epoch": 0.0014408356846971244,
"grad_norm": 7.987000465393066,
"learning_rate": 9.414737964294636e-05,
"loss": 0.302,
"step": 24
},
{
"epoch": 0.001500870504892838,
"grad_norm": 0.0,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.0015609053250885514,
"grad_norm": 0.009821158833801746,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0003,
"step": 26
},
{
"epoch": 0.0016209401452842648,
"grad_norm": 0.01827707327902317,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0005,
"step": 27
},
{
"epoch": 0.0016209401452842648,
"eval_loss": NaN,
"eval_runtime": 1747.1761,
"eval_samples_per_second": 8.028,
"eval_steps_per_second": 1.004,
"step": 27
},
{
"epoch": 0.0016809749654799783,
"grad_norm": 0.12341640144586563,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0023,
"step": 28
},
{
"epoch": 0.001741009785675692,
"grad_norm": 0.04023746773600578,
"learning_rate": 8.940053768033609e-05,
"loss": 0.0003,
"step": 29
},
{
"epoch": 0.0018010446058714055,
"grad_norm": 0.0,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.001861079426067119,
"grad_norm": 0.4180465638637543,
"learning_rate": 8.715724127386972e-05,
"loss": 0.002,
"step": 31
},
{
"epoch": 0.0019211142462628324,
"grad_norm": 0.028819318860769272,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0004,
"step": 32
},
{
"epoch": 0.001981149066458546,
"grad_norm": 28.845888137817383,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3233,
"step": 33
},
{
"epoch": 0.0020411838866542594,
"grad_norm": 0.015622777864336967,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0005,
"step": 34
},
{
"epoch": 0.002101218706849973,
"grad_norm": 8.221495628356934,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0868,
"step": 35
},
{
"epoch": 0.0021612535270456863,
"grad_norm": 0.7402325868606567,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0035,
"step": 36
},
{
"epoch": 0.0021612535270456863,
"eval_loss": NaN,
"eval_runtime": 1747.9827,
"eval_samples_per_second": 8.025,
"eval_steps_per_second": 1.003,
"step": 36
},
{
"epoch": 0.0022212883472414,
"grad_norm": 0.1636962592601776,
"learning_rate": 7.938926261462366e-05,
"loss": 0.002,
"step": 37
},
{
"epoch": 0.0022813231674371137,
"grad_norm": 4.500185012817383,
"learning_rate": 7.795964517353735e-05,
"loss": 0.1112,
"step": 38
},
{
"epoch": 0.002341357987632827,
"grad_norm": 0.0,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.0024013928078285406,
"grad_norm": 7.916851043701172,
"learning_rate": 7.500000000000001e-05,
"loss": 0.754,
"step": 40
},
{
"epoch": 0.002461427628024254,
"grad_norm": 0.376034140586853,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0019,
"step": 41
},
{
"epoch": 0.0025214624482199676,
"grad_norm": 0.1723334640264511,
"learning_rate": 7.191855733945387e-05,
"loss": 0.002,
"step": 42
},
{
"epoch": 0.002581497268415681,
"grad_norm": 7.519275188446045,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0085,
"step": 43
},
{
"epoch": 0.0026415320886113945,
"grad_norm": 1.5027763843536377,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0151,
"step": 44
},
{
"epoch": 0.002701566908807108,
"grad_norm": 0.010223852470517159,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0003,
"step": 45
},
{
"epoch": 0.002701566908807108,
"eval_loss": NaN,
"eval_runtime": 1748.0845,
"eval_samples_per_second": 8.024,
"eval_steps_per_second": 1.003,
"step": 45
},
{
"epoch": 0.0027616017290028215,
"grad_norm": 0.003923419397324324,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0002,
"step": 46
},
{
"epoch": 0.002821636549198535,
"grad_norm": 0.033521488308906555,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0006,
"step": 47
},
{
"epoch": 0.002881671369394249,
"grad_norm": 0.006656797137111425,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0002,
"step": 48
},
{
"epoch": 0.0029417061895899623,
"grad_norm": 0.0,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.003001741009785676,
"grad_norm": 0.007206365000456572,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0002,
"step": 50
},
{
"epoch": 0.0030617758299813893,
"grad_norm": 2.403134346008301,
"learning_rate": 5.695865504800327e-05,
"loss": 0.2376,
"step": 51
},
{
"epoch": 0.0031218106501771027,
"grad_norm": 0.07276768982410431,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0009,
"step": 52
},
{
"epoch": 0.003181845470372816,
"grad_norm": 14.310860633850098,
"learning_rate": 5.348782368720626e-05,
"loss": 0.0216,
"step": 53
},
{
"epoch": 0.0032418802905685297,
"grad_norm": 0.9602569937705994,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0081,
"step": 54
},
{
"epoch": 0.0032418802905685297,
"eval_loss": NaN,
"eval_runtime": 1747.7961,
"eval_samples_per_second": 8.026,
"eval_steps_per_second": 1.004,
"step": 54
},
{
"epoch": 0.003301915110764243,
"grad_norm": 0.0,
"learning_rate": 5e-05,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.0033619499309599566,
"grad_norm": 0.16967162489891052,
"learning_rate": 4.825502516487497e-05,
"loss": 0.002,
"step": 56
},
{
"epoch": 0.00342198475115567,
"grad_norm": 0.03777099773287773,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.0006,
"step": 57
},
{
"epoch": 0.003482019571351384,
"grad_norm": 0.0,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.0035420543915470975,
"grad_norm": 0.15030822157859802,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0018,
"step": 59
},
{
"epoch": 0.003602089211742811,
"grad_norm": 0.0023677474819123745,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0001,
"step": 60
},
{
"epoch": 0.0036621240319385244,
"grad_norm": 0.01745045743882656,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0002,
"step": 61
},
{
"epoch": 0.003722158852134238,
"grad_norm": 0.018494900315999985,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0002,
"step": 62
},
{
"epoch": 0.0037821936723299514,
"grad_norm": 0.1796453297138214,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0019,
"step": 63
},
{
"epoch": 0.0037821936723299514,
"eval_loss": NaN,
"eval_runtime": 1748.782,
"eval_samples_per_second": 8.021,
"eval_steps_per_second": 1.003,
"step": 63
},
{
"epoch": 0.003842228492525665,
"grad_norm": 0.0,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.0039022633127213783,
"grad_norm": 0.014414871111512184,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0004,
"step": 65
},
{
"epoch": 0.003962298132917092,
"grad_norm": 0.0,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.004022332953112806,
"grad_norm": 0.0,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.004082367773308519,
"grad_norm": 0.25538694858551025,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.003,
"step": 68
},
{
"epoch": 0.004142402593504233,
"grad_norm": 0.007418240420520306,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0002,
"step": 69
},
{
"epoch": 0.004202437413699946,
"grad_norm": 0.002056136028841138,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0001,
"step": 70
},
{
"epoch": 0.00426247223389566,
"grad_norm": 0.04461194574832916,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0006,
"step": 71
},
{
"epoch": 0.004322507054091373,
"grad_norm": 0.10500391572713852,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0008,
"step": 72
},
{
"epoch": 0.004322507054091373,
"eval_loss": NaN,
"eval_runtime": 1748.6975,
"eval_samples_per_second": 8.021,
"eval_steps_per_second": 1.003,
"step": 72
},
{
"epoch": 0.0043825418742870865,
"grad_norm": 0.16719873249530792,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0029,
"step": 73
},
{
"epoch": 0.0044425766944828,
"grad_norm": 0.0035197085235267878,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0001,
"step": 74
},
{
"epoch": 0.0045026115146785135,
"grad_norm": 0.005952209699898958,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0001,
"step": 75
},
{
"epoch": 0.004562646334874227,
"grad_norm": 0.0,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0,
"step": 76
},
{
"epoch": 0.00462268115506994,
"grad_norm": 0.0,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0,
"step": 77
},
{
"epoch": 0.004682715975265654,
"grad_norm": 0.0021064886823296547,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0001,
"step": 78
},
{
"epoch": 0.004742750795461367,
"grad_norm": 0.016838403418660164,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0004,
"step": 79
},
{
"epoch": 0.004802785615657081,
"grad_norm": 0.0046536740846931934,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0001,
"step": 80
},
{
"epoch": 0.004862820435852794,
"grad_norm": 0.01591491512954235,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.0003,
"step": 81
},
{
"epoch": 0.004862820435852794,
"eval_loss": NaN,
"eval_runtime": 1749.4659,
"eval_samples_per_second": 8.018,
"eval_steps_per_second": 1.003,
"step": 81
},
{
"epoch": 0.004922855256048508,
"grad_norm": 0.0,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0,
"step": 82
},
{
"epoch": 0.004982890076244221,
"grad_norm": 0.0,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0,
"step": 83
},
{
"epoch": 0.005042924896439935,
"grad_norm": 0.003679132554680109,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0002,
"step": 84
},
{
"epoch": 0.005102959716635649,
"grad_norm": 0.05891428515315056,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0015,
"step": 85
},
{
"epoch": 0.005162994536831362,
"grad_norm": 0.004211484454572201,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0001,
"step": 86
},
{
"epoch": 0.005223029357027076,
"grad_norm": 0.016904192045331,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0004,
"step": 87
},
{
"epoch": 0.005283064177222789,
"grad_norm": 0.05485766381025314,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0007,
"step": 88
},
{
"epoch": 0.005343098997418503,
"grad_norm": 3.945460557937622,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2777,
"step": 89
},
{
"epoch": 0.005403133817614216,
"grad_norm": 0.0012216288596391678,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0001,
"step": 90
},
{
"epoch": 0.005403133817614216,
"eval_loss": NaN,
"eval_runtime": 1749.58,
"eval_samples_per_second": 8.017,
"eval_steps_per_second": 1.003,
"step": 90
},
{
"epoch": 0.00546316863780993,
"grad_norm": 0.0035565090365707874,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0001,
"step": 91
},
{
"epoch": 0.005523203458005643,
"grad_norm": 0.0,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0,
"step": 92
},
{
"epoch": 0.005583238278201357,
"grad_norm": 0.04645892232656479,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0006,
"step": 93
},
{
"epoch": 0.00564327309839707,
"grad_norm": 0.0014873318141326308,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0001,
"step": 94
},
{
"epoch": 0.005703307918592784,
"grad_norm": 0.2146553099155426,
"learning_rate": 7.596123493895991e-07,
"loss": 0.003,
"step": 95
},
{
"epoch": 0.005763342738788498,
"grad_norm": 16.49122428894043,
"learning_rate": 4.865965629214819e-07,
"loss": 1.4834,
"step": 96
},
{
"epoch": 0.005823377558984211,
"grad_norm": 0.004562173970043659,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0001,
"step": 97
},
{
"epoch": 0.005883412379179925,
"grad_norm": 0.0012735759373754263,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0001,
"step": 98
},
{
"epoch": 0.005943447199375638,
"grad_norm": 0.0040828571654856205,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0001,
"step": 99
},
{
"epoch": 0.005943447199375638,
"eval_loss": NaN,
"eval_runtime": 1749.7589,
"eval_samples_per_second": 8.017,
"eval_steps_per_second": 1.002,
"step": 99
},
{
"epoch": 0.006003482019571352,
"grad_norm": 0.0023373435251414776,
"learning_rate": 0.0,
"loss": 0.0001,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.715631936621773e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}