{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.12399256044637322,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012399256044637321,
"grad_norm": 5.637380599975586,
"learning_rate": 1e-05,
"loss": 7.3895,
"step": 1
},
{
"epoch": 0.0012399256044637321,
"eval_loss": 8.801884651184082,
"eval_runtime": 74.7273,
"eval_samples_per_second": 9.086,
"eval_steps_per_second": 1.137,
"step": 1
},
{
"epoch": 0.0024798512089274642,
"grad_norm": 6.304917812347412,
"learning_rate": 2e-05,
"loss": 8.3013,
"step": 2
},
{
"epoch": 0.0037197768133911966,
"grad_norm": 8.021940231323242,
"learning_rate": 3e-05,
"loss": 9.8095,
"step": 3
},
{
"epoch": 0.0049597024178549285,
"grad_norm": 7.569181442260742,
"learning_rate": 4e-05,
"loss": 8.8083,
"step": 4
},
{
"epoch": 0.006199628022318661,
"grad_norm": 7.446678638458252,
"learning_rate": 5e-05,
"loss": 8.2179,
"step": 5
},
{
"epoch": 0.007439553626782393,
"grad_norm": 7.229241371154785,
"learning_rate": 6e-05,
"loss": 7.3662,
"step": 6
},
{
"epoch": 0.008679479231246125,
"grad_norm": 10.287120819091797,
"learning_rate": 7e-05,
"loss": 8.2256,
"step": 7
},
{
"epoch": 0.009919404835709857,
"grad_norm": 12.380455017089844,
"learning_rate": 8e-05,
"loss": 8.0177,
"step": 8
},
{
"epoch": 0.011159330440173589,
"grad_norm": 15.093891143798828,
"learning_rate": 9e-05,
"loss": 7.6833,
"step": 9
},
{
"epoch": 0.011159330440173589,
"eval_loss": 5.5082526206970215,
"eval_runtime": 74.6366,
"eval_samples_per_second": 9.097,
"eval_steps_per_second": 1.139,
"step": 9
},
{
"epoch": 0.012399256044637322,
"grad_norm": 14.59557819366455,
"learning_rate": 0.0001,
"loss": 4.6802,
"step": 10
},
{
"epoch": 0.013639181649101054,
"grad_norm": 17.438085556030273,
"learning_rate": 9.99695413509548e-05,
"loss": 4.2521,
"step": 11
},
{
"epoch": 0.014879107253564786,
"grad_norm": 9.12221908569336,
"learning_rate": 9.987820251299122e-05,
"loss": 2.4461,
"step": 12
},
{
"epoch": 0.016119032858028518,
"grad_norm": 15.613394737243652,
"learning_rate": 9.972609476841367e-05,
"loss": 2.8293,
"step": 13
},
{
"epoch": 0.01735895846249225,
"grad_norm": 18.53150749206543,
"learning_rate": 9.951340343707852e-05,
"loss": 2.5979,
"step": 14
},
{
"epoch": 0.018598884066955982,
"grad_norm": 16.26443099975586,
"learning_rate": 9.924038765061042e-05,
"loss": 1.8092,
"step": 15
},
{
"epoch": 0.019838809671419714,
"grad_norm": 8.532876014709473,
"learning_rate": 9.890738003669029e-05,
"loss": 0.756,
"step": 16
},
{
"epoch": 0.021078735275883446,
"grad_norm": 11.426972389221191,
"learning_rate": 9.851478631379982e-05,
"loss": 0.9299,
"step": 17
},
{
"epoch": 0.022318660880347178,
"grad_norm": 13.029273986816406,
"learning_rate": 9.806308479691595e-05,
"loss": 1.1201,
"step": 18
},
{
"epoch": 0.022318660880347178,
"eval_loss": 0.8598870038986206,
"eval_runtime": 74.5999,
"eval_samples_per_second": 9.102,
"eval_steps_per_second": 1.139,
"step": 18
},
{
"epoch": 0.023558586484810913,
"grad_norm": 9.530491828918457,
"learning_rate": 9.755282581475769e-05,
"loss": 1.0725,
"step": 19
},
{
"epoch": 0.024798512089274645,
"grad_norm": 9.176093101501465,
"learning_rate": 9.698463103929542e-05,
"loss": 0.7809,
"step": 20
},
{
"epoch": 0.026038437693738377,
"grad_norm": 12.480063438415527,
"learning_rate": 9.635919272833938e-05,
"loss": 0.5477,
"step": 21
},
{
"epoch": 0.02727836329820211,
"grad_norm": 4.838983058929443,
"learning_rate": 9.567727288213005e-05,
"loss": 0.2715,
"step": 22
},
{
"epoch": 0.02851828890266584,
"grad_norm": 7.991964340209961,
"learning_rate": 9.493970231495835e-05,
"loss": 0.7399,
"step": 23
},
{
"epoch": 0.029758214507129573,
"grad_norm": 6.881007671356201,
"learning_rate": 9.414737964294636e-05,
"loss": 0.7484,
"step": 24
},
{
"epoch": 0.030998140111593304,
"grad_norm": 3.942451238632202,
"learning_rate": 9.330127018922194e-05,
"loss": 0.2735,
"step": 25
},
{
"epoch": 0.032238065716057036,
"grad_norm": 5.763513088226318,
"learning_rate": 9.24024048078213e-05,
"loss": 0.499,
"step": 26
},
{
"epoch": 0.03347799132052077,
"grad_norm": 3.860086441040039,
"learning_rate": 9.145187862775209e-05,
"loss": 0.3428,
"step": 27
},
{
"epoch": 0.03347799132052077,
"eval_loss": 0.523270308971405,
"eval_runtime": 74.8185,
"eval_samples_per_second": 9.075,
"eval_steps_per_second": 1.136,
"step": 27
},
{
"epoch": 0.0347179169249845,
"grad_norm": 5.98535680770874,
"learning_rate": 9.045084971874738e-05,
"loss": 1.0863,
"step": 28
},
{
"epoch": 0.03595784252944823,
"grad_norm": 5.963525295257568,
"learning_rate": 8.940053768033609e-05,
"loss": 0.7751,
"step": 29
},
{
"epoch": 0.037197768133911964,
"grad_norm": 5.22709321975708,
"learning_rate": 8.83022221559489e-05,
"loss": 0.792,
"step": 30
},
{
"epoch": 0.038437693738375696,
"grad_norm": 6.858890056610107,
"learning_rate": 8.715724127386972e-05,
"loss": 0.8289,
"step": 31
},
{
"epoch": 0.03967761934283943,
"grad_norm": 4.340625762939453,
"learning_rate": 8.596699001693255e-05,
"loss": 0.3799,
"step": 32
},
{
"epoch": 0.04091754494730316,
"grad_norm": 2.322028636932373,
"learning_rate": 8.473291852294987e-05,
"loss": 0.178,
"step": 33
},
{
"epoch": 0.04215747055176689,
"grad_norm": 5.8546881675720215,
"learning_rate": 8.345653031794292e-05,
"loss": 0.966,
"step": 34
},
{
"epoch": 0.04339739615623062,
"grad_norm": 3.5222458839416504,
"learning_rate": 8.213938048432697e-05,
"loss": 0.2276,
"step": 35
},
{
"epoch": 0.044637321760694355,
"grad_norm": 3.2773797512054443,
"learning_rate": 8.07830737662829e-05,
"loss": 0.4179,
"step": 36
},
{
"epoch": 0.044637321760694355,
"eval_loss": 0.4533182680606842,
"eval_runtime": 74.6166,
"eval_samples_per_second": 9.1,
"eval_steps_per_second": 1.139,
"step": 36
},
{
"epoch": 0.04587724736515809,
"grad_norm": 3.4450690746307373,
"learning_rate": 7.938926261462366e-05,
"loss": 0.4395,
"step": 37
},
{
"epoch": 0.047117172969621826,
"grad_norm": 4.250949382781982,
"learning_rate": 7.795964517353735e-05,
"loss": 0.4893,
"step": 38
},
{
"epoch": 0.04835709857408556,
"grad_norm": 3.408641815185547,
"learning_rate": 7.649596321166024e-05,
"loss": 0.2889,
"step": 39
},
{
"epoch": 0.04959702417854929,
"grad_norm": 5.3293657302856445,
"learning_rate": 7.500000000000001e-05,
"loss": 0.6241,
"step": 40
},
{
"epoch": 0.05083694978301302,
"grad_norm": 3.267144203186035,
"learning_rate": 7.347357813929454e-05,
"loss": 0.2162,
"step": 41
},
{
"epoch": 0.052076875387476754,
"grad_norm": 4.215457916259766,
"learning_rate": 7.191855733945387e-05,
"loss": 0.3686,
"step": 42
},
{
"epoch": 0.053316800991940486,
"grad_norm": 2.5560827255249023,
"learning_rate": 7.033683215379002e-05,
"loss": 0.2603,
"step": 43
},
{
"epoch": 0.05455672659640422,
"grad_norm": 2.027841567993164,
"learning_rate": 6.873032967079561e-05,
"loss": 0.1701,
"step": 44
},
{
"epoch": 0.05579665220086795,
"grad_norm": 5.5120720863342285,
"learning_rate": 6.710100716628344e-05,
"loss": 0.5381,
"step": 45
},
{
"epoch": 0.05579665220086795,
"eval_loss": 0.41586047410964966,
"eval_runtime": 74.5473,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.14,
"step": 45
},
{
"epoch": 0.05703657780533168,
"grad_norm": 6.599036693572998,
"learning_rate": 6.545084971874738e-05,
"loss": 0.581,
"step": 46
},
{
"epoch": 0.05827650340979541,
"grad_norm": 5.571805477142334,
"learning_rate": 6.378186779084995e-05,
"loss": 0.256,
"step": 47
},
{
"epoch": 0.059516429014259145,
"grad_norm": 3.339383363723755,
"learning_rate": 6.209609477998338e-05,
"loss": 0.5642,
"step": 48
},
{
"epoch": 0.06075635461872288,
"grad_norm": 5.781419277191162,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.1324,
"step": 49
},
{
"epoch": 0.06199628022318661,
"grad_norm": 2.382009983062744,
"learning_rate": 5.868240888334653e-05,
"loss": 0.3211,
"step": 50
},
{
"epoch": 0.06323620582765034,
"grad_norm": 1.921871542930603,
"learning_rate": 5.695865504800327e-05,
"loss": 0.126,
"step": 51
},
{
"epoch": 0.06447613143211407,
"grad_norm": 3.4905238151550293,
"learning_rate": 5.522642316338268e-05,
"loss": 0.2554,
"step": 52
},
{
"epoch": 0.0657160570365778,
"grad_norm": 4.125042915344238,
"learning_rate": 5.348782368720626e-05,
"loss": 0.551,
"step": 53
},
{
"epoch": 0.06695598264104154,
"grad_norm": 2.766287088394165,
"learning_rate": 5.174497483512506e-05,
"loss": 0.2406,
"step": 54
},
{
"epoch": 0.06695598264104154,
"eval_loss": 0.3915672302246094,
"eval_runtime": 74.5661,
"eval_samples_per_second": 9.106,
"eval_steps_per_second": 1.14,
"step": 54
},
{
"epoch": 0.06819590824550527,
"grad_norm": 3.8420464992523193,
"learning_rate": 5e-05,
"loss": 0.625,
"step": 55
},
{
"epoch": 0.069435833849969,
"grad_norm": 2.1842472553253174,
"learning_rate": 4.825502516487497e-05,
"loss": 0.1173,
"step": 56
},
{
"epoch": 0.07067575945443273,
"grad_norm": 2.6556928157806396,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.3296,
"step": 57
},
{
"epoch": 0.07191568505889646,
"grad_norm": 2.9910953044891357,
"learning_rate": 4.477357683661734e-05,
"loss": 0.1465,
"step": 58
},
{
"epoch": 0.0731556106633602,
"grad_norm": 4.733443737030029,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.0143,
"step": 59
},
{
"epoch": 0.07439553626782393,
"grad_norm": 6.778609752655029,
"learning_rate": 4.131759111665349e-05,
"loss": 0.8531,
"step": 60
},
{
"epoch": 0.07563546187228766,
"grad_norm": 3.0271034240722656,
"learning_rate": 3.960441545911204e-05,
"loss": 0.2574,
"step": 61
},
{
"epoch": 0.07687538747675139,
"grad_norm": 3.5702953338623047,
"learning_rate": 3.790390522001662e-05,
"loss": 0.5729,
"step": 62
},
{
"epoch": 0.07811531308121512,
"grad_norm": 3.5134518146514893,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.3596,
"step": 63
},
{
"epoch": 0.07811531308121512,
"eval_loss": 0.37827181816101074,
"eval_runtime": 74.5548,
"eval_samples_per_second": 9.107,
"eval_steps_per_second": 1.14,
"step": 63
},
{
"epoch": 0.07935523868567886,
"grad_norm": 1.9535410404205322,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.1719,
"step": 64
},
{
"epoch": 0.08059516429014259,
"grad_norm": 4.465679168701172,
"learning_rate": 3.289899283371657e-05,
"loss": 0.8999,
"step": 65
},
{
"epoch": 0.08183508989460632,
"grad_norm": 1.199249505996704,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0601,
"step": 66
},
{
"epoch": 0.08307501549907005,
"grad_norm": 2.8671321868896484,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.3225,
"step": 67
},
{
"epoch": 0.08431494110353378,
"grad_norm": 3.700127363204956,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2454,
"step": 68
},
{
"epoch": 0.08555486670799752,
"grad_norm": 2.447843313217163,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.3173,
"step": 69
},
{
"epoch": 0.08679479231246125,
"grad_norm": 4.898487567901611,
"learning_rate": 2.500000000000001e-05,
"loss": 0.6874,
"step": 70
},
{
"epoch": 0.08803471791692498,
"grad_norm": 3.498596429824829,
"learning_rate": 2.350403678833976e-05,
"loss": 0.5571,
"step": 71
},
{
"epoch": 0.08927464352138871,
"grad_norm": 2.4713706970214844,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.1637,
"step": 72
},
{
"epoch": 0.08927464352138871,
"eval_loss": 0.3753897249698639,
"eval_runtime": 74.5535,
"eval_samples_per_second": 9.108,
"eval_steps_per_second": 1.14,
"step": 72
},
{
"epoch": 0.09051456912585244,
"grad_norm": 4.455976486206055,
"learning_rate": 2.061073738537635e-05,
"loss": 0.2915,
"step": 73
},
{
"epoch": 0.09175449473031617,
"grad_norm": 3.757282018661499,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.4243,
"step": 74
},
{
"epoch": 0.0929944203347799,
"grad_norm": 3.680955410003662,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.4761,
"step": 75
},
{
"epoch": 0.09423434593924365,
"grad_norm": 2.8234503269195557,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.2459,
"step": 76
},
{
"epoch": 0.09547427154370738,
"grad_norm": 3.906480073928833,
"learning_rate": 1.526708147705013e-05,
"loss": 0.4904,
"step": 77
},
{
"epoch": 0.09671419714817112,
"grad_norm": 4.214174270629883,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.7033,
"step": 78
},
{
"epoch": 0.09795412275263485,
"grad_norm": 2.5430684089660645,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.2709,
"step": 79
},
{
"epoch": 0.09919404835709858,
"grad_norm": 3.603428363800049,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.2043,
"step": 80
},
{
"epoch": 0.10043397396156231,
"grad_norm": 2.2767882347106934,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.1009,
"step": 81
},
{
"epoch": 0.10043397396156231,
"eval_loss": 0.36546072363853455,
"eval_runtime": 74.5654,
"eval_samples_per_second": 9.106,
"eval_steps_per_second": 1.14,
"step": 81
},
{
"epoch": 0.10167389956602604,
"grad_norm": 1.9613827466964722,
"learning_rate": 9.549150281252633e-06,
"loss": 0.2421,
"step": 82
},
{
"epoch": 0.10291382517048978,
"grad_norm": 3.4064152240753174,
"learning_rate": 8.548121372247918e-06,
"loss": 0.6087,
"step": 83
},
{
"epoch": 0.10415375077495351,
"grad_norm": 3.468510150909424,
"learning_rate": 7.597595192178702e-06,
"loss": 0.3645,
"step": 84
},
{
"epoch": 0.10539367637941724,
"grad_norm": 2.997673749923706,
"learning_rate": 6.698729810778065e-06,
"loss": 0.2891,
"step": 85
},
{
"epoch": 0.10663360198388097,
"grad_norm": 3.279283285140991,
"learning_rate": 5.852620357053651e-06,
"loss": 0.3884,
"step": 86
},
{
"epoch": 0.1078735275883447,
"grad_norm": 3.1893913745880127,
"learning_rate": 5.060297685041659e-06,
"loss": 0.2048,
"step": 87
},
{
"epoch": 0.10911345319280844,
"grad_norm": 3.0490288734436035,
"learning_rate": 4.322727117869951e-06,
"loss": 0.2788,
"step": 88
},
{
"epoch": 0.11035337879727217,
"grad_norm": 4.683725833892822,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.2737,
"step": 89
},
{
"epoch": 0.1115933044017359,
"grad_norm": 3.1918933391571045,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.4477,
"step": 90
},
{
"epoch": 0.1115933044017359,
"eval_loss": 0.3621177077293396,
"eval_runtime": 74.5607,
"eval_samples_per_second": 9.107,
"eval_steps_per_second": 1.14,
"step": 90
},
{
"epoch": 0.11283323000619963,
"grad_norm": 2.881042003631592,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.2986,
"step": 91
},
{
"epoch": 0.11407315561066336,
"grad_norm": 2.159529685974121,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.1908,
"step": 92
},
{
"epoch": 0.1153130812151271,
"grad_norm": 2.5343451499938965,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.3447,
"step": 93
},
{
"epoch": 0.11655300681959083,
"grad_norm": 2.5903573036193848,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.3459,
"step": 94
},
{
"epoch": 0.11779293242405456,
"grad_norm": 1.5675890445709229,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0968,
"step": 95
},
{
"epoch": 0.11903285802851829,
"grad_norm": 3.2924253940582275,
"learning_rate": 4.865965629214819e-07,
"loss": 0.4554,
"step": 96
},
{
"epoch": 0.12027278363298202,
"grad_norm": 3.223909378051758,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.2568,
"step": 97
},
{
"epoch": 0.12151270923744575,
"grad_norm": 2.694246292114258,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.4494,
"step": 98
},
{
"epoch": 0.12275263484190949,
"grad_norm": 2.3312549591064453,
"learning_rate": 3.04586490452119e-08,
"loss": 0.365,
"step": 99
},
{
"epoch": 0.12275263484190949,
"eval_loss": 0.36193743348121643,
"eval_runtime": 74.542,
"eval_samples_per_second": 9.109,
"eval_steps_per_second": 1.14,
"step": 99
},
{
"epoch": 0.12399256044637322,
"grad_norm": 2.271353244781494,
"learning_rate": 0.0,
"loss": 0.2151,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.41887283560448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}