{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 98720,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 27.71003532409668,
"learning_rate": 9.949351701782822e-06,
"loss": 2.7967,
"step": 500
},
{
"epoch": 0.2,
"grad_norm": 23.26209831237793,
"learning_rate": 9.898703403565641e-06,
"loss": 2.5399,
"step": 1000
},
{
"epoch": 0.3,
"grad_norm": 23.482177734375,
"learning_rate": 9.84805510534846e-06,
"loss": 2.5093,
"step": 1500
},
{
"epoch": 0.41,
"grad_norm": 22.170177459716797,
"learning_rate": 9.797406807131282e-06,
"loss": 2.4094,
"step": 2000
},
{
"epoch": 0.51,
"grad_norm": 19.412521362304688,
"learning_rate": 9.746758508914101e-06,
"loss": 2.3084,
"step": 2500
},
{
"epoch": 0.61,
"grad_norm": 28.102920532226562,
"learning_rate": 9.696110210696922e-06,
"loss": 2.364,
"step": 3000
},
{
"epoch": 0.71,
"grad_norm": 24.38800048828125,
"learning_rate": 9.645461912479742e-06,
"loss": 2.2956,
"step": 3500
},
{
"epoch": 0.81,
"grad_norm": 18.825523376464844,
"learning_rate": 9.594813614262561e-06,
"loss": 2.2741,
"step": 4000
},
{
"epoch": 0.91,
"grad_norm": 19.316570281982422,
"learning_rate": 9.544165316045382e-06,
"loss": 2.2161,
"step": 4500
},
{
"epoch": 1.0,
"eval_loss": null,
"eval_runtime": 77.6479,
"eval_samples_per_second": 127.151,
"eval_steps_per_second": 15.905,
"step": 4936
},
{
"epoch": 1.01,
"grad_norm": 17.73604393005371,
"learning_rate": 9.493517017828202e-06,
"loss": 2.2035,
"step": 5000
},
{
"epoch": 1.11,
"grad_norm": 24.61719512939453,
"learning_rate": 9.442868719611021e-06,
"loss": 2.1512,
"step": 5500
},
{
"epoch": 1.22,
"grad_norm": 24.99042510986328,
"learning_rate": 9.392220421393842e-06,
"loss": 2.1579,
"step": 6000
},
{
"epoch": 1.32,
"grad_norm": 28.80664825439453,
"learning_rate": 9.341572123176662e-06,
"loss": 2.1624,
"step": 6500
},
{
"epoch": 1.42,
"grad_norm": 17.5797176361084,
"learning_rate": 9.290923824959483e-06,
"loss": 2.1107,
"step": 7000
},
{
"epoch": 1.52,
"grad_norm": 16.641563415527344,
"learning_rate": 9.240275526742302e-06,
"loss": 2.0927,
"step": 7500
},
{
"epoch": 1.62,
"grad_norm": 20.170042037963867,
"learning_rate": 9.189627228525122e-06,
"loss": 2.1255,
"step": 8000
},
{
"epoch": 1.72,
"grad_norm": 19.44417381286621,
"learning_rate": 9.138978930307943e-06,
"loss": 2.0959,
"step": 8500
},
{
"epoch": 1.82,
"grad_norm": 15.98162841796875,
"learning_rate": 9.088330632090762e-06,
"loss": 2.107,
"step": 9000
},
{
"epoch": 1.92,
"grad_norm": 26.916950225830078,
"learning_rate": 9.037682333873583e-06,
"loss": 2.0531,
"step": 9500
},
{
"epoch": 2.0,
"eval_loss": 1.9590071439743042,
"eval_runtime": 77.6637,
"eval_samples_per_second": 127.125,
"eval_steps_per_second": 15.902,
"step": 9872
},
{
"epoch": 2.03,
"grad_norm": 16.49506187438965,
"learning_rate": 8.987034035656403e-06,
"loss": 2.0818,
"step": 10000
},
{
"epoch": 2.13,
"grad_norm": 33.3337516784668,
"learning_rate": 8.936385737439222e-06,
"loss": 2.083,
"step": 10500
},
{
"epoch": 2.23,
"grad_norm": 25.783584594726562,
"learning_rate": 8.885737439222043e-06,
"loss": 2.0203,
"step": 11000
},
{
"epoch": 2.33,
"grad_norm": 15.96865177154541,
"learning_rate": 8.835089141004863e-06,
"loss": 1.9965,
"step": 11500
},
{
"epoch": 2.43,
"grad_norm": 17.585542678833008,
"learning_rate": 8.784440842787682e-06,
"loss": 2.0138,
"step": 12000
},
{
"epoch": 2.53,
"grad_norm": 17.34233856201172,
"learning_rate": 8.733792544570503e-06,
"loss": 1.9905,
"step": 12500
},
{
"epoch": 2.63,
"grad_norm": 22.320497512817383,
"learning_rate": 8.683144246353323e-06,
"loss": 2.0074,
"step": 13000
},
{
"epoch": 2.74,
"grad_norm": 19.803224563598633,
"learning_rate": 8.632495948136144e-06,
"loss": 1.9483,
"step": 13500
},
{
"epoch": 2.84,
"grad_norm": 17.85085678100586,
"learning_rate": 8.581847649918963e-06,
"loss": 2.014,
"step": 14000
},
{
"epoch": 2.94,
"grad_norm": 23.786117553710938,
"learning_rate": 8.531199351701782e-06,
"loss": 1.978,
"step": 14500
},
{
"epoch": 3.0,
"eval_loss": null,
"eval_runtime": 77.6234,
"eval_samples_per_second": 127.191,
"eval_steps_per_second": 15.91,
"step": 14808
},
{
"epoch": 3.04,
"grad_norm": 14.98945140838623,
"learning_rate": 8.480551053484604e-06,
"loss": 1.9568,
"step": 15000
},
{
"epoch": 3.14,
"grad_norm": 21.061824798583984,
"learning_rate": 8.429902755267425e-06,
"loss": 1.9454,
"step": 15500
},
{
"epoch": 3.24,
"grad_norm": 17.81777000427246,
"learning_rate": 8.379254457050244e-06,
"loss": 1.9033,
"step": 16000
},
{
"epoch": 3.34,
"grad_norm": 19.77060890197754,
"learning_rate": 8.328606158833064e-06,
"loss": 1.971,
"step": 16500
},
{
"epoch": 3.44,
"grad_norm": 23.06456756591797,
"learning_rate": 8.277957860615885e-06,
"loss": 1.9344,
"step": 17000
},
{
"epoch": 3.55,
"grad_norm": 21.723846435546875,
"learning_rate": 8.227309562398704e-06,
"loss": 1.9551,
"step": 17500
},
{
"epoch": 3.65,
"grad_norm": 14.961769104003906,
"learning_rate": 8.176661264181525e-06,
"loss": 1.8939,
"step": 18000
},
{
"epoch": 3.75,
"grad_norm": 35.004859924316406,
"learning_rate": 8.126012965964345e-06,
"loss": 1.9347,
"step": 18500
},
{
"epoch": 3.85,
"grad_norm": 20.03676986694336,
"learning_rate": 8.075364667747164e-06,
"loss": 1.8949,
"step": 19000
},
{
"epoch": 3.95,
"grad_norm": 23.595853805541992,
"learning_rate": 8.024716369529985e-06,
"loss": 1.9616,
"step": 19500
},
{
"epoch": 4.0,
"eval_loss": null,
"eval_runtime": 77.6621,
"eval_samples_per_second": 127.128,
"eval_steps_per_second": 15.902,
"step": 19744
},
{
"epoch": 4.05,
"grad_norm": 24.895097732543945,
"learning_rate": 7.974068071312805e-06,
"loss": 1.9181,
"step": 20000
},
{
"epoch": 4.15,
"grad_norm": 16.272125244140625,
"learning_rate": 7.923419773095624e-06,
"loss": 1.8546,
"step": 20500
},
{
"epoch": 4.25,
"grad_norm": 16.912342071533203,
"learning_rate": 7.872771474878445e-06,
"loss": 1.8722,
"step": 21000
},
{
"epoch": 4.36,
"grad_norm": 19.567813873291016,
"learning_rate": 7.822123176661264e-06,
"loss": 1.8421,
"step": 21500
},
{
"epoch": 4.46,
"grad_norm": 24.266704559326172,
"learning_rate": 7.771474878444086e-06,
"loss": 1.8481,
"step": 22000
},
{
"epoch": 4.56,
"grad_norm": 39.59248733520508,
"learning_rate": 7.720826580226905e-06,
"loss": 1.8366,
"step": 22500
},
{
"epoch": 4.66,
"grad_norm": 28.474586486816406,
"learning_rate": 7.670178282009724e-06,
"loss": 1.8959,
"step": 23000
},
{
"epoch": 4.76,
"grad_norm": 25.967967987060547,
"learning_rate": 7.6195299837925455e-06,
"loss": 1.8536,
"step": 23500
},
{
"epoch": 4.86,
"grad_norm": 22.55752944946289,
"learning_rate": 7.568881685575366e-06,
"loss": 1.8809,
"step": 24000
},
{
"epoch": 4.96,
"grad_norm": 26.150907516479492,
"learning_rate": 7.518233387358186e-06,
"loss": 1.8559,
"step": 24500
},
{
"epoch": 5.0,
"eval_loss": 1.8337851762771606,
"eval_runtime": 77.7133,
"eval_samples_per_second": 127.044,
"eval_steps_per_second": 15.892,
"step": 24680
},
{
"epoch": 5.06,
"grad_norm": 16.329139709472656,
"learning_rate": 7.4675850891410055e-06,
"loss": 1.855,
"step": 25000
},
{
"epoch": 5.17,
"grad_norm": 21.760122299194336,
"learning_rate": 7.416936790923826e-06,
"loss": 1.8109,
"step": 25500
},
{
"epoch": 5.27,
"grad_norm": 20.415075302124023,
"learning_rate": 7.366288492706646e-06,
"loss": 1.7785,
"step": 26000
},
{
"epoch": 5.37,
"grad_norm": 14.889837265014648,
"learning_rate": 7.315640194489466e-06,
"loss": 1.8091,
"step": 26500
},
{
"epoch": 5.47,
"grad_norm": 20.167137145996094,
"learning_rate": 7.264991896272286e-06,
"loss": 1.7611,
"step": 27000
},
{
"epoch": 5.57,
"grad_norm": 23.077468872070312,
"learning_rate": 7.214343598055106e-06,
"loss": 1.817,
"step": 27500
},
{
"epoch": 5.67,
"grad_norm": 21.367464065551758,
"learning_rate": 7.163695299837926e-06,
"loss": 1.7946,
"step": 28000
},
{
"epoch": 5.77,
"grad_norm": 15.969755172729492,
"learning_rate": 7.1130470016207465e-06,
"loss": 1.7874,
"step": 28500
},
{
"epoch": 5.88,
"grad_norm": 21.294042587280273,
"learning_rate": 7.062398703403566e-06,
"loss": 1.7853,
"step": 29000
},
{
"epoch": 5.98,
"grad_norm": 19.84623908996582,
"learning_rate": 7.011750405186386e-06,
"loss": 1.7902,
"step": 29500
},
{
"epoch": 6.0,
"eval_loss": 1.7772495746612549,
"eval_runtime": 77.7233,
"eval_samples_per_second": 127.028,
"eval_steps_per_second": 15.89,
"step": 29616
},
{
"epoch": 6.08,
"grad_norm": 21.59821128845215,
"learning_rate": 6.961102106969206e-06,
"loss": 1.7444,
"step": 30000
},
{
"epoch": 6.18,
"grad_norm": 20.259178161621094,
"learning_rate": 6.910453808752027e-06,
"loss": 1.7723,
"step": 30500
},
{
"epoch": 6.28,
"grad_norm": 19.63451385498047,
"learning_rate": 6.859805510534847e-06,
"loss": 1.7154,
"step": 31000
},
{
"epoch": 6.38,
"grad_norm": 24.970842361450195,
"learning_rate": 6.809157212317666e-06,
"loss": 1.7357,
"step": 31500
},
{
"epoch": 6.48,
"grad_norm": 20.864076614379883,
"learning_rate": 6.758508914100487e-06,
"loss": 1.7623,
"step": 32000
},
{
"epoch": 6.58,
"grad_norm": 23.677553176879883,
"learning_rate": 6.707860615883307e-06,
"loss": 1.7245,
"step": 32500
},
{
"epoch": 6.69,
"grad_norm": 21.432083129882812,
"learning_rate": 6.657212317666127e-06,
"loss": 1.7456,
"step": 33000
},
{
"epoch": 6.79,
"grad_norm": 27.3277587890625,
"learning_rate": 6.6065640194489465e-06,
"loss": 1.778,
"step": 33500
},
{
"epoch": 6.89,
"grad_norm": 19.86969566345215,
"learning_rate": 6.555915721231767e-06,
"loss": 1.7793,
"step": 34000
},
{
"epoch": 6.99,
"grad_norm": 22.414249420166016,
"learning_rate": 6.505267423014587e-06,
"loss": 1.7603,
"step": 34500
},
{
"epoch": 7.0,
"eval_loss": 1.7686270475387573,
"eval_runtime": 77.7088,
"eval_samples_per_second": 127.051,
"eval_steps_per_second": 15.893,
"step": 34552
},
{
"epoch": 7.09,
"grad_norm": 26.82056999206543,
"learning_rate": 6.454619124797407e-06,
"loss": 1.7195,
"step": 35000
},
{
"epoch": 7.19,
"grad_norm": 31.928022384643555,
"learning_rate": 6.403970826580227e-06,
"loss": 1.7568,
"step": 35500
},
{
"epoch": 7.29,
"grad_norm": 20.51543426513672,
"learning_rate": 6.353322528363047e-06,
"loss": 1.709,
"step": 36000
},
{
"epoch": 7.39,
"grad_norm": 21.46121597290039,
"learning_rate": 6.302674230145867e-06,
"loss": 1.7127,
"step": 36500
},
{
"epoch": 7.5,
"grad_norm": 21.47946548461914,
"learning_rate": 6.252025931928688e-06,
"loss": 1.7442,
"step": 37000
},
{
"epoch": 7.6,
"grad_norm": 21.469694137573242,
"learning_rate": 6.201377633711507e-06,
"loss": 1.7433,
"step": 37500
},
{
"epoch": 7.7,
"grad_norm": 17.47914695739746,
"learning_rate": 6.150729335494327e-06,
"loss": 1.7518,
"step": 38000
},
{
"epoch": 7.8,
"grad_norm": 17.23418617248535,
"learning_rate": 6.100081037277148e-06,
"loss": 1.7166,
"step": 38500
},
{
"epoch": 7.9,
"grad_norm": 20.12972640991211,
"learning_rate": 6.0494327390599686e-06,
"loss": 1.769,
"step": 39000
},
{
"epoch": 8.0,
"eval_loss": 1.736777424812317,
"eval_runtime": 77.6891,
"eval_samples_per_second": 127.083,
"eval_steps_per_second": 15.897,
"step": 39488
},
{
"epoch": 8.0,
"grad_norm": 16.57518196105957,
"learning_rate": 5.998784440842789e-06,
"loss": 1.6586,
"step": 39500
},
{
"epoch": 8.1,
"grad_norm": 21.029388427734375,
"learning_rate": 5.948136142625608e-06,
"loss": 1.6599,
"step": 40000
},
{
"epoch": 8.21,
"grad_norm": 11.840949058532715,
"learning_rate": 5.8974878444084285e-06,
"loss": 1.6946,
"step": 40500
},
{
"epoch": 8.31,
"grad_norm": 18.389739990234375,
"learning_rate": 5.846839546191249e-06,
"loss": 1.6685,
"step": 41000
},
{
"epoch": 8.41,
"grad_norm": 19.934383392333984,
"learning_rate": 5.796191247974069e-06,
"loss": 1.7214,
"step": 41500
},
{
"epoch": 8.51,
"grad_norm": 22.178720474243164,
"learning_rate": 5.7455429497568885e-06,
"loss": 1.7002,
"step": 42000
},
{
"epoch": 8.61,
"grad_norm": 29.597888946533203,
"learning_rate": 5.694894651539709e-06,
"loss": 1.7304,
"step": 42500
},
{
"epoch": 8.71,
"grad_norm": 20.78500747680664,
"learning_rate": 5.644246353322529e-06,
"loss": 1.6568,
"step": 43000
},
{
"epoch": 8.81,
"grad_norm": 23.99928855895996,
"learning_rate": 5.593598055105349e-06,
"loss": 1.692,
"step": 43500
},
{
"epoch": 8.91,
"grad_norm": 28.18294334411621,
"learning_rate": 5.542949756888169e-06,
"loss": 1.6527,
"step": 44000
},
{
"epoch": 9.0,
"eval_loss": null,
"eval_runtime": 77.6864,
"eval_samples_per_second": 127.088,
"eval_steps_per_second": 15.897,
"step": 44424
},
{
"epoch": 9.02,
"grad_norm": 16.98941993713379,
"learning_rate": 5.492301458670989e-06,
"loss": 1.6694,
"step": 44500
},
{
"epoch": 9.12,
"grad_norm": 25.71541976928711,
"learning_rate": 5.441653160453809e-06,
"loss": 1.6672,
"step": 45000
},
{
"epoch": 9.22,
"grad_norm": 18.719865798950195,
"learning_rate": 5.3910048622366294e-06,
"loss": 1.6557,
"step": 45500
},
{
"epoch": 9.32,
"grad_norm": 25.50741195678711,
"learning_rate": 5.34035656401945e-06,
"loss": 1.6223,
"step": 46000
},
{
"epoch": 9.42,
"grad_norm": 19.657133102416992,
"learning_rate": 5.289708265802269e-06,
"loss": 1.6683,
"step": 46500
},
{
"epoch": 9.52,
"grad_norm": 16.309303283691406,
"learning_rate": 5.239059967585089e-06,
"loss": 1.6707,
"step": 47000
},
{
"epoch": 9.62,
"grad_norm": 16.309322357177734,
"learning_rate": 5.18841166936791e-06,
"loss": 1.6603,
"step": 47500
},
{
"epoch": 9.72,
"grad_norm": 22.41241455078125,
"learning_rate": 5.13776337115073e-06,
"loss": 1.6055,
"step": 48000
},
{
"epoch": 9.83,
"grad_norm": 17.34325408935547,
"learning_rate": 5.087115072933549e-06,
"loss": 1.673,
"step": 48500
},
{
"epoch": 9.93,
"grad_norm": 23.603504180908203,
"learning_rate": 5.03646677471637e-06,
"loss": 1.65,
"step": 49000
},
{
"epoch": 10.0,
"eval_loss": 1.688843846321106,
"eval_runtime": 77.6895,
"eval_samples_per_second": 127.083,
"eval_steps_per_second": 15.897,
"step": 49360
},
{
"epoch": 10.03,
"grad_norm": 24.20751190185547,
"learning_rate": 4.98581847649919e-06,
"loss": 1.6285,
"step": 49500
},
{
"epoch": 10.13,
"grad_norm": 15.967161178588867,
"learning_rate": 4.93517017828201e-06,
"loss": 1.608,
"step": 50000
},
{
"epoch": 10.23,
"grad_norm": 15.834975242614746,
"learning_rate": 4.88452188006483e-06,
"loss": 1.6181,
"step": 50500
},
{
"epoch": 10.33,
"grad_norm": 36.941707611083984,
"learning_rate": 4.83387358184765e-06,
"loss": 1.6119,
"step": 51000
},
{
"epoch": 10.43,
"grad_norm": 8.939834594726562,
"learning_rate": 4.78322528363047e-06,
"loss": 1.6556,
"step": 51500
},
{
"epoch": 10.53,
"grad_norm": 15.55808162689209,
"learning_rate": 4.73257698541329e-06,
"loss": 1.6024,
"step": 52000
},
{
"epoch": 10.64,
"grad_norm": 18.59736442565918,
"learning_rate": 4.6819286871961106e-06,
"loss": 1.6258,
"step": 52500
},
{
"epoch": 10.74,
"grad_norm": 18.915645599365234,
"learning_rate": 4.631280388978931e-06,
"loss": 1.6147,
"step": 53000
},
{
"epoch": 10.84,
"grad_norm": 21.615427017211914,
"learning_rate": 4.580632090761751e-06,
"loss": 1.6278,
"step": 53500
},
{
"epoch": 10.94,
"grad_norm": 16.90226936340332,
"learning_rate": 4.529983792544571e-06,
"loss": 1.6281,
"step": 54000
},
{
"epoch": 11.0,
"eval_loss": null,
"eval_runtime": 77.7252,
"eval_samples_per_second": 127.024,
"eval_steps_per_second": 15.889,
"step": 54296
},
{
"epoch": 11.04,
"grad_norm": 17.557144165039062,
"learning_rate": 4.479335494327391e-06,
"loss": 1.6239,
"step": 54500
},
{
"epoch": 11.14,
"grad_norm": 30.636253356933594,
"learning_rate": 4.428687196110211e-06,
"loss": 1.6089,
"step": 55000
},
{
"epoch": 11.24,
"grad_norm": 7.480132102966309,
"learning_rate": 4.378038897893031e-06,
"loss": 1.6014,
"step": 55500
},
{
"epoch": 11.35,
"grad_norm": 17.750568389892578,
"learning_rate": 4.3273905996758516e-06,
"loss": 1.6182,
"step": 56000
},
{
"epoch": 11.45,
"grad_norm": 21.50258445739746,
"learning_rate": 4.276742301458671e-06,
"loss": 1.6185,
"step": 56500
},
{
"epoch": 11.55,
"grad_norm": 12.455598831176758,
"learning_rate": 4.226094003241491e-06,
"loss": 1.5705,
"step": 57000
},
{
"epoch": 11.65,
"grad_norm": 21.358774185180664,
"learning_rate": 4.1754457050243115e-06,
"loss": 1.6046,
"step": 57500
},
{
"epoch": 11.75,
"grad_norm": 23.460098266601562,
"learning_rate": 4.124797406807132e-06,
"loss": 1.604,
"step": 58000
},
{
"epoch": 11.85,
"grad_norm": 24.99700355529785,
"learning_rate": 4.074149108589951e-06,
"loss": 1.6132,
"step": 58500
},
{
"epoch": 11.95,
"grad_norm": 20.90674591064453,
"learning_rate": 4.0235008103727714e-06,
"loss": 1.591,
"step": 59000
},
{
"epoch": 12.0,
"eval_loss": 1.6575616598129272,
"eval_runtime": 77.7204,
"eval_samples_per_second": 127.032,
"eval_steps_per_second": 15.89,
"step": 59232
},
{
"epoch": 12.05,
"grad_norm": 23.823829650878906,
"learning_rate": 3.972852512155592e-06,
"loss": 1.6094,
"step": 59500
},
{
"epoch": 12.16,
"grad_norm": 18.17566680908203,
"learning_rate": 3.922204213938412e-06,
"loss": 1.5851,
"step": 60000
},
{
"epoch": 12.26,
"grad_norm": 19.279071807861328,
"learning_rate": 3.871555915721232e-06,
"loss": 1.6302,
"step": 60500
},
{
"epoch": 12.36,
"grad_norm": 19.40210723876953,
"learning_rate": 3.8209076175040525e-06,
"loss": 1.5933,
"step": 61000
},
{
"epoch": 12.46,
"grad_norm": 20.0490665435791,
"learning_rate": 3.7702593192868723e-06,
"loss": 1.6217,
"step": 61500
},
{
"epoch": 12.56,
"grad_norm": 17.142908096313477,
"learning_rate": 3.719611021069692e-06,
"loss": 1.5791,
"step": 62000
},
{
"epoch": 12.66,
"grad_norm": 23.866859436035156,
"learning_rate": 3.668962722852513e-06,
"loss": 1.6127,
"step": 62500
},
{
"epoch": 12.76,
"grad_norm": 21.511199951171875,
"learning_rate": 3.6183144246353323e-06,
"loss": 1.5558,
"step": 63000
},
{
"epoch": 12.86,
"grad_norm": 20.330402374267578,
"learning_rate": 3.567666126418153e-06,
"loss": 1.5865,
"step": 63500
},
{
"epoch": 12.97,
"grad_norm": 17.255298614501953,
"learning_rate": 3.517017828200973e-06,
"loss": 1.5508,
"step": 64000
},
{
"epoch": 13.0,
"eval_loss": null,
"eval_runtime": 77.745,
"eval_samples_per_second": 126.992,
"eval_steps_per_second": 15.885,
"step": 64168
},
{
"epoch": 13.07,
"grad_norm": 15.916613578796387,
"learning_rate": 3.466369529983793e-06,
"loss": 1.5599,
"step": 64500
},
{
"epoch": 13.17,
"grad_norm": 12.248026847839355,
"learning_rate": 3.415721231766613e-06,
"loss": 1.5774,
"step": 65000
},
{
"epoch": 13.27,
"grad_norm": 20.037677764892578,
"learning_rate": 3.365072933549433e-06,
"loss": 1.5538,
"step": 65500
},
{
"epoch": 13.37,
"grad_norm": 20.426361083984375,
"learning_rate": 3.314424635332253e-06,
"loss": 1.5883,
"step": 66000
},
{
"epoch": 13.47,
"grad_norm": 32.5631103515625,
"learning_rate": 3.2637763371150733e-06,
"loss": 1.558,
"step": 66500
},
{
"epoch": 13.57,
"grad_norm": 25.788705825805664,
"learning_rate": 3.213128038897893e-06,
"loss": 1.571,
"step": 67000
},
{
"epoch": 13.68,
"grad_norm": 15.069945335388184,
"learning_rate": 3.1624797406807134e-06,
"loss": 1.5732,
"step": 67500
},
{
"epoch": 13.78,
"grad_norm": 13.779557228088379,
"learning_rate": 3.1118314424635336e-06,
"loss": 1.5425,
"step": 68000
},
{
"epoch": 13.88,
"grad_norm": 15.08449935913086,
"learning_rate": 3.0611831442463535e-06,
"loss": 1.5852,
"step": 68500
},
{
"epoch": 13.98,
"grad_norm": 32.31583023071289,
"learning_rate": 3.0105348460291737e-06,
"loss": 1.5929,
"step": 69000
},
{
"epoch": 14.0,
"eval_loss": 1.6150667667388916,
"eval_runtime": 77.7004,
"eval_samples_per_second": 127.065,
"eval_steps_per_second": 15.894,
"step": 69104
},
{
"epoch": 14.08,
"grad_norm": 25.524412155151367,
"learning_rate": 2.9598865478119936e-06,
"loss": 1.5315,
"step": 69500
},
{
"epoch": 14.18,
"grad_norm": 19.731473922729492,
"learning_rate": 2.9092382495948142e-06,
"loss": 1.536,
"step": 70000
},
{
"epoch": 14.28,
"grad_norm": 21.87810516357422,
"learning_rate": 2.8585899513776337e-06,
"loss": 1.512,
"step": 70500
},
{
"epoch": 14.38,
"grad_norm": 28.01495933532715,
"learning_rate": 2.8079416531604543e-06,
"loss": 1.5568,
"step": 71000
},
{
"epoch": 14.49,
"grad_norm": 17.939329147338867,
"learning_rate": 2.757293354943274e-06,
"loss": 1.5332,
"step": 71500
},
{
"epoch": 14.59,
"grad_norm": 20.130300521850586,
"learning_rate": 2.7066450567260944e-06,
"loss": 1.5775,
"step": 72000
},
{
"epoch": 14.69,
"grad_norm": 12.78222942352295,
"learning_rate": 2.6559967585089143e-06,
"loss": 1.5827,
"step": 72500
},
{
"epoch": 14.79,
"grad_norm": 15.022797584533691,
"learning_rate": 2.6053484602917346e-06,
"loss": 1.5574,
"step": 73000
},
{
"epoch": 14.89,
"grad_norm": 16.1165714263916,
"learning_rate": 2.5547001620745544e-06,
"loss": 1.5317,
"step": 73500
},
{
"epoch": 14.99,
"grad_norm": 19.746944427490234,
"learning_rate": 2.5040518638573747e-06,
"loss": 1.5504,
"step": 74000
},
{
"epoch": 15.0,
"eval_loss": null,
"eval_runtime": 77.7514,
"eval_samples_per_second": 126.982,
"eval_steps_per_second": 15.884,
"step": 74040
},
{
"epoch": 15.09,
"grad_norm": 25.471561431884766,
"learning_rate": 2.453403565640195e-06,
"loss": 1.5746,
"step": 74500
},
{
"epoch": 15.19,
"grad_norm": 15.089065551757812,
"learning_rate": 2.4027552674230148e-06,
"loss": 1.5614,
"step": 75000
},
{
"epoch": 15.3,
"grad_norm": 24.640731811523438,
"learning_rate": 2.352106969205835e-06,
"loss": 1.5522,
"step": 75500
},
{
"epoch": 15.4,
"grad_norm": 17.83973503112793,
"learning_rate": 2.301458670988655e-06,
"loss": 1.4853,
"step": 76000
},
{
"epoch": 15.5,
"grad_norm": 26.84282875061035,
"learning_rate": 2.250810372771475e-06,
"loss": 1.4772,
"step": 76500
},
{
"epoch": 15.6,
"grad_norm": 17.549413681030273,
"learning_rate": 2.200162074554295e-06,
"loss": 1.4863,
"step": 77000
},
{
"epoch": 15.7,
"grad_norm": 30.209253311157227,
"learning_rate": 2.1495137763371152e-06,
"loss": 1.5432,
"step": 77500
},
{
"epoch": 15.8,
"grad_norm": 16.511491775512695,
"learning_rate": 2.098865478119935e-06,
"loss": 1.5001,
"step": 78000
},
{
"epoch": 15.9,
"grad_norm": 22.751419067382812,
"learning_rate": 2.0482171799027553e-06,
"loss": 1.5492,
"step": 78500
},
{
"epoch": 16.0,
"eval_loss": 1.6249719858169556,
"eval_runtime": 77.7315,
"eval_samples_per_second": 127.014,
"eval_steps_per_second": 15.888,
"step": 78976
},
{
"epoch": 16.0,
"grad_norm": 20.166460037231445,
"learning_rate": 1.9975688816855756e-06,
"loss": 1.4967,
"step": 79000
},
{
"epoch": 16.11,
"grad_norm": 12.910844802856445,
"learning_rate": 1.946920583468396e-06,
"loss": 1.5459,
"step": 79500
},
{
"epoch": 16.21,
"grad_norm": 15.807537078857422,
"learning_rate": 1.8962722852512159e-06,
"loss": 1.4823,
"step": 80000
},
{
"epoch": 16.31,
"grad_norm": 15.588082313537598,
"learning_rate": 1.845623987034036e-06,
"loss": 1.5009,
"step": 80500
},
{
"epoch": 16.41,
"grad_norm": 19.26714324951172,
"learning_rate": 1.794975688816856e-06,
"loss": 1.5067,
"step": 81000
},
{
"epoch": 16.51,
"grad_norm": 13.45703411102295,
"learning_rate": 1.744327390599676e-06,
"loss": 1.5131,
"step": 81500
},
{
"epoch": 16.61,
"grad_norm": 19.007076263427734,
"learning_rate": 1.693679092382496e-06,
"loss": 1.5047,
"step": 82000
},
{
"epoch": 16.71,
"grad_norm": 19.805233001708984,
"learning_rate": 1.6430307941653161e-06,
"loss": 1.5133,
"step": 82500
},
{
"epoch": 16.82,
"grad_norm": 16.605981826782227,
"learning_rate": 1.5923824959481362e-06,
"loss": 1.5214,
"step": 83000
},
{
"epoch": 16.92,
"grad_norm": 18.25604820251465,
"learning_rate": 1.5417341977309562e-06,
"loss": 1.5488,
"step": 83500
},
{
"epoch": 17.0,
"eval_loss": 1.6310968399047852,
"eval_runtime": 77.7353,
"eval_samples_per_second": 127.008,
"eval_steps_per_second": 15.887,
"step": 83912
},
{
"epoch": 17.02,
"grad_norm": 21.933780670166016,
"learning_rate": 1.4910858995137765e-06,
"loss": 1.4996,
"step": 84000
},
{
"epoch": 17.12,
"grad_norm": 14.489141464233398,
"learning_rate": 1.4404376012965966e-06,
"loss": 1.5357,
"step": 84500
},
{
"epoch": 17.22,
"grad_norm": 24.40933609008789,
"learning_rate": 1.3897893030794166e-06,
"loss": 1.4992,
"step": 85000
},
{
"epoch": 17.32,
"grad_norm": 22.278676986694336,
"learning_rate": 1.3391410048622367e-06,
"loss": 1.508,
"step": 85500
},
{
"epoch": 17.42,
"grad_norm": 14.33293342590332,
"learning_rate": 1.2884927066450567e-06,
"loss": 1.5416,
"step": 86000
},
{
"epoch": 17.52,
"grad_norm": 9.797683715820312,
"learning_rate": 1.237844408427877e-06,
"loss": 1.5316,
"step": 86500
},
{
"epoch": 17.63,
"grad_norm": 22.992408752441406,
"learning_rate": 1.187196110210697e-06,
"loss": 1.5616,
"step": 87000
},
{
"epoch": 17.73,
"grad_norm": 22.246749877929688,
"learning_rate": 1.136547811993517e-06,
"loss": 1.5187,
"step": 87500
},
{
"epoch": 17.83,
"grad_norm": 15.32419204711914,
"learning_rate": 1.0858995137763371e-06,
"loss": 1.519,
"step": 88000
},
{
"epoch": 17.93,
"grad_norm": 15.239709854125977,
"learning_rate": 1.0352512155591574e-06,
"loss": 1.5152,
"step": 88500
},
{
"epoch": 18.0,
"eval_loss": null,
"eval_runtime": 77.7064,
"eval_samples_per_second": 127.055,
"eval_steps_per_second": 15.893,
"step": 88848
},
{
"epoch": 18.03,
"grad_norm": 17.708662033081055,
"learning_rate": 9.846029173419774e-07,
"loss": 1.5052,
"step": 89000
},
{
"epoch": 18.13,
"grad_norm": 21.949464797973633,
"learning_rate": 9.339546191247975e-07,
"loss": 1.4853,
"step": 89500
},
{
"epoch": 18.23,
"grad_norm": 22.907304763793945,
"learning_rate": 8.833063209076175e-07,
"loss": 1.5041,
"step": 90000
},
{
"epoch": 18.33,
"grad_norm": 16.836776733398438,
"learning_rate": 8.326580226904377e-07,
"loss": 1.5395,
"step": 90500
},
{
"epoch": 18.44,
"grad_norm": 24.573055267333984,
"learning_rate": 7.820097244732577e-07,
"loss": 1.5261,
"step": 91000
},
{
"epoch": 18.54,
"grad_norm": 16.58083152770996,
"learning_rate": 7.313614262560778e-07,
"loss": 1.501,
"step": 91500
},
{
"epoch": 18.64,
"grad_norm": 13.214485168457031,
"learning_rate": 6.807131280388978e-07,
"loss": 1.4723,
"step": 92000
},
{
"epoch": 18.74,
"grad_norm": 20.009634017944336,
"learning_rate": 6.300648298217181e-07,
"loss": 1.5101,
"step": 92500
},
{
"epoch": 18.84,
"grad_norm": 17.555011749267578,
"learning_rate": 5.794165316045381e-07,
"loss": 1.495,
"step": 93000
},
{
"epoch": 18.94,
"grad_norm": 23.471906661987305,
"learning_rate": 5.287682333873582e-07,
"loss": 1.5066,
"step": 93500
},
{
"epoch": 19.0,
"eval_loss": 1.5963941812515259,
"eval_runtime": 77.7338,
"eval_samples_per_second": 127.01,
"eval_steps_per_second": 15.888,
"step": 93784
},
{
"epoch": 19.04,
"grad_norm": 23.58425521850586,
"learning_rate": 4.781199351701784e-07,
"loss": 1.4951,
"step": 94000
},
{
"epoch": 19.15,
"grad_norm": 33.22106170654297,
"learning_rate": 4.274716369529984e-07,
"loss": 1.46,
"step": 94500
},
{
"epoch": 19.25,
"grad_norm": 19.294275283813477,
"learning_rate": 3.768233387358185e-07,
"loss": 1.5119,
"step": 95000
},
{
"epoch": 19.35,
"grad_norm": 16.35405158996582,
"learning_rate": 3.261750405186386e-07,
"loss": 1.495,
"step": 95500
},
{
"epoch": 19.45,
"grad_norm": 12.868066787719727,
"learning_rate": 2.755267423014587e-07,
"loss": 1.507,
"step": 96000
},
{
"epoch": 19.55,
"grad_norm": 18.227985382080078,
"learning_rate": 2.248784440842788e-07,
"loss": 1.5176,
"step": 96500
},
{
"epoch": 19.65,
"grad_norm": 23.572263717651367,
"learning_rate": 1.7423014586709888e-07,
"loss": 1.5273,
"step": 97000
},
{
"epoch": 19.75,
"grad_norm": 13.487857818603516,
"learning_rate": 1.2358184764991896e-07,
"loss": 1.514,
"step": 97500
},
{
"epoch": 19.85,
"grad_norm": 13.97512435913086,
"learning_rate": 7.293354943273906e-08,
"loss": 1.5456,
"step": 98000
},
{
"epoch": 19.96,
"grad_norm": 21.878570556640625,
"learning_rate": 2.228525121555916e-08,
"loss": 1.4764,
"step": 98500
},
{
"epoch": 20.0,
"eval_loss": 1.5965242385864258,
"eval_runtime": 77.7339,
"eval_samples_per_second": 127.01,
"eval_steps_per_second": 15.888,
"step": 98720
},
{
"epoch": 20.0,
"step": 98720,
"total_flos": 2.0786493066313728e+17,
"train_loss": 1.721275230976519,
"train_runtime": 20874.6418,
"train_samples_per_second": 37.833,
"train_steps_per_second": 4.729
}
],
"logging_steps": 500,
"max_steps": 98720,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"total_flos": 2.0786493066313728e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}