quip-4k-gemma/checkpoint-792/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.971724787935909,
"eval_steps": 500,
"global_step": 792,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03770028275212064,
"grad_norm": 3.2918148040771484,
"learning_rate": 4.9995083170283816e-05,
"loss": 4.3742,
"num_input_tokens_seen": 50544,
"step": 5
},
{
"epoch": 0.07540056550424128,
"grad_norm": 3.0331127643585205,
"learning_rate": 4.998033461515242e-05,
"loss": 3.8434,
"num_input_tokens_seen": 104080,
"step": 10
},
{
"epoch": 0.11310084825636192,
"grad_norm": 3.322950601577759,
"learning_rate": 4.9955760135896534e-05,
"loss": 3.6027,
"num_input_tokens_seen": 155776,
"step": 15
},
{
"epoch": 0.15080113100848255,
"grad_norm": 2.7705531120300293,
"learning_rate": 4.992136939879856e-05,
"loss": 3.4102,
"num_input_tokens_seen": 203184,
"step": 20
},
{
"epoch": 0.1885014137606032,
"grad_norm": 3.4572091102600098,
"learning_rate": 4.9877175931330346e-05,
"loss": 3.153,
"num_input_tokens_seen": 255744,
"step": 25
},
{
"epoch": 0.22620169651272384,
"grad_norm": 2.0412893295288086,
"learning_rate": 4.982319711683221e-05,
"loss": 3.0949,
"num_input_tokens_seen": 307808,
"step": 30
},
{
"epoch": 0.2639019792648445,
"grad_norm": 2.0765998363494873,
"learning_rate": 4.975945418767529e-05,
"loss": 3.0392,
"num_input_tokens_seen": 358192,
"step": 35
},
{
"epoch": 0.3016022620169651,
"grad_norm": 2.2433698177337646,
"learning_rate": 4.968597221690986e-05,
"loss": 3.0685,
"num_input_tokens_seen": 408832,
"step": 40
},
{
"epoch": 0.3393025447690858,
"grad_norm": 2.7639973163604736,
"learning_rate": 4.96027801084029e-05,
"loss": 3.0385,
"num_input_tokens_seen": 458368,
"step": 45
},
{
"epoch": 0.3770028275212064,
"grad_norm": 2.0680019855499268,
"learning_rate": 4.950991058546893e-05,
"loss": 3.1079,
"num_input_tokens_seen": 512160,
"step": 50
},
{
"epoch": 0.41470311027332707,
"grad_norm": 2.181554079055786,
"learning_rate": 4.940740017799833e-05,
"loss": 2.9883,
"num_input_tokens_seen": 562928,
"step": 55
},
{
"epoch": 0.4524033930254477,
"grad_norm": 2.0724411010742188,
"learning_rate": 4.929528920808854e-05,
"loss": 2.969,
"num_input_tokens_seen": 613072,
"step": 60
},
{
"epoch": 0.49010367577756836,
"grad_norm": 2.5529158115386963,
"learning_rate": 4.917362177418342e-05,
"loss": 3.0019,
"num_input_tokens_seen": 664640,
"step": 65
},
{
"epoch": 0.527803958529689,
"grad_norm": 2.3983142375946045,
"learning_rate": 4.904244573372733e-05,
"loss": 2.9984,
"num_input_tokens_seen": 716800,
"step": 70
},
{
"epoch": 0.5655042412818096,
"grad_norm": 2.206209182739258,
"learning_rate": 4.8901812684340564e-05,
"loss": 2.9848,
"num_input_tokens_seen": 766528,
"step": 75
},
{
"epoch": 0.6032045240339302,
"grad_norm": 2.22698974609375,
"learning_rate": 4.8751777943523634e-05,
"loss": 2.9169,
"num_input_tokens_seen": 817376,
"step": 80
},
{
"epoch": 0.6409048067860509,
"grad_norm": 2.4569709300994873,
"learning_rate": 4.8592400526898314e-05,
"loss": 2.8844,
"num_input_tokens_seen": 864688,
"step": 85
},
{
"epoch": 0.6786050895381716,
"grad_norm": 2.195237636566162,
"learning_rate": 4.842374312499405e-05,
"loss": 2.9422,
"num_input_tokens_seen": 913216,
"step": 90
},
{
"epoch": 0.7163053722902922,
"grad_norm": 2.54585599899292,
"learning_rate": 4.824587207858888e-05,
"loss": 2.8431,
"num_input_tokens_seen": 964144,
"step": 95
},
{
"epoch": 0.7540056550424128,
"grad_norm": 2.3841867446899414,
"learning_rate": 4.805885735261454e-05,
"loss": 2.8377,
"num_input_tokens_seen": 1018768,
"step": 100
},
{
"epoch": 0.7917059377945335,
"grad_norm": 2.359386920928955,
"learning_rate": 4.786277250863599e-05,
"loss": 2.7843,
"num_input_tokens_seen": 1070592,
"step": 105
},
{
"epoch": 0.8294062205466541,
"grad_norm": 2.0952038764953613,
"learning_rate": 4.765769467591625e-05,
"loss": 2.8218,
"num_input_tokens_seen": 1125392,
"step": 110
},
{
"epoch": 0.8671065032987747,
"grad_norm": 2.222754955291748,
"learning_rate": 4.744370452107789e-05,
"loss": 2.877,
"num_input_tokens_seen": 1176256,
"step": 115
},
{
"epoch": 0.9048067860508954,
"grad_norm": 2.3233537673950195,
"learning_rate": 4.722088621637309e-05,
"loss": 2.9028,
"num_input_tokens_seen": 1225376,
"step": 120
},
{
"epoch": 0.942507068803016,
"grad_norm": 2.400575876235962,
"learning_rate": 4.698932740657479e-05,
"loss": 2.762,
"num_input_tokens_seen": 1277024,
"step": 125
},
{
"epoch": 0.9802073515551367,
"grad_norm": 2.592116117477417,
"learning_rate": 4.6749119174501975e-05,
"loss": 2.9164,
"num_input_tokens_seen": 1322912,
"step": 130
},
{
"epoch": 1.0179076343072573,
"grad_norm": 2.563326120376587,
"learning_rate": 4.6500356005192514e-05,
"loss": 2.736,
"num_input_tokens_seen": 1368624,
"step": 135
},
{
"epoch": 1.055607917059378,
"grad_norm": 2.3927371501922607,
"learning_rate": 4.6243135748737864e-05,
"loss": 2.7414,
"num_input_tokens_seen": 1417664,
"step": 140
},
{
"epoch": 1.0933081998114986,
"grad_norm": 2.597975492477417,
"learning_rate": 4.597755958179406e-05,
"loss": 2.6961,
"num_input_tokens_seen": 1469120,
"step": 145
},
{
"epoch": 1.1310084825636193,
"grad_norm": 2.932581663131714,
"learning_rate": 4.570373196778427e-05,
"loss": 2.6134,
"num_input_tokens_seen": 1521632,
"step": 150
},
{
"epoch": 1.1687087653157398,
"grad_norm": 3.28389310836792,
"learning_rate": 4.5421760615808474e-05,
"loss": 2.7548,
"num_input_tokens_seen": 1565296,
"step": 155
},
{
"epoch": 1.2064090480678604,
"grad_norm": 2.927279233932495,
"learning_rate": 4.513175643827647e-05,
"loss": 2.702,
"num_input_tokens_seen": 1617088,
"step": 160
},
{
"epoch": 1.244109330819981,
"grad_norm": 2.840573310852051,
"learning_rate": 4.4833833507280884e-05,
"loss": 2.6893,
"num_input_tokens_seen": 1663584,
"step": 165
},
{
"epoch": 1.2818096135721018,
"grad_norm": 2.6322081089019775,
"learning_rate": 4.4528109009727336e-05,
"loss": 2.5671,
"num_input_tokens_seen": 1713744,
"step": 170
},
{
"epoch": 1.3195098963242224,
"grad_norm": 3.1280879974365234,
"learning_rate": 4.42147032012394e-05,
"loss": 2.7682,
"num_input_tokens_seen": 1762768,
"step": 175
},
{
"epoch": 1.3572101790763431,
"grad_norm": 3.1128265857696533,
"learning_rate": 4.389373935885646e-05,
"loss": 2.7062,
"num_input_tokens_seen": 1815808,
"step": 180
},
{
"epoch": 1.3949104618284638,
"grad_norm": 2.952150344848633,
"learning_rate": 4.356534373254316e-05,
"loss": 2.7092,
"num_input_tokens_seen": 1871040,
"step": 185
},
{
"epoch": 1.4326107445805842,
"grad_norm": 3.0146102905273438,
"learning_rate": 4.322964549552943e-05,
"loss": 2.6518,
"num_input_tokens_seen": 1924048,
"step": 190
},
{
"epoch": 1.4703110273327051,
"grad_norm": 2.999300956726074,
"learning_rate": 4.288677669350066e-05,
"loss": 2.6592,
"num_input_tokens_seen": 1972720,
"step": 195
},
{
"epoch": 1.5080113100848256,
"grad_norm": 3.575253963470459,
"learning_rate": 4.2536872192658036e-05,
"loss": 2.716,
"num_input_tokens_seen": 2022112,
"step": 200
},
{
"epoch": 1.5457115928369463,
"grad_norm": 3.1339428424835205,
"learning_rate": 4.218006962666934e-05,
"loss": 2.6746,
"num_input_tokens_seen": 2072000,
"step": 205
},
{
"epoch": 1.583411875589067,
"grad_norm": 3.130823850631714,
"learning_rate": 4.181650934253132e-05,
"loss": 2.6306,
"num_input_tokens_seen": 2125632,
"step": 210
},
{
"epoch": 1.6211121583411876,
"grad_norm": 3.1828997135162354,
"learning_rate": 4.144633434536467e-05,
"loss": 2.5678,
"num_input_tokens_seen": 2174464,
"step": 215
},
{
"epoch": 1.6588124410933083,
"grad_norm": 3.3879778385162354,
"learning_rate": 4.1069690242163484e-05,
"loss": 2.7334,
"num_input_tokens_seen": 2223408,
"step": 220
},
{
"epoch": 1.6965127238454287,
"grad_norm": 3.5921592712402344,
"learning_rate": 4.06867251845213e-05,
"loss": 2.6658,
"num_input_tokens_seen": 2281296,
"step": 225
},
{
"epoch": 1.7342130065975496,
"grad_norm": 3.4126648902893066,
"learning_rate": 4.0297589810356165e-05,
"loss": 2.6567,
"num_input_tokens_seen": 2334176,
"step": 230
},
{
"epoch": 1.77191328934967,
"grad_norm": 3.504786252975464,
"learning_rate": 3.9902437184657784e-05,
"loss": 2.6131,
"num_input_tokens_seen": 2389296,
"step": 235
},
{
"epoch": 1.8096135721017907,
"grad_norm": 3.459768056869507,
"learning_rate": 3.9501422739279956e-05,
"loss": 2.662,
"num_input_tokens_seen": 2442800,
"step": 240
},
{
"epoch": 1.8473138548539114,
"grad_norm": 2.944490909576416,
"learning_rate": 3.909470421180201e-05,
"loss": 2.6339,
"num_input_tokens_seen": 2494912,
"step": 245
},
{
"epoch": 1.885014137606032,
"grad_norm": 3.4897429943084717,
"learning_rate": 3.8682441583483314e-05,
"loss": 2.6338,
"num_input_tokens_seen": 2544624,
"step": 250
},
{
"epoch": 1.9227144203581528,
"grad_norm": 3.15039324760437,
"learning_rate": 3.8264797016335205e-05,
"loss": 2.6844,
"num_input_tokens_seen": 2597792,
"step": 255
},
{
"epoch": 1.9604147031102732,
"grad_norm": 3.5575578212738037,
"learning_rate": 3.7841934789335164e-05,
"loss": 2.5939,
"num_input_tokens_seen": 2646544,
"step": 260
},
{
"epoch": 1.998114985862394,
"grad_norm": 3.97521710395813,
"learning_rate": 3.741402123380828e-05,
"loss": 2.7393,
"num_input_tokens_seen": 2700224,
"step": 265
},
{
"epoch": 2.0358152686145146,
"grad_norm": 3.1736276149749756,
"learning_rate": 3.6981224668001424e-05,
"loss": 2.4495,
"num_input_tokens_seen": 2749104,
"step": 270
},
{
"epoch": 2.0735155513666355,
"grad_norm": 3.633033275604248,
"learning_rate": 3.654371533087586e-05,
"loss": 2.3823,
"num_input_tokens_seen": 2802992,
"step": 275
},
{
"epoch": 2.111215834118756,
"grad_norm": 3.4200243949890137,
"learning_rate": 3.610166531514436e-05,
"loss": 2.474,
"num_input_tokens_seen": 2844512,
"step": 280
},
{
"epoch": 2.1489161168708764,
"grad_norm": 4.055693626403809,
"learning_rate": 3.565524849957921e-05,
"loss": 2.4776,
"num_input_tokens_seen": 2894400,
"step": 285
},
{
"epoch": 2.1866163996229973,
"grad_norm": 4.254647254943848,
"learning_rate": 3.520464048061758e-05,
"loss": 2.4709,
"num_input_tokens_seen": 2944688,
"step": 290
},
{
"epoch": 2.2243166823751177,
"grad_norm": 4.337226390838623,
"learning_rate": 3.47500185032913e-05,
"loss": 2.5009,
"num_input_tokens_seen": 2994672,
"step": 295
},
{
"epoch": 2.2620169651272386,
"grad_norm": 4.424121379852295,
"learning_rate": 3.4291561391508185e-05,
"loss": 2.4524,
"num_input_tokens_seen": 3045344,
"step": 300
},
{
"epoch": 2.299717247879359,
"grad_norm": 4.178481101989746,
"learning_rate": 3.3829449477712324e-05,
"loss": 2.4083,
"num_input_tokens_seen": 3100688,
"step": 305
},
{
"epoch": 2.3374175306314795,
"grad_norm": 4.315364837646484,
"learning_rate": 3.336386453195088e-05,
"loss": 2.4713,
"num_input_tokens_seen": 3154896,
"step": 310
},
{
"epoch": 2.3751178133836004,
"grad_norm": 4.445486545562744,
"learning_rate": 3.2894989690375626e-05,
"loss": 2.3986,
"num_input_tokens_seen": 3209120,
"step": 315
},
{
"epoch": 2.412818096135721,
"grad_norm": 4.80281400680542,
"learning_rate": 3.2423009383206876e-05,
"loss": 2.4258,
"num_input_tokens_seen": 3257984,
"step": 320
},
{
"epoch": 2.4505183788878417,
"grad_norm": 4.705103397369385,
"learning_rate": 3.194810926218861e-05,
"loss": 2.4126,
"num_input_tokens_seen": 3309424,
"step": 325
},
{
"epoch": 2.488218661639962,
"grad_norm": 4.281659126281738,
"learning_rate": 3.147047612756302e-05,
"loss": 2.3639,
"num_input_tokens_seen": 3359152,
"step": 330
},
{
"epoch": 2.525918944392083,
"grad_norm": 4.493980407714844,
"learning_rate": 3.099029785459328e-05,
"loss": 2.5176,
"num_input_tokens_seen": 3406784,
"step": 335
},
{
"epoch": 2.5636192271442035,
"grad_norm": 4.339905261993408,
"learning_rate": 3.0507763319663517e-05,
"loss": 2.4779,
"num_input_tokens_seen": 3459328,
"step": 340
},
{
"epoch": 2.6013195098963244,
"grad_norm": 4.67468786239624,
"learning_rate": 3.002306232598497e-05,
"loss": 2.3833,
"num_input_tokens_seen": 3511056,
"step": 345
},
{
"epoch": 2.639019792648445,
"grad_norm": 4.905932426452637,
"learning_rate": 2.9536385528937567e-05,
"loss": 2.3794,
"num_input_tokens_seen": 3562352,
"step": 350
},
{
"epoch": 2.6767200754005653,
"grad_norm": 4.684591770172119,
"learning_rate": 2.9047924361076345e-05,
"loss": 2.4883,
"num_input_tokens_seen": 3615664,
"step": 355
},
{
"epoch": 2.7144203581526862,
"grad_norm": 4.377009868621826,
"learning_rate": 2.8557870956832132e-05,
"loss": 2.3423,
"num_input_tokens_seen": 3661424,
"step": 360
},
{
"epoch": 2.7521206409048067,
"grad_norm": 5.146539688110352,
"learning_rate": 2.8066418076936167e-05,
"loss": 2.4092,
"num_input_tokens_seen": 3710592,
"step": 365
},
{
"epoch": 2.7898209236569276,
"grad_norm": 5.5937910079956055,
"learning_rate": 2.7573759032598366e-05,
"loss": 2.4818,
"num_input_tokens_seen": 3765728,
"step": 370
},
{
"epoch": 2.827521206409048,
"grad_norm": 4.4958696365356445,
"learning_rate": 2.7080087609469062e-05,
"loss": 2.4587,
"num_input_tokens_seen": 3815360,
"step": 375
},
{
"epoch": 2.8652214891611685,
"grad_norm": 4.503344535827637,
"learning_rate": 2.6585597991414114e-05,
"loss": 2.3462,
"num_input_tokens_seen": 3868096,
"step": 380
},
{
"epoch": 2.9029217719132894,
"grad_norm": 4.773792743682861,
"learning_rate": 2.6090484684133404e-05,
"loss": 2.3713,
"num_input_tokens_seen": 3913696,
"step": 385
},
{
"epoch": 2.9406220546654103,
"grad_norm": 4.8644537925720215,
"learning_rate": 2.5594942438652688e-05,
"loss": 2.4618,
"num_input_tokens_seen": 3971840,
"step": 390
},
{
"epoch": 2.9783223374175307,
"grad_norm": 5.539215087890625,
"learning_rate": 2.509916617471903e-05,
"loss": 2.5454,
"num_input_tokens_seen": 4025040,
"step": 395
},
{
"epoch": 3.016022620169651,
"grad_norm": 4.475778579711914,
"learning_rate": 2.46033509041298e-05,
"loss": 2.3007,
"num_input_tokens_seen": 4075488,
"step": 400
},
{
"epoch": 3.053722902921772,
"grad_norm": 4.82028341293335,
"learning_rate": 2.410769165402549e-05,
"loss": 2.274,
"num_input_tokens_seen": 4130496,
"step": 405
},
{
"epoch": 3.0914231856738925,
"grad_norm": 5.513036251068115,
"learning_rate": 2.3612383390176503e-05,
"loss": 2.2457,
"num_input_tokens_seen": 4181504,
"step": 410
},
{
"epoch": 3.1291234684260134,
"grad_norm": 5.219841957092285,
"learning_rate": 2.3117620940294048e-05,
"loss": 2.2595,
"num_input_tokens_seen": 4236816,
"step": 415
},
{
"epoch": 3.166823751178134,
"grad_norm": 5.527017593383789,
"learning_rate": 2.2623598917395438e-05,
"loss": 2.2326,
"num_input_tokens_seen": 4289488,
"step": 420
},
{
"epoch": 3.2045240339302543,
"grad_norm": 5.297417163848877,
"learning_rate": 2.213051164325366e-05,
"loss": 2.2595,
"num_input_tokens_seen": 4334704,
"step": 425
},
{
"epoch": 3.242224316682375,
"grad_norm": 5.43293571472168,
"learning_rate": 2.1638553071961708e-05,
"loss": 2.2084,
"num_input_tokens_seen": 4377360,
"step": 430
},
{
"epoch": 3.2799245994344957,
"grad_norm": 5.788747310638428,
"learning_rate": 2.1147916713641367e-05,
"loss": 2.21,
"num_input_tokens_seen": 4428544,
"step": 435
},
{
"epoch": 3.3176248821866166,
"grad_norm": 6.185176372528076,
"learning_rate": 2.0658795558326743e-05,
"loss": 2.2449,
"num_input_tokens_seen": 4477664,
"step": 440
},
{
"epoch": 3.355325164938737,
"grad_norm": 5.564029693603516,
"learning_rate": 2.017138200005236e-05,
"loss": 2.2457,
"num_input_tokens_seen": 4533792,
"step": 445
},
{
"epoch": 3.3930254476908575,
"grad_norm": 5.952132225036621,
"learning_rate": 1.9685867761175584e-05,
"loss": 2.3035,
"num_input_tokens_seen": 4584800,
"step": 450
},
{
"epoch": 3.4307257304429783,
"grad_norm": 5.484558582305908,
"learning_rate": 1.9202443816963425e-05,
"loss": 2.3204,
"num_input_tokens_seen": 4634976,
"step": 455
},
{
"epoch": 3.468426013195099,
"grad_norm": 6.121850490570068,
"learning_rate": 1.872130032047302e-05,
"loss": 2.2055,
"num_input_tokens_seen": 4691344,
"step": 460
},
{
"epoch": 3.5061262959472197,
"grad_norm": 6.345475673675537,
"learning_rate": 1.824262652775568e-05,
"loss": 2.3327,
"num_input_tokens_seen": 4745536,
"step": 465
},
{
"epoch": 3.54382657869934,
"grad_norm": 6.4370574951171875,
"learning_rate": 1.7766610723413684e-05,
"loss": 2.2214,
"num_input_tokens_seen": 4798128,
"step": 470
},
{
"epoch": 3.581526861451461,
"grad_norm": 6.708219528198242,
"learning_rate": 1.7293440146539196e-05,
"loss": 2.3097,
"num_input_tokens_seen": 4847632,
"step": 475
},
{
"epoch": 3.6192271442035815,
"grad_norm": 5.383622169494629,
"learning_rate": 1.682330091706446e-05,
"loss": 2.2966,
"num_input_tokens_seen": 4905648,
"step": 480
},
{
"epoch": 3.6569274269557024,
"grad_norm": 6.210061550140381,
"learning_rate": 1.6356377962552238e-05,
"loss": 2.2006,
"num_input_tokens_seen": 4955600,
"step": 485
},
{
"epoch": 3.694627709707823,
"grad_norm": 6.637734889984131,
"learning_rate": 1.589285494545514e-05,
"loss": 2.2463,
"num_input_tokens_seen": 5007424,
"step": 490
},
{
"epoch": 3.7323279924599433,
"grad_norm": 5.893795967102051,
"learning_rate": 1.5432914190872757e-05,
"loss": 2.2045,
"num_input_tokens_seen": 5058848,
"step": 495
},
{
"epoch": 3.770028275212064,
"grad_norm": 5.891578674316406,
"learning_rate": 1.4976736614834664e-05,
"loss": 2.135,
"num_input_tokens_seen": 5109904,
"step": 500
},
{
"epoch": 3.8077285579641846,
"grad_norm": 6.888275623321533,
"learning_rate": 1.4524501653137787e-05,
"loss": 2.3023,
"num_input_tokens_seen": 5156496,
"step": 505
},
{
"epoch": 3.8454288407163055,
"grad_norm": 5.987553119659424,
"learning_rate": 1.4076387190766017e-05,
"loss": 2.2636,
"num_input_tokens_seen": 5207824,
"step": 510
},
{
"epoch": 3.883129123468426,
"grad_norm": 6.791808128356934,
"learning_rate": 1.363256949191972e-05,
"loss": 2.1942,
"num_input_tokens_seen": 5256608,
"step": 515
},
{
"epoch": 3.9208294062205464,
"grad_norm": 6.749192714691162,
"learning_rate": 1.3193223130682936e-05,
"loss": 2.2731,
"num_input_tokens_seen": 5305584,
"step": 520
},
{
"epoch": 3.9585296889726673,
"grad_norm": 5.9771647453308105,
"learning_rate": 1.2758520922355226e-05,
"loss": 2.1669,
"num_input_tokens_seen": 5349712,
"step": 525
},
{
"epoch": 3.9962299717247878,
"grad_norm": 6.35679817199707,
"learning_rate": 1.2328633855475429e-05,
"loss": 2.2146,
"num_input_tokens_seen": 5400336,
"step": 530
},
{
"epoch": 4.033930254476909,
"grad_norm": 6.124676704406738,
"learning_rate": 1.1903731024563966e-05,
"loss": 2.0324,
"num_input_tokens_seen": 5451776,
"step": 535
},
{
"epoch": 4.071630537229029,
"grad_norm": 6.622769355773926,
"learning_rate": 1.148397956361007e-05,
"loss": 2.0762,
"num_input_tokens_seen": 5507520,
"step": 540
},
{
"epoch": 4.10933081998115,
"grad_norm": 7.076972961425781,
"learning_rate": 1.106954458033026e-05,
"loss": 2.0949,
"num_input_tokens_seen": 5562688,
"step": 545
},
{
"epoch": 4.147031102733271,
"grad_norm": 6.695667266845703,
"learning_rate": 1.0660589091223855e-05,
"loss": 2.1853,
"num_input_tokens_seen": 5610800,
"step": 550
},
{
"epoch": 4.184731385485391,
"grad_norm": 7.716992378234863,
"learning_rate": 1.025727395745095e-05,
"loss": 2.1706,
"num_input_tokens_seen": 5657616,
"step": 555
},
{
"epoch": 4.222431668237512,
"grad_norm": 6.8223419189453125,
"learning_rate": 9.859757821558337e-06,
"loss": 2.1199,
"num_input_tokens_seen": 5710960,
"step": 560
},
{
"epoch": 4.260131950989632,
"grad_norm": 6.87358283996582,
"learning_rate": 9.468197045077976e-06,
"loss": 2.0353,
"num_input_tokens_seen": 5756608,
"step": 565
},
{
"epoch": 4.297832233741753,
"grad_norm": 7.660863399505615,
"learning_rate": 9.082745647022797e-06,
"loss": 2.1101,
"num_input_tokens_seen": 5812304,
"step": 570
},
{
"epoch": 4.335532516493874,
"grad_norm": 7.863148212432861,
"learning_rate": 8.703555243303835e-06,
"loss": 2.1076,
"num_input_tokens_seen": 5859808,
"step": 575
},
{
"epoch": 4.3732327992459945,
"grad_norm": 7.298150539398193,
"learning_rate": 8.330774987092712e-06,
"loss": 2.0305,
"num_input_tokens_seen": 5908784,
"step": 580
},
{
"epoch": 4.410933081998115,
"grad_norm": 6.835300922393799,
"learning_rate": 7.96455151015272e-06,
"loss": 2.1132,
"num_input_tokens_seen": 5958672,
"step": 585
},
{
"epoch": 4.448633364750235,
"grad_norm": 6.710065841674805,
"learning_rate": 7.605028865161809e-06,
"loss": 2.1527,
"num_input_tokens_seen": 6010720,
"step": 590
},
{
"epoch": 4.486333647502356,
"grad_norm": 6.827284812927246,
"learning_rate": 7.25234846904993e-06,
"loss": 2.0948,
"num_input_tokens_seen": 6061440,
"step": 595
},
{
"epoch": 4.524033930254477,
"grad_norm": 7.066997528076172,
"learning_rate": 6.906649047373246e-06,
"loss": 2.1222,
"num_input_tokens_seen": 6115216,
"step": 600
},
{
"epoch": 4.561734213006598,
"grad_norm": 7.966955184936523,
"learning_rate": 6.568066579746901e-06,
"loss": 2.0719,
"num_input_tokens_seen": 6160944,
"step": 605
},
{
"epoch": 4.599434495758718,
"grad_norm": 7.257175922393799,
"learning_rate": 6.2367342463579475e-06,
"loss": 2.1399,
"num_input_tokens_seen": 6209008,
"step": 610
},
{
"epoch": 4.6371347785108386,
"grad_norm": 7.445122241973877,
"learning_rate": 5.912782375579412e-06,
"loss": 2.0671,
"num_input_tokens_seen": 6258176,
"step": 615
},
{
"epoch": 4.674835061262959,
"grad_norm": 7.934208869934082,
"learning_rate": 5.596338392706077e-06,
"loss": 2.152,
"num_input_tokens_seen": 6308496,
"step": 620
},
{
"epoch": 4.71253534401508,
"grad_norm": 6.131651878356934,
"learning_rate": 5.2875267698322325e-06,
"loss": 2.0834,
"num_input_tokens_seen": 6358896,
"step": 625
},
{
"epoch": 4.750235626767201,
"grad_norm": 6.925292015075684,
"learning_rate": 4.986468976890993e-06,
"loss": 2.1373,
"num_input_tokens_seen": 6410896,
"step": 630
},
{
"epoch": 4.787935909519321,
"grad_norm": 6.970002174377441,
"learning_rate": 4.693283433874565e-06,
"loss": 2.1276,
"num_input_tokens_seen": 6459120,
"step": 635
},
{
"epoch": 4.825636192271442,
"grad_norm": 7.309933662414551,
"learning_rate": 4.408085464254183e-06,
"loss": 2.1481,
"num_input_tokens_seen": 6506048,
"step": 640
},
{
"epoch": 4.863336475023563,
"grad_norm": 6.540215492248535,
"learning_rate": 4.130987249617993e-06,
"loss": 2.0872,
"num_input_tokens_seen": 6560448,
"step": 645
},
{
"epoch": 4.9010367577756835,
"grad_norm": 6.6013360023498535,
"learning_rate": 3.8620977855448935e-06,
"loss": 2.1141,
"num_input_tokens_seen": 6616704,
"step": 650
},
{
"epoch": 4.938737040527804,
"grad_norm": 7.337521553039551,
"learning_rate": 3.601522838731461e-06,
"loss": 2.0778,
"num_input_tokens_seen": 6670192,
"step": 655
},
{
"epoch": 4.976437323279924,
"grad_norm": 7.133378505706787,
"learning_rate": 3.3493649053890326e-06,
"loss": 2.1827,
"num_input_tokens_seen": 6721600,
"step": 660
},
{
"epoch": 5.014137606032045,
"grad_norm": 7.371194839477539,
"learning_rate": 3.1057231709272077e-06,
"loss": 2.0695,
"num_input_tokens_seen": 6773936,
"step": 665
},
{
"epoch": 5.051837888784166,
"grad_norm": 7.494382858276367,
"learning_rate": 2.8706934709395892e-06,
"loss": 2.0689,
"num_input_tokens_seen": 6823488,
"step": 670
},
{
"epoch": 5.089538171536287,
"grad_norm": 7.376400947570801,
"learning_rate": 2.6443682535072177e-06,
"loss": 1.9623,
"num_input_tokens_seen": 6870576,
"step": 675
},
{
"epoch": 5.127238454288407,
"grad_norm": 7.1367316246032715,
"learning_rate": 2.4268365428344736e-06,
"loss": 2.0186,
"num_input_tokens_seen": 6923488,
"step": 680
},
{
"epoch": 5.1649387370405275,
"grad_norm": 9.0476655960083,
"learning_rate": 2.21818390423168e-06,
"loss": 1.9324,
"num_input_tokens_seen": 6968384,
"step": 685
},
{
"epoch": 5.202639019792649,
"grad_norm": 6.944507122039795,
"learning_rate": 2.0184924104583613e-06,
"loss": 1.9769,
"num_input_tokens_seen": 7012784,
"step": 690
},
{
"epoch": 5.240339302544769,
"grad_norm": 7.558785438537598,
"learning_rate": 1.8278406094401623e-06,
"loss": 1.9815,
"num_input_tokens_seen": 7058992,
"step": 695
},
{
"epoch": 5.27803958529689,
"grad_norm": 7.789961338043213,
"learning_rate": 1.6463034933723337e-06,
"loss": 2.0789,
"num_input_tokens_seen": 7107360,
"step": 700
},
{
"epoch": 5.31573986804901,
"grad_norm": 7.385551929473877,
"learning_rate": 1.4739524692218314e-06,
"loss": 2.0299,
"num_input_tokens_seen": 7154560,
"step": 705
},
{
"epoch": 5.353440150801131,
"grad_norm": 8.215983390808105,
"learning_rate": 1.3108553306396265e-06,
"loss": 2.0888,
"num_input_tokens_seen": 7207760,
"step": 710
},
{
"epoch": 5.391140433553252,
"grad_norm": 7.679669380187988,
"learning_rate": 1.1570762312943295e-06,
"loss": 2.054,
"num_input_tokens_seen": 7261248,
"step": 715
},
{
"epoch": 5.4288407163053725,
"grad_norm": 7.84255313873291,
"learning_rate": 1.0126756596375686e-06,
"loss": 1.9551,
"num_input_tokens_seen": 7309552,
"step": 720
},
{
"epoch": 5.466540999057493,
"grad_norm": 7.932110786437988,
"learning_rate": 8.777104151110826e-07,
"loss": 2.0533,
"num_input_tokens_seen": 7368576,
"step": 725
},
{
"epoch": 5.504241281809613,
"grad_norm": 8.4814453125,
"learning_rate": 7.522335858048707e-07,
"loss": 2.0151,
"num_input_tokens_seen": 7425056,
"step": 730
},
{
"epoch": 5.541941564561734,
"grad_norm": 8.199661254882812,
"learning_rate": 6.362945275751736e-07,
"loss": 2.0525,
"num_input_tokens_seen": 7473488,
"step": 735
},
{
"epoch": 5.579641847313855,
"grad_norm": 7.404112339019775,
"learning_rate": 5.299388446305343e-07,
"loss": 2.0759,
"num_input_tokens_seen": 7522656,
"step": 740
},
{
"epoch": 5.617342130065976,
"grad_norm": 6.568804740905762,
"learning_rate": 4.3320837159353813e-07,
"loss": 2.0374,
"num_input_tokens_seen": 7577104,
"step": 745
},
{
"epoch": 5.655042412818096,
"grad_norm": 6.717598915100098,
"learning_rate": 3.4614115704533767e-07,
"loss": 2.0137,
"num_input_tokens_seen": 7633024,
"step": 750
},
{
"epoch": 5.6927426955702165,
"grad_norm": 8.680829048156738,
"learning_rate": 2.687714485593462e-07,
"loss": 2.1025,
"num_input_tokens_seen": 7682736,
"step": 755
},
{
"epoch": 5.730442978322337,
"grad_norm": 7.93720817565918,
"learning_rate": 2.011296792301165e-07,
"loss": 2.1403,
"num_input_tokens_seen": 7737888,
"step": 760
},
{
"epoch": 5.768143261074458,
"grad_norm": 7.161721706390381,
"learning_rate": 1.4324245570256633e-07,
"loss": 2.0965,
"num_input_tokens_seen": 7788384,
"step": 765
},
{
"epoch": 5.805843543826579,
"grad_norm": 7.347177028656006,
"learning_rate": 9.513254770636137e-08,
"loss": 2.0826,
"num_input_tokens_seen": 7835600,
"step": 770
},
{
"epoch": 5.843543826578699,
"grad_norm": 7.278431415557861,
"learning_rate": 5.681887909952388e-08,
"loss": 2.1508,
"num_input_tokens_seen": 7888336,
"step": 775
},
{
"epoch": 5.88124410933082,
"grad_norm": 8.160250663757324,
"learning_rate": 2.831652042480093e-08,
"loss": 2.0338,
"num_input_tokens_seen": 7938960,
"step": 780
},
{
"epoch": 5.918944392082941,
"grad_norm": 7.380901336669922,
"learning_rate": 9.636682981720158e-09,
"loss": 2.0466,
"num_input_tokens_seen": 7991296,
"step": 785
},
{
"epoch": 5.956644674835061,
"grad_norm": 7.834613800048828,
"learning_rate": 7.867144166728846e-10,
"loss": 2.0436,
"num_input_tokens_seen": 8044336,
"step": 790
}
],
"logging_steps": 5,
"max_steps": 792,
"num_input_tokens_seen": 8063376,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.844904305885184e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
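
A minimal sketch of how one might inspect this trainer state offline, for example to plot the logged training loss and learning-rate schedule from log_history. It assumes the file has been downloaded locally as trainer_state.json and that matplotlib is installed; neither is part of the checkpoint itself.

# Sketch: load trainer_state.json and plot loss / learning rate vs. global step.
# Assumptions: file saved locally as "trainer_state.json"; matplotlib available.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history record carries step, epoch, loss, learning_rate,
# grad_norm and num_input_tokens_seen, as in the entries above.
history = state["log_history"]
steps = [rec["step"] for rec in history]
losses = [rec["loss"] for rec in history]
lrs = [rec["learning_rate"] for rec in history]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
fig.tight_layout()
plt.show()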