{
"best_metric": 0.3894089162349701,
"best_model_checkpoint": "prostate-mri-T2w-v05/checkpoint-6223",
"epoch": 7.0,
"eval_steps": 500,
"global_step": 6223,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.025871766029246346,
"grad_norm": 62.908626556396484,
"learning_rate": 2.5871766029246344e-07,
"loss": 0.9082,
"step": 23
},
{
"epoch": 0.05174353205849269,
"grad_norm": 17.856958389282227,
"learning_rate": 5.174353205849269e-07,
"loss": 0.9712,
"step": 46
},
{
"epoch": 0.07761529808773904,
"grad_norm": 14.738483428955078,
"learning_rate": 7.761529808773904e-07,
"loss": 0.7274,
"step": 69
},
{
"epoch": 0.10348706411698538,
"grad_norm": 11.495190620422363,
"learning_rate": 1.0348706411698538e-06,
"loss": 0.5668,
"step": 92
},
{
"epoch": 0.12935883014623173,
"grad_norm": 14.365918159484863,
"learning_rate": 1.2935883014623174e-06,
"loss": 0.5766,
"step": 115
},
{
"epoch": 0.15523059617547807,
"grad_norm": 4.656925678253174,
"learning_rate": 1.5523059617547808e-06,
"loss": 0.5059,
"step": 138
},
{
"epoch": 0.18110236220472442,
"grad_norm": 9.39604663848877,
"learning_rate": 1.8110236220472441e-06,
"loss": 0.4405,
"step": 161
},
{
"epoch": 0.20697412823397077,
"grad_norm": 9.823530197143555,
"learning_rate": 2.0697412823397075e-06,
"loss": 0.3319,
"step": 184
},
{
"epoch": 0.23284589426321708,
"grad_norm": 10.687150955200195,
"learning_rate": 2.328458942632171e-06,
"loss": 0.4602,
"step": 207
},
{
"epoch": 0.25871766029246346,
"grad_norm": 1.1277879476547241,
"learning_rate": 2.5871766029246347e-06,
"loss": 0.3568,
"step": 230
},
{
"epoch": 0.2845894263217098,
"grad_norm": 0.47847771644592285,
"learning_rate": 2.845894263217098e-06,
"loss": 0.6281,
"step": 253
},
{
"epoch": 0.31046119235095615,
"grad_norm": 0.5296943187713623,
"learning_rate": 3.1046119235095615e-06,
"loss": 0.3346,
"step": 276
},
{
"epoch": 0.3363329583802025,
"grad_norm": 11.51565170288086,
"learning_rate": 3.3633295838020245e-06,
"loss": 0.5892,
"step": 299
},
{
"epoch": 0.36220472440944884,
"grad_norm": 18.972782135009766,
"learning_rate": 3.6220472440944883e-06,
"loss": 0.7544,
"step": 322
},
{
"epoch": 0.3880764904386952,
"grad_norm": 10.762083053588867,
"learning_rate": 3.880764904386952e-06,
"loss": 0.6232,
"step": 345
},
{
"epoch": 0.41394825646794153,
"grad_norm": 11.391585350036621,
"learning_rate": 4.139482564679415e-06,
"loss": 0.4856,
"step": 368
},
{
"epoch": 0.4398200224971879,
"grad_norm": 1.076779842376709,
"learning_rate": 4.3982002249718785e-06,
"loss": 0.3987,
"step": 391
},
{
"epoch": 0.46569178852643417,
"grad_norm": 11.604365348815918,
"learning_rate": 4.656917885264342e-06,
"loss": 0.5967,
"step": 414
},
{
"epoch": 0.4915635545556805,
"grad_norm": 10.939277648925781,
"learning_rate": 4.915635545556806e-06,
"loss": 0.3712,
"step": 437
},
{
"epoch": 0.5174353205849269,
"grad_norm": 10.872827529907227,
"learning_rate": 5.1743532058492695e-06,
"loss": 0.4234,
"step": 460
},
{
"epoch": 0.5433070866141733,
"grad_norm": 13.358455657958984,
"learning_rate": 5.433070866141733e-06,
"loss": 0.6415,
"step": 483
},
{
"epoch": 0.5691788526434196,
"grad_norm": 0.5069867968559265,
"learning_rate": 5.691788526434196e-06,
"loss": 0.3304,
"step": 506
},
{
"epoch": 0.595050618672666,
"grad_norm": 11.854174613952637,
"learning_rate": 5.95050618672666e-06,
"loss": 0.2956,
"step": 529
},
{
"epoch": 0.6209223847019123,
"grad_norm": 0.6237831711769104,
"learning_rate": 6.209223847019123e-06,
"loss": 0.5922,
"step": 552
},
{
"epoch": 0.6467941507311586,
"grad_norm": 18.399309158325195,
"learning_rate": 6.467941507311586e-06,
"loss": 0.5529,
"step": 575
},
{
"epoch": 0.672665916760405,
"grad_norm": 1.0723528861999512,
"learning_rate": 6.726659167604049e-06,
"loss": 0.5464,
"step": 598
},
{
"epoch": 0.6985376827896513,
"grad_norm": 10.857867240905762,
"learning_rate": 6.985376827896513e-06,
"loss": 0.5258,
"step": 621
},
{
"epoch": 0.7244094488188977,
"grad_norm": 11.417038917541504,
"learning_rate": 7.244094488188977e-06,
"loss": 0.4599,
"step": 644
},
{
"epoch": 0.750281214848144,
"grad_norm": 12.366191864013672,
"learning_rate": 7.50281214848144e-06,
"loss": 0.5278,
"step": 667
},
{
"epoch": 0.7761529808773904,
"grad_norm": 59.656211853027344,
"learning_rate": 7.761529808773903e-06,
"loss": 0.4802,
"step": 690
},
{
"epoch": 0.8020247469066367,
"grad_norm": 1.6525479555130005,
"learning_rate": 8.020247469066367e-06,
"loss": 0.3855,
"step": 713
},
{
"epoch": 0.8278965129358831,
"grad_norm": 11.62271499633789,
"learning_rate": 8.27896512935883e-06,
"loss": 0.5377,
"step": 736
},
{
"epoch": 0.8537682789651294,
"grad_norm": 11.242469787597656,
"learning_rate": 8.537682789651294e-06,
"loss": 0.6707,
"step": 759
},
{
"epoch": 0.8796400449943758,
"grad_norm": 1.673377513885498,
"learning_rate": 8.796400449943757e-06,
"loss": 0.434,
"step": 782
},
{
"epoch": 0.905511811023622,
"grad_norm": 0.3748597800731659,
"learning_rate": 9.055118110236222e-06,
"loss": 0.4709,
"step": 805
},
{
"epoch": 0.9313835770528683,
"grad_norm": 10.111214637756348,
"learning_rate": 9.313835770528684e-06,
"loss": 0.5416,
"step": 828
},
{
"epoch": 0.9572553430821147,
"grad_norm": 11.05431842803955,
"learning_rate": 9.572553430821147e-06,
"loss": 0.4895,
"step": 851
},
{
"epoch": 0.983127109111361,
"grad_norm": 0.5102259516716003,
"learning_rate": 9.831271091113612e-06,
"loss": 0.6304,
"step": 874
},
{
"epoch": 1.0,
"eval_accuracy": 0.7857142857142857,
"eval_auc": 0.8011167033658384,
"eval_f1": 0.0,
"eval_loss": 0.7771126627922058,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 2.7441,
"eval_samples_per_second": 173.466,
"eval_steps_per_second": 21.865,
"step": 889
},
{
"epoch": 1.0089988751406074,
"grad_norm": 13.26599407196045,
"learning_rate": 1.0089988751406076e-05,
"loss": 0.3247,
"step": 897
},
{
"epoch": 1.0348706411698538,
"grad_norm": 11.029207229614258,
"learning_rate": 1.0348706411698539e-05,
"loss": 0.6095,
"step": 920
},
{
"epoch": 1.0607424071991,
"grad_norm": 0.07530930638313293,
"learning_rate": 1.0607424071991e-05,
"loss": 0.1472,
"step": 943
},
{
"epoch": 1.0866141732283465,
"grad_norm": 0.6263224482536316,
"learning_rate": 1.0866141732283466e-05,
"loss": 0.6615,
"step": 966
},
{
"epoch": 1.1124859392575928,
"grad_norm": 9.975605964660645,
"learning_rate": 1.1124859392575929e-05,
"loss": 0.4082,
"step": 989
},
{
"epoch": 1.1383577052868392,
"grad_norm": 10.850635528564453,
"learning_rate": 1.1383577052868393e-05,
"loss": 0.4151,
"step": 1012
},
{
"epoch": 1.1642294713160855,
"grad_norm": 10.857563972473145,
"learning_rate": 1.1642294713160854e-05,
"loss": 0.4441,
"step": 1035
},
{
"epoch": 1.190101237345332,
"grad_norm": 4.742630958557129,
"learning_rate": 1.190101237345332e-05,
"loss": 0.7853,
"step": 1058
},
{
"epoch": 1.2159730033745781,
"grad_norm": 0.09981119632720947,
"learning_rate": 1.2159730033745783e-05,
"loss": 0.2316,
"step": 1081
},
{
"epoch": 1.2418447694038246,
"grad_norm": 0.37830597162246704,
"learning_rate": 1.2418447694038246e-05,
"loss": 0.5479,
"step": 1104
},
{
"epoch": 1.2677165354330708,
"grad_norm": 1.0691417455673218,
"learning_rate": 1.2677165354330708e-05,
"loss": 0.342,
"step": 1127
},
{
"epoch": 1.2935883014623173,
"grad_norm": 10.283402442932129,
"learning_rate": 1.2935883014623173e-05,
"loss": 0.591,
"step": 1150
},
{
"epoch": 1.3194600674915635,
"grad_norm": 2.4355926513671875,
"learning_rate": 1.3194600674915636e-05,
"loss": 0.486,
"step": 1173
},
{
"epoch": 1.34533183352081,
"grad_norm": 7.244462966918945,
"learning_rate": 1.3453318335208098e-05,
"loss": 0.5206,
"step": 1196
},
{
"epoch": 1.3712035995500562,
"grad_norm": 9.888749122619629,
"learning_rate": 1.3712035995500563e-05,
"loss": 0.4504,
"step": 1219
},
{
"epoch": 1.3970753655793025,
"grad_norm": 3.3459055423736572,
"learning_rate": 1.3970753655793026e-05,
"loss": 0.4415,
"step": 1242
},
{
"epoch": 1.422947131608549,
"grad_norm": 0.175747811794281,
"learning_rate": 1.4229471316085491e-05,
"loss": 0.2943,
"step": 1265
},
{
"epoch": 1.4488188976377954,
"grad_norm": 4.791985988616943,
"learning_rate": 1.4488188976377953e-05,
"loss": 0.566,
"step": 1288
},
{
"epoch": 1.4746906636670416,
"grad_norm": 12.70669174194336,
"learning_rate": 1.4746906636670415e-05,
"loss": 0.4687,
"step": 1311
},
{
"epoch": 1.5005624296962878,
"grad_norm": 0.04389326646924019,
"learning_rate": 1.500562429696288e-05,
"loss": 0.2629,
"step": 1334
},
{
"epoch": 1.5264341957255343,
"grad_norm": 4.369255542755127,
"learning_rate": 1.5264341957255343e-05,
"loss": 0.472,
"step": 1357
},
{
"epoch": 1.5523059617547807,
"grad_norm": 4.454276084899902,
"learning_rate": 1.5523059617547807e-05,
"loss": 0.4307,
"step": 1380
},
{
"epoch": 1.578177727784027,
"grad_norm": 1.1173179149627686,
"learning_rate": 1.578177727784027e-05,
"loss": 0.3431,
"step": 1403
},
{
"epoch": 1.6040494938132732,
"grad_norm": 3.0332882404327393,
"learning_rate": 1.6040494938132734e-05,
"loss": 0.2545,
"step": 1426
},
{
"epoch": 1.6299212598425197,
"grad_norm": 10.521413803100586,
"learning_rate": 1.6299212598425197e-05,
"loss": 0.6555,
"step": 1449
},
{
"epoch": 1.6557930258717661,
"grad_norm": 3.8012301921844482,
"learning_rate": 1.655793025871766e-05,
"loss": 0.37,
"step": 1472
},
{
"epoch": 1.6816647919010124,
"grad_norm": 0.05941835045814514,
"learning_rate": 1.6816647919010127e-05,
"loss": 0.3091,
"step": 1495
},
{
"epoch": 1.7075365579302586,
"grad_norm": 0.29451972246170044,
"learning_rate": 1.7075365579302587e-05,
"loss": 0.3511,
"step": 1518
},
{
"epoch": 1.733408323959505,
"grad_norm": 3.2587544918060303,
"learning_rate": 1.733408323959505e-05,
"loss": 0.6164,
"step": 1541
},
{
"epoch": 1.7592800899887515,
"grad_norm": 3.4113192558288574,
"learning_rate": 1.7592800899887514e-05,
"loss": 0.3817,
"step": 1564
},
{
"epoch": 1.7851518560179978,
"grad_norm": 11.975400924682617,
"learning_rate": 1.7851518560179977e-05,
"loss": 0.3421,
"step": 1587
},
{
"epoch": 1.811023622047244,
"grad_norm": 1.5508025884628296,
"learning_rate": 1.8110236220472444e-05,
"loss": 0.6197,
"step": 1610
},
{
"epoch": 1.8368953880764904,
"grad_norm": 0.24332457780838013,
"learning_rate": 1.8368953880764904e-05,
"loss": 0.4227,
"step": 1633
},
{
"epoch": 1.862767154105737,
"grad_norm": 0.10154075175523758,
"learning_rate": 1.8627671541057367e-05,
"loss": 0.4287,
"step": 1656
},
{
"epoch": 1.8886389201349831,
"grad_norm": 10.398853302001953,
"learning_rate": 1.8886389201349834e-05,
"loss": 0.3014,
"step": 1679
},
{
"epoch": 1.9145106861642294,
"grad_norm": 0.8489835858345032,
"learning_rate": 1.9145106861642294e-05,
"loss": 0.3982,
"step": 1702
},
{
"epoch": 1.9403824521934758,
"grad_norm": 0.8622331619262695,
"learning_rate": 1.940382452193476e-05,
"loss": 0.5771,
"step": 1725
},
{
"epoch": 1.9662542182227223,
"grad_norm": 2.847560405731201,
"learning_rate": 1.9662542182227224e-05,
"loss": 0.4758,
"step": 1748
},
{
"epoch": 1.9921259842519685,
"grad_norm": 0.5839571952819824,
"learning_rate": 1.9921259842519684e-05,
"loss": 0.2732,
"step": 1771
},
{
"epoch": 2.0,
"eval_accuracy": 0.7857142857142857,
"eval_auc": 0.7965817342979973,
"eval_f1": 0.0,
"eval_loss": 0.9842509627342224,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 2.74,
"eval_samples_per_second": 173.72,
"eval_steps_per_second": 21.897,
"step": 1778
},
{
"epoch": 2.0179977502812148,
"grad_norm": 0.23481443524360657,
"learning_rate": 2.017997750281215e-05,
"loss": 0.2917,
"step": 1794
},
{
"epoch": 2.043869516310461,
"grad_norm": 0.1679057627916336,
"learning_rate": 2.043869516310461e-05,
"loss": 0.5162,
"step": 1817
},
{
"epoch": 2.0697412823397077,
"grad_norm": 5.455684185028076,
"learning_rate": 2.0697412823397078e-05,
"loss": 0.5789,
"step": 1840
},
{
"epoch": 2.095613048368954,
"grad_norm": 7.542656421661377,
"learning_rate": 2.095613048368954e-05,
"loss": 0.5314,
"step": 1863
},
{
"epoch": 2.1214848143982,
"grad_norm": 2.186872720718384,
"learning_rate": 2.1214848143982e-05,
"loss": 0.3816,
"step": 1886
},
{
"epoch": 2.1473565804274464,
"grad_norm": 0.19364489614963531,
"learning_rate": 2.1473565804274468e-05,
"loss": 0.336,
"step": 1909
},
{
"epoch": 2.173228346456693,
"grad_norm": 0.20303942263126373,
"learning_rate": 2.173228346456693e-05,
"loss": 0.421,
"step": 1932
},
{
"epoch": 2.1991001124859393,
"grad_norm": 0.5988159775733948,
"learning_rate": 2.1991001124859395e-05,
"loss": 0.4587,
"step": 1955
},
{
"epoch": 2.2249718785151855,
"grad_norm": 7.407308101654053,
"learning_rate": 2.2249718785151858e-05,
"loss": 0.6277,
"step": 1978
},
{
"epoch": 2.2508436445444318,
"grad_norm": 2.7972147464752197,
"learning_rate": 2.2508436445444318e-05,
"loss": 0.4025,
"step": 2001
},
{
"epoch": 2.2767154105736784,
"grad_norm": 14.71440601348877,
"learning_rate": 2.2767154105736785e-05,
"loss": 0.4362,
"step": 2024
},
{
"epoch": 2.3025871766029247,
"grad_norm": 0.6409655213356018,
"learning_rate": 2.302587176602925e-05,
"loss": 0.4014,
"step": 2047
},
{
"epoch": 2.328458942632171,
"grad_norm": 0.0657491534948349,
"learning_rate": 2.328458942632171e-05,
"loss": 0.3942,
"step": 2070
},
{
"epoch": 2.354330708661417,
"grad_norm": 0.9892528653144836,
"learning_rate": 2.3543307086614175e-05,
"loss": 0.5464,
"step": 2093
},
{
"epoch": 2.380202474690664,
"grad_norm": 0.6635844111442566,
"learning_rate": 2.380202474690664e-05,
"loss": 0.4767,
"step": 2116
},
{
"epoch": 2.40607424071991,
"grad_norm": 7.151478290557861,
"learning_rate": 2.4060742407199102e-05,
"loss": 0.4637,
"step": 2139
},
{
"epoch": 2.4319460067491563,
"grad_norm": 0.7382416129112244,
"learning_rate": 2.4319460067491565e-05,
"loss": 0.3082,
"step": 2162
},
{
"epoch": 2.4578177727784025,
"grad_norm": 9.286602973937988,
"learning_rate": 2.4578177727784025e-05,
"loss": 0.5282,
"step": 2185
},
{
"epoch": 2.483689538807649,
"grad_norm": 4.055200099945068,
"learning_rate": 2.4836895388076492e-05,
"loss": 0.457,
"step": 2208
},
{
"epoch": 2.5095613048368954,
"grad_norm": 0.12764699757099152,
"learning_rate": 2.5095613048368956e-05,
"loss": 0.3318,
"step": 2231
},
{
"epoch": 2.5354330708661417,
"grad_norm": 2.9867072105407715,
"learning_rate": 2.5354330708661416e-05,
"loss": 0.3542,
"step": 2254
},
{
"epoch": 2.5613048368953883,
"grad_norm": 4.703469753265381,
"learning_rate": 2.5613048368953886e-05,
"loss": 0.4748,
"step": 2277
},
{
"epoch": 2.5871766029246346,
"grad_norm": 9.484485626220703,
"learning_rate": 2.5871766029246346e-05,
"loss": 0.4135,
"step": 2300
},
{
"epoch": 2.613048368953881,
"grad_norm": 18.093868255615234,
"learning_rate": 2.613048368953881e-05,
"loss": 0.4656,
"step": 2323
},
{
"epoch": 2.638920134983127,
"grad_norm": 0.21779067814350128,
"learning_rate": 2.6389201349831272e-05,
"loss": 0.3057,
"step": 2346
},
{
"epoch": 2.6647919010123733,
"grad_norm": 0.7615957260131836,
"learning_rate": 2.6647919010123736e-05,
"loss": 0.4652,
"step": 2369
},
{
"epoch": 2.69066366704162,
"grad_norm": 0.35096365213394165,
"learning_rate": 2.6906636670416196e-05,
"loss": 0.3751,
"step": 2392
},
{
"epoch": 2.716535433070866,
"grad_norm": 0.5412973761558533,
"learning_rate": 2.7165354330708666e-05,
"loss": 0.1823,
"step": 2415
},
{
"epoch": 2.7424071991001124,
"grad_norm": 1.9220446348190308,
"learning_rate": 2.7424071991001126e-05,
"loss": 0.6485,
"step": 2438
},
{
"epoch": 2.7682789651293587,
"grad_norm": 11.08309268951416,
"learning_rate": 2.768278965129359e-05,
"loss": 0.4777,
"step": 2461
},
{
"epoch": 2.794150731158605,
"grad_norm": 0.0344441793859005,
"learning_rate": 2.7941507311586053e-05,
"loss": 0.2201,
"step": 2484
},
{
"epoch": 2.8200224971878516,
"grad_norm": 0.9013801217079163,
"learning_rate": 2.8200224971878513e-05,
"loss": 0.471,
"step": 2507
},
{
"epoch": 2.845894263217098,
"grad_norm": 1.6073826551437378,
"learning_rate": 2.8458942632170983e-05,
"loss": 0.5535,
"step": 2530
},
{
"epoch": 2.871766029246344,
"grad_norm": 3.7488861083984375,
"learning_rate": 2.8717660292463443e-05,
"loss": 0.525,
"step": 2553
},
{
"epoch": 2.8976377952755907,
"grad_norm": 0.2803422808647156,
"learning_rate": 2.8976377952755906e-05,
"loss": 0.4241,
"step": 2576
},
{
"epoch": 2.923509561304837,
"grad_norm": 2.339632749557495,
"learning_rate": 2.923509561304837e-05,
"loss": 0.3422,
"step": 2599
},
{
"epoch": 2.949381327334083,
"grad_norm": 0.10544496774673462,
"learning_rate": 2.949381327334083e-05,
"loss": 0.2698,
"step": 2622
},
{
"epoch": 2.9752530933633294,
"grad_norm": 5.151175498962402,
"learning_rate": 2.97525309336333e-05,
"loss": 0.5485,
"step": 2645
},
{
"epoch": 3.0,
"eval_accuracy": 0.7815126050420168,
"eval_auc": 0.7921778337003251,
"eval_f1": 0.0,
"eval_loss": 0.7073759436607361,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 2.7321,
"eval_samples_per_second": 174.227,
"eval_steps_per_second": 21.961,
"step": 2667
},
{
"epoch": 3.001124859392576,
"grad_norm": 7.475840091705322,
"learning_rate": 3.001124859392576e-05,
"loss": 0.2998,
"step": 2668
},
{
"epoch": 3.0269966254218224,
"grad_norm": 0.36270514130592346,
"learning_rate": 3.0269966254218223e-05,
"loss": 0.3207,
"step": 2691
},
{
"epoch": 3.0528683914510686,
"grad_norm": 3.655841112136841,
"learning_rate": 3.052868391451069e-05,
"loss": 0.376,
"step": 2714
},
{
"epoch": 3.078740157480315,
"grad_norm": 0.28142863512039185,
"learning_rate": 3.078740157480315e-05,
"loss": 0.3376,
"step": 2737
},
{
"epoch": 3.1046119235095615,
"grad_norm": 0.1056133359670639,
"learning_rate": 3.1046119235095613e-05,
"loss": 0.43,
"step": 2760
},
{
"epoch": 3.1304836895388077,
"grad_norm": 10.104964256286621,
"learning_rate": 3.130483689538808e-05,
"loss": 0.4603,
"step": 2783
},
{
"epoch": 3.156355455568054,
"grad_norm": 8.072303771972656,
"learning_rate": 3.156355455568054e-05,
"loss": 0.3976,
"step": 2806
},
{
"epoch": 3.1822272215973,
"grad_norm": 10.715200424194336,
"learning_rate": 3.1822272215973004e-05,
"loss": 0.4361,
"step": 2829
},
{
"epoch": 3.208098987626547,
"grad_norm": 1.3668488264083862,
"learning_rate": 3.208098987626547e-05,
"loss": 0.2668,
"step": 2852
},
{
"epoch": 3.233970753655793,
"grad_norm": 1.127758502960205,
"learning_rate": 3.233970753655794e-05,
"loss": 0.3974,
"step": 2875
},
{
"epoch": 3.2598425196850394,
"grad_norm": 0.872011125087738,
"learning_rate": 3.2598425196850394e-05,
"loss": 0.3557,
"step": 2898
},
{
"epoch": 3.2857142857142856,
"grad_norm": 15.174445152282715,
"learning_rate": 3.285714285714286e-05,
"loss": 0.4542,
"step": 2921
},
{
"epoch": 3.3115860517435323,
"grad_norm": 3.535675048828125,
"learning_rate": 3.311586051743532e-05,
"loss": 0.3665,
"step": 2944
},
{
"epoch": 3.3374578177727785,
"grad_norm": 0.1963585466146469,
"learning_rate": 3.3374578177727784e-05,
"loss": 0.4275,
"step": 2967
},
{
"epoch": 3.3633295838020247,
"grad_norm": 1.3127784729003906,
"learning_rate": 3.3633295838020254e-05,
"loss": 0.4884,
"step": 2990
},
{
"epoch": 3.389201349831271,
"grad_norm": 1.3405988216400146,
"learning_rate": 3.389201349831271e-05,
"loss": 0.2996,
"step": 3013
},
{
"epoch": 3.415073115860517,
"grad_norm": 9.742510795593262,
"learning_rate": 3.4150731158605174e-05,
"loss": 0.4061,
"step": 3036
},
{
"epoch": 3.440944881889764,
"grad_norm": 0.6711751222610474,
"learning_rate": 3.440944881889764e-05,
"loss": 0.3684,
"step": 3059
},
{
"epoch": 3.46681664791901,
"grad_norm": 9.26884937286377,
"learning_rate": 3.46681664791901e-05,
"loss": 0.4557,
"step": 3082
},
{
"epoch": 3.4926884139482564,
"grad_norm": 11.18705940246582,
"learning_rate": 3.492688413948257e-05,
"loss": 0.3843,
"step": 3105
},
{
"epoch": 3.518560179977503,
"grad_norm": 0.6143022775650024,
"learning_rate": 3.518560179977503e-05,
"loss": 0.2909,
"step": 3128
},
{
"epoch": 3.5444319460067493,
"grad_norm": 0.13090083003044128,
"learning_rate": 3.544431946006749e-05,
"loss": 0.3688,
"step": 3151
},
{
"epoch": 3.5703037120359955,
"grad_norm": 8.838688850402832,
"learning_rate": 3.5703037120359954e-05,
"loss": 0.4144,
"step": 3174
},
{
"epoch": 3.5961754780652417,
"grad_norm": 8.904288291931152,
"learning_rate": 3.596175478065242e-05,
"loss": 0.3711,
"step": 3197
},
{
"epoch": 3.622047244094488,
"grad_norm": 0.6677899360656738,
"learning_rate": 3.622047244094489e-05,
"loss": 0.5376,
"step": 3220
},
{
"epoch": 3.6479190101237347,
"grad_norm": 2.987168073654175,
"learning_rate": 3.647919010123735e-05,
"loss": 0.3642,
"step": 3243
},
{
"epoch": 3.673790776152981,
"grad_norm": 9.179577827453613,
"learning_rate": 3.673790776152981e-05,
"loss": 0.6663,
"step": 3266
},
{
"epoch": 3.699662542182227,
"grad_norm": 1.0456788539886475,
"learning_rate": 3.699662542182227e-05,
"loss": 0.478,
"step": 3289
},
{
"epoch": 3.725534308211474,
"grad_norm": 8.08246898651123,
"learning_rate": 3.7255343082114735e-05,
"loss": 0.4601,
"step": 3312
},
{
"epoch": 3.75140607424072,
"grad_norm": 8.33056926727295,
"learning_rate": 3.7514060742407205e-05,
"loss": 0.4343,
"step": 3335
},
{
"epoch": 3.7772778402699663,
"grad_norm": 2.994161367416382,
"learning_rate": 3.777277840269967e-05,
"loss": 0.3037,
"step": 3358
},
{
"epoch": 3.8031496062992125,
"grad_norm": 0.5124155282974243,
"learning_rate": 3.8031496062992125e-05,
"loss": 0.4441,
"step": 3381
},
{
"epoch": 3.8290213723284587,
"grad_norm": 9.554545402526855,
"learning_rate": 3.829021372328459e-05,
"loss": 0.4561,
"step": 3404
},
{
"epoch": 3.8548931383577054,
"grad_norm": 2.4750254154205322,
"learning_rate": 3.854893138357705e-05,
"loss": 0.5598,
"step": 3427
},
{
"epoch": 3.8807649043869517,
"grad_norm": 0.18071837723255157,
"learning_rate": 3.880764904386952e-05,
"loss": 0.392,
"step": 3450
},
{
"epoch": 3.906636670416198,
"grad_norm": 0.9068869948387146,
"learning_rate": 3.9066366704161985e-05,
"loss": 0.4358,
"step": 3473
},
{
"epoch": 3.9325084364454446,
"grad_norm": 2.832122325897217,
"learning_rate": 3.932508436445445e-05,
"loss": 0.3503,
"step": 3496
},
{
"epoch": 3.958380202474691,
"grad_norm": 8.372138023376465,
"learning_rate": 3.9583802024746905e-05,
"loss": 0.3665,
"step": 3519
},
{
"epoch": 3.984251968503937,
"grad_norm": 0.4025736451148987,
"learning_rate": 3.984251968503937e-05,
"loss": 0.3274,
"step": 3542
},
{
"epoch": 4.0,
"eval_accuracy": 0.7815126050420168,
"eval_auc": 0.8276973891160743,
"eval_f1": 0.08771929824561403,
"eval_loss": 0.6576350927352905,
"eval_precision": 0.4166666666666667,
"eval_recall": 0.049019607843137254,
"eval_runtime": 2.7248,
"eval_samples_per_second": 174.693,
"eval_steps_per_second": 22.02,
"step": 3556
},
{
"epoch": 4.010123734533184,
"grad_norm": 11.520211219787598,
"learning_rate": 4.010123734533184e-05,
"loss": 0.3699,
"step": 3565
},
{
"epoch": 4.0359955005624295,
"grad_norm": 3.6669209003448486,
"learning_rate": 4.03599550056243e-05,
"loss": 0.4811,
"step": 3588
},
{
"epoch": 4.061867266591676,
"grad_norm": 6.646744251251221,
"learning_rate": 4.0618672665916766e-05,
"loss": 0.3609,
"step": 3611
},
{
"epoch": 4.087739032620922,
"grad_norm": 0.03924195468425751,
"learning_rate": 4.087739032620922e-05,
"loss": 0.3344,
"step": 3634
},
{
"epoch": 4.113610798650169,
"grad_norm": 3.2102909088134766,
"learning_rate": 4.1136107986501686e-05,
"loss": 0.6133,
"step": 3657
},
{
"epoch": 4.139482564679415,
"grad_norm": 8.028974533081055,
"learning_rate": 4.1394825646794156e-05,
"loss": 0.322,
"step": 3680
},
{
"epoch": 4.165354330708661,
"grad_norm": 0.7968009114265442,
"learning_rate": 4.165354330708662e-05,
"loss": 0.337,
"step": 3703
},
{
"epoch": 4.191226096737908,
"grad_norm": 8.355441093444824,
"learning_rate": 4.191226096737908e-05,
"loss": 0.3879,
"step": 3726
},
{
"epoch": 4.2170978627671545,
"grad_norm": 0.24170738458633423,
"learning_rate": 4.217097862767154e-05,
"loss": 0.3252,
"step": 3749
},
{
"epoch": 4.2429696287964,
"grad_norm": 5.390313625335693,
"learning_rate": 4.2429696287964e-05,
"loss": 0.507,
"step": 3772
},
{
"epoch": 4.268841394825647,
"grad_norm": 5.854102611541748,
"learning_rate": 4.268841394825647e-05,
"loss": 0.3849,
"step": 3795
},
{
"epoch": 4.294713160854893,
"grad_norm": 8.469975471496582,
"learning_rate": 4.2947131608548936e-05,
"loss": 0.315,
"step": 3818
},
{
"epoch": 4.320584926884139,
"grad_norm": 3.3237178325653076,
"learning_rate": 4.32058492688414e-05,
"loss": 0.7087,
"step": 3841
},
{
"epoch": 4.346456692913386,
"grad_norm": 1.0640898942947388,
"learning_rate": 4.346456692913386e-05,
"loss": 0.4298,
"step": 3864
},
{
"epoch": 4.372328458942632,
"grad_norm": 0.4618189334869385,
"learning_rate": 4.372328458942632e-05,
"loss": 0.3469,
"step": 3887
},
{
"epoch": 4.398200224971879,
"grad_norm": 0.6110893487930298,
"learning_rate": 4.398200224971879e-05,
"loss": 0.4085,
"step": 3910
},
{
"epoch": 4.424071991001125,
"grad_norm": 8.723979949951172,
"learning_rate": 4.424071991001125e-05,
"loss": 0.263,
"step": 3933
},
{
"epoch": 4.449943757030371,
"grad_norm": 19.94253158569336,
"learning_rate": 4.4499437570303716e-05,
"loss": 0.3318,
"step": 3956
},
{
"epoch": 4.475815523059618,
"grad_norm": 0.9224587082862854,
"learning_rate": 4.475815523059618e-05,
"loss": 0.462,
"step": 3979
},
{
"epoch": 4.5016872890888635,
"grad_norm": 9.54886531829834,
"learning_rate": 4.5016872890888636e-05,
"loss": 0.4471,
"step": 4002
},
{
"epoch": 4.52755905511811,
"grad_norm": 3.35776686668396,
"learning_rate": 4.52755905511811e-05,
"loss": 0.4742,
"step": 4025
},
{
"epoch": 4.553430821147357,
"grad_norm": 7.887851715087891,
"learning_rate": 4.553430821147357e-05,
"loss": 0.4714,
"step": 4048
},
{
"epoch": 4.579302587176603,
"grad_norm": 3.327909469604492,
"learning_rate": 4.5793025871766033e-05,
"loss": 0.6452,
"step": 4071
},
{
"epoch": 4.605174353205849,
"grad_norm": 1.3243390321731567,
"learning_rate": 4.60517435320585e-05,
"loss": 0.3745,
"step": 4094
},
{
"epoch": 4.631046119235096,
"grad_norm": 0.7224089503288269,
"learning_rate": 4.631046119235096e-05,
"loss": 0.2628,
"step": 4117
},
{
"epoch": 4.656917885264342,
"grad_norm": 0.017284361645579338,
"learning_rate": 4.656917885264342e-05,
"loss": 0.317,
"step": 4140
},
{
"epoch": 4.6827896512935885,
"grad_norm": 14.2166166305542,
"learning_rate": 4.682789651293589e-05,
"loss": 0.464,
"step": 4163
},
{
"epoch": 4.708661417322834,
"grad_norm": 0.49798861145973206,
"learning_rate": 4.708661417322835e-05,
"loss": 0.3336,
"step": 4186
},
{
"epoch": 4.734533183352081,
"grad_norm": 1.5816570520401,
"learning_rate": 4.7345331833520814e-05,
"loss": 0.423,
"step": 4209
},
{
"epoch": 4.760404949381328,
"grad_norm": 4.431649208068848,
"learning_rate": 4.760404949381328e-05,
"loss": 0.4474,
"step": 4232
},
{
"epoch": 4.786276715410573,
"grad_norm": 7.487512588500977,
"learning_rate": 4.7862767154105734e-05,
"loss": 0.32,
"step": 4255
},
{
"epoch": 4.81214848143982,
"grad_norm": 3.283292293548584,
"learning_rate": 4.8121484814398204e-05,
"loss": 0.4767,
"step": 4278
},
{
"epoch": 4.838020247469066,
"grad_norm": 7.209409236907959,
"learning_rate": 4.838020247469067e-05,
"loss": 0.2593,
"step": 4301
},
{
"epoch": 4.863892013498313,
"grad_norm": 7.526238441467285,
"learning_rate": 4.863892013498313e-05,
"loss": 0.3552,
"step": 4324
},
{
"epoch": 4.889763779527559,
"grad_norm": 0.18729792535305023,
"learning_rate": 4.8897637795275594e-05,
"loss": 0.3581,
"step": 4347
},
{
"epoch": 4.915635545556805,
"grad_norm": 0.21440748870372772,
"learning_rate": 4.915635545556805e-05,
"loss": 0.2921,
"step": 4370
},
{
"epoch": 4.941507311586052,
"grad_norm": 12.171503067016602,
"learning_rate": 4.941507311586052e-05,
"loss": 0.5248,
"step": 4393
},
{
"epoch": 4.967379077615298,
"grad_norm": 0.14398285746574402,
"learning_rate": 4.9673790776152984e-05,
"loss": 0.4253,
"step": 4416
},
{
"epoch": 4.993250843644544,
"grad_norm": 2.316239595413208,
"learning_rate": 4.993250843644545e-05,
"loss": 0.4106,
"step": 4439
},
{
"epoch": 5.0,
"eval_accuracy": 0.7815126050420168,
"eval_auc": 0.8033579742057251,
"eval_f1": 0.0,
"eval_loss": 0.50748610496521,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 2.7592,
"eval_samples_per_second": 172.516,
"eval_steps_per_second": 21.746,
"step": 4445
},
{
"epoch": 5.019122609673791,
"grad_norm": 4.349539279937744,
"learning_rate": 4.997875265591801e-05,
"loss": 0.3046,
"step": 4462
},
{
"epoch": 5.0449943757030375,
"grad_norm": 0.3448341190814972,
"learning_rate": 4.995000624921885e-05,
"loss": 0.3477,
"step": 4485
},
{
"epoch": 5.070866141732283,
"grad_norm": 0.6105608940124512,
"learning_rate": 4.9921259842519685e-05,
"loss": 0.5053,
"step": 4508
},
{
"epoch": 5.09673790776153,
"grad_norm": 8.691744804382324,
"learning_rate": 4.989251343582053e-05,
"loss": 0.3679,
"step": 4531
},
{
"epoch": 5.122609673790776,
"grad_norm": 2.42826771736145,
"learning_rate": 4.9863767029121364e-05,
"loss": 0.2206,
"step": 4554
},
{
"epoch": 5.1484814398200225,
"grad_norm": 2.756155490875244,
"learning_rate": 4.98350206224222e-05,
"loss": 0.6075,
"step": 4577
},
{
"epoch": 5.174353205849269,
"grad_norm": 0.6143556833267212,
"learning_rate": 4.9806274215723035e-05,
"loss": 0.2272,
"step": 4600
},
{
"epoch": 5.200224971878515,
"grad_norm": 0.17892147600650787,
"learning_rate": 4.977752780902387e-05,
"loss": 0.5256,
"step": 4623
},
{
"epoch": 5.226096737907762,
"grad_norm": 0.12368874251842499,
"learning_rate": 4.9748781402324714e-05,
"loss": 0.5569,
"step": 4646
},
{
"epoch": 5.251968503937007,
"grad_norm": 0.048263806849718094,
"learning_rate": 4.972003499562555e-05,
"loss": 0.2298,
"step": 4669
},
{
"epoch": 5.277840269966254,
"grad_norm": 4.790956020355225,
"learning_rate": 4.9691288588926386e-05,
"loss": 0.5415,
"step": 4692
},
{
"epoch": 5.303712035995501,
"grad_norm": 5.684449672698975,
"learning_rate": 4.966254218222722e-05,
"loss": 0.3086,
"step": 4715
},
{
"epoch": 5.329583802024747,
"grad_norm": 0.4559657871723175,
"learning_rate": 4.9633795775528064e-05,
"loss": 0.3575,
"step": 4738
},
{
"epoch": 5.355455568053993,
"grad_norm": 7.768259048461914,
"learning_rate": 4.96050493688289e-05,
"loss": 0.4601,
"step": 4761
},
{
"epoch": 5.38132733408324,
"grad_norm": 0.44012290239334106,
"learning_rate": 4.9576302962129736e-05,
"loss": 0.3265,
"step": 4784
},
{
"epoch": 5.407199100112486,
"grad_norm": 0.22265122830867767,
"learning_rate": 4.954755655543057e-05,
"loss": 0.6152,
"step": 4807
},
{
"epoch": 5.433070866141732,
"grad_norm": 4.463795185089111,
"learning_rate": 4.951881014873141e-05,
"loss": 0.3046,
"step": 4830
},
{
"epoch": 5.458942632170979,
"grad_norm": 5.145118236541748,
"learning_rate": 4.949006374203225e-05,
"loss": 0.4773,
"step": 4853
},
{
"epoch": 5.484814398200225,
"grad_norm": 0.24177011847496033,
"learning_rate": 4.9461317335333087e-05,
"loss": 0.2634,
"step": 4876
},
{
"epoch": 5.5106861642294716,
"grad_norm": 0.8902063369750977,
"learning_rate": 4.943257092863392e-05,
"loss": 0.3362,
"step": 4899
},
{
"epoch": 5.536557930258717,
"grad_norm": 0.39589497447013855,
"learning_rate": 4.940382452193476e-05,
"loss": 0.6168,
"step": 4922
},
{
"epoch": 5.562429696287964,
"grad_norm": 1.736325979232788,
"learning_rate": 4.9375078115235594e-05,
"loss": 0.5466,
"step": 4945
},
{
"epoch": 5.588301462317211,
"grad_norm": 2.3112049102783203,
"learning_rate": 4.934633170853644e-05,
"loss": 0.3546,
"step": 4968
},
{
"epoch": 5.6141732283464565,
"grad_norm": 0.4776885509490967,
"learning_rate": 4.931758530183727e-05,
"loss": 0.3755,
"step": 4991
},
{
"epoch": 5.640044994375703,
"grad_norm": 0.09375002235174179,
"learning_rate": 4.928883889513811e-05,
"loss": 0.2044,
"step": 5014
},
{
"epoch": 5.665916760404949,
"grad_norm": 11.319817543029785,
"learning_rate": 4.9260092488438945e-05,
"loss": 0.5937,
"step": 5037
},
{
"epoch": 5.691788526434196,
"grad_norm": 1.1792347431182861,
"learning_rate": 4.923134608173979e-05,
"loss": 0.3856,
"step": 5060
},
{
"epoch": 5.717660292463442,
"grad_norm": 6.615360736846924,
"learning_rate": 4.920259967504062e-05,
"loss": 0.3669,
"step": 5083
},
{
"epoch": 5.743532058492688,
"grad_norm": 6.705723762512207,
"learning_rate": 4.917385326834146e-05,
"loss": 0.2221,
"step": 5106
},
{
"epoch": 5.769403824521935,
"grad_norm": 2.312495470046997,
"learning_rate": 4.9145106861642295e-05,
"loss": 0.4308,
"step": 5129
},
{
"epoch": 5.7952755905511815,
"grad_norm": 11.482613563537598,
"learning_rate": 4.911636045494313e-05,
"loss": 0.3644,
"step": 5152
},
{
"epoch": 5.821147356580427,
"grad_norm": 4.5011982917785645,
"learning_rate": 4.9087614048243973e-05,
"loss": 0.5318,
"step": 5175
},
{
"epoch": 5.847019122609674,
"grad_norm": 3.1337974071502686,
"learning_rate": 4.905886764154481e-05,
"loss": 0.4274,
"step": 5198
},
{
"epoch": 5.872890888638921,
"grad_norm": 0.16391399502754211,
"learning_rate": 4.9030121234845645e-05,
"loss": 0.3304,
"step": 5221
},
{
"epoch": 5.898762654668166,
"grad_norm": 0.630847692489624,
"learning_rate": 4.900137482814649e-05,
"loss": 0.4878,
"step": 5244
},
{
"epoch": 5.924634420697413,
"grad_norm": 3.9900641441345215,
"learning_rate": 4.897262842144732e-05,
"loss": 0.3846,
"step": 5267
},
{
"epoch": 5.950506186726659,
"grad_norm": 5.799670219421387,
"learning_rate": 4.894388201474816e-05,
"loss": 0.3469,
"step": 5290
},
{
"epoch": 5.9763779527559056,
"grad_norm": 0.49845901131629944,
"learning_rate": 4.8915135608048996e-05,
"loss": 0.3733,
"step": 5313
},
{
"epoch": 6.0,
"eval_accuracy": 0.7857142857142857,
"eval_auc": 0.7628709237705779,
"eval_f1": 0.0,
"eval_loss": 0.6193206310272217,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 2.7283,
"eval_samples_per_second": 174.467,
"eval_steps_per_second": 21.992,
"step": 5334
},
{
"epoch": 6.002249718785152,
"grad_norm": 0.5700758695602417,
"learning_rate": 4.888638920134984e-05,
"loss": 0.3025,
"step": 5336
},
{
"epoch": 6.028121484814398,
"grad_norm": 0.3606770932674408,
"learning_rate": 4.8857642794650674e-05,
"loss": 0.383,
"step": 5359
},
{
"epoch": 6.053993250843645,
"grad_norm": 0.136641263961792,
"learning_rate": 4.88288963879515e-05,
"loss": 0.2687,
"step": 5382
},
{
"epoch": 6.0798650168728905,
"grad_norm": 0.3220977187156677,
"learning_rate": 4.8800149981252346e-05,
"loss": 0.4189,
"step": 5405
},
{
"epoch": 6.105736782902137,
"grad_norm": 0.8333466053009033,
"learning_rate": 4.877140357455318e-05,
"loss": 0.3677,
"step": 5428
},
{
"epoch": 6.131608548931384,
"grad_norm": 0.08014432340860367,
"learning_rate": 4.8742657167854025e-05,
"loss": 0.3624,
"step": 5451
},
{
"epoch": 6.15748031496063,
"grad_norm": 8.192437171936035,
"learning_rate": 4.871391076115486e-05,
"loss": 0.4036,
"step": 5474
},
{
"epoch": 6.183352080989876,
"grad_norm": 6.411347389221191,
"learning_rate": 4.8685164354455696e-05,
"loss": 0.3529,
"step": 5497
},
{
"epoch": 6.209223847019123,
"grad_norm": 0.006956384517252445,
"learning_rate": 4.865641794775653e-05,
"loss": 0.1559,
"step": 5520
},
{
"epoch": 6.235095613048369,
"grad_norm": 0.1893380731344223,
"learning_rate": 4.862767154105737e-05,
"loss": 0.5716,
"step": 5543
},
{
"epoch": 6.2609673790776155,
"grad_norm": 0.47687458992004395,
"learning_rate": 4.859892513435821e-05,
"loss": 0.399,
"step": 5566
},
{
"epoch": 6.286839145106861,
"grad_norm": 4.109427452087402,
"learning_rate": 4.857017872765905e-05,
"loss": 0.3858,
"step": 5589
},
{
"epoch": 6.312710911136108,
"grad_norm": 0.6130828857421875,
"learning_rate": 4.854143232095988e-05,
"loss": 0.3434,
"step": 5612
},
{
"epoch": 6.338582677165355,
"grad_norm": 2.936263084411621,
"learning_rate": 4.851268591426072e-05,
"loss": 0.7265,
"step": 5635
},
{
"epoch": 6.3644544431946,
"grad_norm": 0.4069950580596924,
"learning_rate": 4.848393950756156e-05,
"loss": 0.2387,
"step": 5658
},
{
"epoch": 6.390326209223847,
"grad_norm": 3.4261677265167236,
"learning_rate": 4.84551931008624e-05,
"loss": 0.4388,
"step": 5681
},
{
"epoch": 6.416197975253094,
"grad_norm": 4.583181381225586,
"learning_rate": 4.842644669416323e-05,
"loss": 0.321,
"step": 5704
},
{
"epoch": 6.44206974128234,
"grad_norm": 4.256218433380127,
"learning_rate": 4.839770028746407e-05,
"loss": 0.3261,
"step": 5727
},
{
"epoch": 6.467941507311586,
"grad_norm": 1.0955240726470947,
"learning_rate": 4.8368953880764905e-05,
"loss": 0.3753,
"step": 5750
},
{
"epoch": 6.493813273340832,
"grad_norm": 7.2450361251831055,
"learning_rate": 4.834020747406575e-05,
"loss": 0.2428,
"step": 5773
},
{
"epoch": 6.519685039370079,
"grad_norm": 5.181588172912598,
"learning_rate": 4.831146106736658e-05,
"loss": 0.3341,
"step": 5796
},
{
"epoch": 6.545556805399325,
"grad_norm": 2.217822790145874,
"learning_rate": 4.828271466066742e-05,
"loss": 0.4004,
"step": 5819
},
{
"epoch": 6.571428571428571,
"grad_norm": 0.3265409469604492,
"learning_rate": 4.8253968253968255e-05,
"loss": 0.4519,
"step": 5842
},
{
"epoch": 6.597300337457818,
"grad_norm": 3.4193084239959717,
"learning_rate": 4.822522184726909e-05,
"loss": 0.4347,
"step": 5865
},
{
"epoch": 6.6231721034870645,
"grad_norm": 3.1898536682128906,
"learning_rate": 4.8196475440569934e-05,
"loss": 0.2508,
"step": 5888
},
{
"epoch": 6.64904386951631,
"grad_norm": 5.070316314697266,
"learning_rate": 4.816772903387077e-05,
"loss": 0.4112,
"step": 5911
},
{
"epoch": 6.674915635545557,
"grad_norm": 0.23132477700710297,
"learning_rate": 4.8138982627171605e-05,
"loss": 0.2206,
"step": 5934
},
{
"epoch": 6.700787401574803,
"grad_norm": 4.237873554229736,
"learning_rate": 4.811023622047244e-05,
"loss": 0.5504,
"step": 5957
},
{
"epoch": 6.7266591676040495,
"grad_norm": 0.857448160648346,
"learning_rate": 4.808148981377328e-05,
"loss": 0.3815,
"step": 5980
},
{
"epoch": 6.752530933633296,
"grad_norm": 5.193442344665527,
"learning_rate": 4.805274340707412e-05,
"loss": 0.3129,
"step": 6003
},
{
"epoch": 6.778402699662542,
"grad_norm": 4.156959056854248,
"learning_rate": 4.8023997000374956e-05,
"loss": 0.4625,
"step": 6026
},
{
"epoch": 6.804274465691789,
"grad_norm": 0.4680914878845215,
"learning_rate": 4.799525059367579e-05,
"loss": 0.2069,
"step": 6049
},
{
"epoch": 6.830146231721034,
"grad_norm": 5.006599426269531,
"learning_rate": 4.796650418697663e-05,
"loss": 0.4084,
"step": 6072
},
{
"epoch": 6.856017997750281,
"grad_norm": 6.840053081512451,
"learning_rate": 4.793775778027747e-05,
"loss": 0.2994,
"step": 6095
},
{
"epoch": 6.881889763779528,
"grad_norm": 0.11373735964298248,
"learning_rate": 4.7909011373578306e-05,
"loss": 0.2934,
"step": 6118
},
{
"epoch": 6.907761529808774,
"grad_norm": 5.152923107147217,
"learning_rate": 4.788026496687914e-05,
"loss": 0.2687,
"step": 6141
},
{
"epoch": 6.93363329583802,
"grad_norm": 0.021206054836511612,
"learning_rate": 4.785151856017998e-05,
"loss": 0.3019,
"step": 6164
},
{
"epoch": 6.959505061867267,
"grad_norm": 0.06403312087059021,
"learning_rate": 4.7822772153480814e-05,
"loss": 0.2471,
"step": 6187
},
{
"epoch": 6.985376827896513,
"grad_norm": 3.1448378562927246,
"learning_rate": 4.7794025746781657e-05,
"loss": 0.6191,
"step": 6210
},
{
"epoch": 7.0,
"eval_accuracy": 0.819327731092437,
"eval_auc": 0.8453785257418476,
"eval_f1": 0.4625,
"eval_loss": 0.3894089162349701,
"eval_precision": 0.6379310344827587,
"eval_recall": 0.3627450980392157,
"eval_runtime": 2.7066,
"eval_samples_per_second": 175.867,
"eval_steps_per_second": 22.168,
"step": 6223
}
],
"logging_steps": 23,
"max_steps": 44450,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 20,
"early_stopping_threshold": 0.01
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0174156175154545e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}