Training in progress, step 95, checkpoint
{
"best_metric": 0.36422646045684814,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 0.38559107052257735,
"eval_steps": 25,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004058853373921867,
"grad_norm": 0.5062510967254639,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.3693,
"step": 1
},
{
"epoch": 0.004058853373921867,
"eval_loss": 0.8792099952697754,
"eval_runtime": 3.4344,
"eval_samples_per_second": 14.559,
"eval_steps_per_second": 3.785,
"step": 1
},
{
"epoch": 0.008117706747843734,
"grad_norm": 0.9184481501579285,
"learning_rate": 6.666666666666667e-05,
"loss": 0.5743,
"step": 2
},
{
"epoch": 0.0121765601217656,
"grad_norm": 0.9887821674346924,
"learning_rate": 0.0001,
"loss": 0.6198,
"step": 3
},
{
"epoch": 0.016235413495687467,
"grad_norm": 0.7524212598800659,
"learning_rate": 9.997376600647783e-05,
"loss": 0.5442,
"step": 4
},
{
"epoch": 0.020294266869609334,
"grad_norm": 0.6812997460365295,
"learning_rate": 9.989509461357426e-05,
"loss": 0.4661,
"step": 5
},
{
"epoch": 0.0243531202435312,
"grad_norm": 0.5500982403755188,
"learning_rate": 9.976407754861426e-05,
"loss": 0.4792,
"step": 6
},
{
"epoch": 0.028411973617453068,
"grad_norm": 0.48947757482528687,
"learning_rate": 9.958086757163489e-05,
"loss": 0.4932,
"step": 7
},
{
"epoch": 0.032470826991374935,
"grad_norm": 0.4276084303855896,
"learning_rate": 9.934567829727386e-05,
"loss": 0.4636,
"step": 8
},
{
"epoch": 0.0365296803652968,
"grad_norm": 0.3858795464038849,
"learning_rate": 9.905878394570453e-05,
"loss": 0.442,
"step": 9
},
{
"epoch": 0.04058853373921867,
"grad_norm": 0.37294769287109375,
"learning_rate": 9.872051902290737e-05,
"loss": 0.4651,
"step": 10
},
{
"epoch": 0.044647387113140535,
"grad_norm": 0.41522446274757385,
"learning_rate": 9.833127793065098e-05,
"loss": 0.4742,
"step": 11
},
{
"epoch": 0.0487062404870624,
"grad_norm": 0.40490609407424927,
"learning_rate": 9.789151450663723e-05,
"loss": 0.4696,
"step": 12
},
{
"epoch": 0.05276509386098427,
"grad_norm": 0.15065628290176392,
"learning_rate": 9.740174149534693e-05,
"loss": 0.1888,
"step": 13
},
{
"epoch": 0.056823947234906136,
"grad_norm": 0.3047502934932709,
"learning_rate": 9.686252995020249e-05,
"loss": 0.3328,
"step": 14
},
{
"epoch": 0.060882800608828,
"grad_norm": 0.2834240198135376,
"learning_rate": 9.627450856774539e-05,
"loss": 0.3549,
"step": 15
},
{
"epoch": 0.06494165398274987,
"grad_norm": 0.2397720217704773,
"learning_rate": 9.563836295460398e-05,
"loss": 0.3516,
"step": 16
},
{
"epoch": 0.06900050735667174,
"grad_norm": 0.2353983372449875,
"learning_rate": 9.495483482810688e-05,
"loss": 0.402,
"step": 17
},
{
"epoch": 0.0730593607305936,
"grad_norm": 0.264461487531662,
"learning_rate": 9.422472115147382e-05,
"loss": 0.4114,
"step": 18
},
{
"epoch": 0.07711821410451547,
"grad_norm": 0.3238242268562317,
"learning_rate": 9.3448873204592e-05,
"loss": 0.4244,
"step": 19
},
{
"epoch": 0.08117706747843734,
"grad_norm": 0.33132946491241455,
"learning_rate": 9.2628195591462e-05,
"loss": 0.4197,
"step": 20
},
{
"epoch": 0.0852359208523592,
"grad_norm": 0.31999894976615906,
"learning_rate": 9.176364518546989e-05,
"loss": 0.4201,
"step": 21
},
{
"epoch": 0.08929477422628107,
"grad_norm": 0.27462199330329895,
"learning_rate": 9.08562300137157e-05,
"loss": 0.4323,
"step": 22
},
{
"epoch": 0.09335362760020294,
"grad_norm": 0.28106844425201416,
"learning_rate": 8.990700808169889e-05,
"loss": 0.4207,
"step": 23
},
{
"epoch": 0.0974124809741248,
"grad_norm": 0.3135908246040344,
"learning_rate": 8.891708613973126e-05,
"loss": 0.4172,
"step": 24
},
{
"epoch": 0.10147133434804667,
"grad_norm": 0.39373230934143066,
"learning_rate": 8.788761839251559e-05,
"loss": 0.4293,
"step": 25
},
{
"epoch": 0.10147133434804667,
"eval_loss": 0.4003879129886627,
"eval_runtime": 3.4909,
"eval_samples_per_second": 14.323,
"eval_steps_per_second": 3.724,
"step": 25
},
{
"epoch": 0.10553018772196854,
"grad_norm": 0.2560587227344513,
"learning_rate": 8.681980515339464e-05,
"loss": 0.2327,
"step": 26
},
{
"epoch": 0.1095890410958904,
"grad_norm": 0.45607316493988037,
"learning_rate": 8.571489144483944e-05,
"loss": 0.3487,
"step": 27
},
{
"epoch": 0.11364789446981227,
"grad_norm": 0.3606185019016266,
"learning_rate": 8.457416554680877e-05,
"loss": 0.3752,
"step": 28
},
{
"epoch": 0.11770674784373414,
"grad_norm": 0.2905651330947876,
"learning_rate": 8.339895749467238e-05,
"loss": 0.3514,
"step": 29
},
{
"epoch": 0.121765601217656,
"grad_norm": 0.23107478022575378,
"learning_rate": 8.219063752844926e-05,
"loss": 0.3738,
"step": 30
},
{
"epoch": 0.12582445459157787,
"grad_norm": 0.1988050937652588,
"learning_rate": 8.095061449516903e-05,
"loss": 0.3693,
"step": 31
},
{
"epoch": 0.12988330796549974,
"grad_norm": 0.2333528846502304,
"learning_rate": 7.968033420621935e-05,
"loss": 0.3971,
"step": 32
},
{
"epoch": 0.1339421613394216,
"grad_norm": 0.3175937235355377,
"learning_rate": 7.838127775159452e-05,
"loss": 0.3949,
"step": 33
},
{
"epoch": 0.13800101471334347,
"grad_norm": 0.3904169201850891,
"learning_rate": 7.705495977301078e-05,
"loss": 0.4436,
"step": 34
},
{
"epoch": 0.14205986808726534,
"grad_norm": 0.3506806194782257,
"learning_rate": 7.570292669790186e-05,
"loss": 0.4193,
"step": 35
},
{
"epoch": 0.1461187214611872,
"grad_norm": 0.33251529932022095,
"learning_rate": 7.43267549363537e-05,
"loss": 0.4313,
"step": 36
},
{
"epoch": 0.15017757483510907,
"grad_norm": 0.3682462275028229,
"learning_rate": 7.292804904308087e-05,
"loss": 0.4288,
"step": 37
},
{
"epoch": 0.15423642820903094,
"grad_norm": 0.11894040554761887,
"learning_rate": 7.150843984658754e-05,
"loss": 0.1897,
"step": 38
},
{
"epoch": 0.1582952815829528,
"grad_norm": 0.19390328228473663,
"learning_rate": 7.006958254769438e-05,
"loss": 0.3119,
"step": 39
},
{
"epoch": 0.16235413495687467,
"grad_norm": 0.24307291209697723,
"learning_rate": 6.861315478964841e-05,
"loss": 0.345,
"step": 40
},
{
"epoch": 0.16641298833079654,
"grad_norm": 0.2972720265388489,
"learning_rate": 6.714085470206609e-05,
"loss": 0.3694,
"step": 41
},
{
"epoch": 0.1704718417047184,
"grad_norm": 0.2774950861930847,
"learning_rate": 6.56543989209901e-05,
"loss": 0.3787,
"step": 42
},
{
"epoch": 0.17453069507864027,
"grad_norm": 0.24689042568206787,
"learning_rate": 6.415552058736854e-05,
"loss": 0.3736,
"step": 43
},
{
"epoch": 0.17858954845256214,
"grad_norm": 0.2081252783536911,
"learning_rate": 6.264596732629e-05,
"loss": 0.3739,
"step": 44
},
{
"epoch": 0.182648401826484,
"grad_norm": 0.2078096568584442,
"learning_rate": 6.112749920933111e-05,
"loss": 0.3723,
"step": 45
},
{
"epoch": 0.18670725520040587,
"grad_norm": 0.20248815417289734,
"learning_rate": 5.960188670239154e-05,
"loss": 0.3964,
"step": 46
},
{
"epoch": 0.19076610857432774,
"grad_norm": 0.2449951171875,
"learning_rate": 5.80709086014102e-05,
"loss": 0.4352,
"step": 47
},
{
"epoch": 0.1948249619482496,
"grad_norm": 0.2900124490261078,
"learning_rate": 5.653634995836856e-05,
"loss": 0.429,
"step": 48
},
{
"epoch": 0.19888381532217148,
"grad_norm": 0.3739871680736542,
"learning_rate": 5.500000000000001e-05,
"loss": 0.4275,
"step": 49
},
{
"epoch": 0.20294266869609334,
"grad_norm": 0.4056133031845093,
"learning_rate": 5.346365004163145e-05,
"loss": 0.4132,
"step": 50
},
{
"epoch": 0.20294266869609334,
"eval_loss": 0.3687722384929657,
"eval_runtime": 3.499,
"eval_samples_per_second": 14.29,
"eval_steps_per_second": 3.715,
"step": 50
},
{
"epoch": 0.2070015220700152,
"grad_norm": 0.07692764699459076,
"learning_rate": 5.192909139858981e-05,
"loss": 0.1729,
"step": 51
},
{
"epoch": 0.21106037544393708,
"grad_norm": 0.1375497430562973,
"learning_rate": 5.0398113297608465e-05,
"loss": 0.3147,
"step": 52
},
{
"epoch": 0.21511922881785894,
"grad_norm": 0.1671007126569748,
"learning_rate": 4.887250079066892e-05,
"loss": 0.3289,
"step": 53
},
{
"epoch": 0.2191780821917808,
"grad_norm": 0.1718771904706955,
"learning_rate": 4.7354032673710005e-05,
"loss": 0.3449,
"step": 54
},
{
"epoch": 0.22323693556570268,
"grad_norm": 0.20011144876480103,
"learning_rate": 4.584447941263149e-05,
"loss": 0.3742,
"step": 55
},
{
"epoch": 0.22729578893962454,
"grad_norm": 0.18110662698745728,
"learning_rate": 4.43456010790099e-05,
"loss": 0.4061,
"step": 56
},
{
"epoch": 0.2313546423135464,
"grad_norm": 0.20814234018325806,
"learning_rate": 4.285914529793391e-05,
"loss": 0.3747,
"step": 57
},
{
"epoch": 0.23541349568746828,
"grad_norm": 0.19282929599285126,
"learning_rate": 4.13868452103516e-05,
"loss": 0.3778,
"step": 58
},
{
"epoch": 0.23947234906139014,
"grad_norm": 0.21799100935459137,
"learning_rate": 3.9930417452305626e-05,
"loss": 0.3985,
"step": 59
},
{
"epoch": 0.243531202435312,
"grad_norm": 0.21902616322040558,
"learning_rate": 3.8491560153412466e-05,
"loss": 0.3939,
"step": 60
},
{
"epoch": 0.24759005580923388,
"grad_norm": 0.2695430815219879,
"learning_rate": 3.707195095691913e-05,
"loss": 0.3985,
"step": 61
},
{
"epoch": 0.25164890918315574,
"grad_norm": 0.28181904554367065,
"learning_rate": 3.567324506364632e-05,
"loss": 0.3966,
"step": 62
},
{
"epoch": 0.2557077625570776,
"grad_norm": 0.09019971638917923,
"learning_rate": 3.4297073302098156e-05,
"loss": 0.1922,
"step": 63
},
{
"epoch": 0.2597666159309995,
"grad_norm": 0.13326597213745117,
"learning_rate": 3.2945040226989244e-05,
"loss": 0.3133,
"step": 64
},
{
"epoch": 0.26382546930492135,
"grad_norm": 0.16596747934818268,
"learning_rate": 3.16187222484055e-05,
"loss": 0.3314,
"step": 65
},
{
"epoch": 0.2678843226788432,
"grad_norm": 0.19934867322444916,
"learning_rate": 3.0319665793780648e-05,
"loss": 0.335,
"step": 66
},
{
"epoch": 0.2719431760527651,
"grad_norm": 0.17301230132579803,
"learning_rate": 2.9049385504830985e-05,
"loss": 0.3333,
"step": 67
},
{
"epoch": 0.27600202942668695,
"grad_norm": 0.18114101886749268,
"learning_rate": 2.7809362471550748e-05,
"loss": 0.3575,
"step": 68
},
{
"epoch": 0.2800608828006088,
"grad_norm": 0.17812739312648773,
"learning_rate": 2.660104250532764e-05,
"loss": 0.3595,
"step": 69
},
{
"epoch": 0.2841197361745307,
"grad_norm": 0.18535548448562622,
"learning_rate": 2.5425834453191232e-05,
"loss": 0.3873,
"step": 70
},
{
"epoch": 0.28817858954845255,
"grad_norm": 0.20128794014453888,
"learning_rate": 2.4285108555160577e-05,
"loss": 0.3631,
"step": 71
},
{
"epoch": 0.2922374429223744,
"grad_norm": 0.21520088613033295,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.3809,
"step": 72
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.2530140280723572,
"learning_rate": 2.2112381607484417e-05,
"loss": 0.4191,
"step": 73
},
{
"epoch": 0.30035514967021815,
"grad_norm": 0.2900885343551636,
"learning_rate": 2.1082913860268765e-05,
"loss": 0.4096,
"step": 74
},
{
"epoch": 0.30441400304414,
"grad_norm": 0.37200847268104553,
"learning_rate": 2.0092991918301108e-05,
"loss": 0.3949,
"step": 75
},
{
"epoch": 0.30441400304414,
"eval_loss": 0.36422646045684814,
"eval_runtime": 3.5086,
"eval_samples_per_second": 14.251,
"eval_steps_per_second": 3.705,
"step": 75
},
{
"epoch": 0.3084728564180619,
"grad_norm": 0.09115864336490631,
"learning_rate": 1.91437699862843e-05,
"loss": 0.2235,
"step": 76
},
{
"epoch": 0.31253170979198375,
"grad_norm": 0.1393779069185257,
"learning_rate": 1.8236354814530112e-05,
"loss": 0.3149,
"step": 77
},
{
"epoch": 0.3165905631659056,
"grad_norm": 0.14699040353298187,
"learning_rate": 1.7371804408538024e-05,
"loss": 0.3251,
"step": 78
},
{
"epoch": 0.3206494165398275,
"grad_norm": 0.1531774252653122,
"learning_rate": 1.6551126795408016e-05,
"loss": 0.3349,
"step": 79
},
{
"epoch": 0.32470826991374935,
"grad_norm": 0.1625107377767563,
"learning_rate": 1.577527884852619e-05,
"loss": 0.3474,
"step": 80
},
{
"epoch": 0.3287671232876712,
"grad_norm": 0.1845567524433136,
"learning_rate": 1.5045165171893116e-05,
"loss": 0.3567,
"step": 81
},
{
"epoch": 0.3328259766615931,
"grad_norm": 0.18067772686481476,
"learning_rate": 1.4361637045396029e-05,
"loss": 0.3791,
"step": 82
},
{
"epoch": 0.33688483003551495,
"grad_norm": 0.20357079803943634,
"learning_rate": 1.3725491432254624e-05,
"loss": 0.3848,
"step": 83
},
{
"epoch": 0.3409436834094368,
"grad_norm": 0.20797663927078247,
"learning_rate": 1.313747004979751e-05,
"loss": 0.3734,
"step": 84
},
{
"epoch": 0.3450025367833587,
"grad_norm": 0.23876087367534637,
"learning_rate": 1.2598258504653081e-05,
"loss": 0.4123,
"step": 85
},
{
"epoch": 0.34906139015728055,
"grad_norm": 0.2859575152397156,
"learning_rate": 1.2108485493362765e-05,
"loss": 0.4115,
"step": 86
},
{
"epoch": 0.3531202435312024,
"grad_norm": 0.3142327666282654,
"learning_rate": 1.1668722069349041e-05,
"loss": 0.3819,
"step": 87
},
{
"epoch": 0.3571790969051243,
"grad_norm": 0.10286320745944977,
"learning_rate": 1.1279480977092635e-05,
"loss": 0.179,
"step": 88
},
{
"epoch": 0.36123795027904615,
"grad_norm": 0.1432906836271286,
"learning_rate": 1.094121605429547e-05,
"loss": 0.3092,
"step": 89
},
{
"epoch": 0.365296803652968,
"grad_norm": 0.15591755509376526,
"learning_rate": 1.0654321702726141e-05,
"loss": 0.3168,
"step": 90
},
{
"epoch": 0.3693556570268899,
"grad_norm": 0.16477596759796143,
"learning_rate": 1.0419132428365116e-05,
"loss": 0.3352,
"step": 91
},
{
"epoch": 0.37341451040081175,
"grad_norm": 0.16704893112182617,
"learning_rate": 1.0235922451385733e-05,
"loss": 0.3432,
"step": 92
},
{
"epoch": 0.3774733637747336,
"grad_norm": 0.1805972009897232,
"learning_rate": 1.0104905386425733e-05,
"loss": 0.3548,
"step": 93
},
{
"epoch": 0.3815322171486555,
"grad_norm": 0.1912754327058792,
"learning_rate": 1.002623399352217e-05,
"loss": 0.3728,
"step": 94
},
{
"epoch": 0.38559107052257735,
"grad_norm": 0.1889467090368271,
"learning_rate": 1e-05,
"loss": 0.3875,
"step": 95
}
],
"logging_steps": 1,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1352414048768492e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
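
The JSON above appears to be the trainer_state.json written alongside a Hugging Face Transformers Trainer checkpoint: per-step training entries (loss, grad_norm, learning_rate) interleaved with eval entries every 25 steps, plus the early-stopping and control state at the end. A minimal sketch for inspecting it follows, assuming the content is saved locally as "trainer_state.json" (the filename is an assumption); the field names used are exactly those present in the file above.

# Minimal inspection sketch, assuming the JSON above is saved as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best checkpoint info recorded by the Trainer (matches the step-75 eval entry above).
print("best checkpoint:", state["best_model_checkpoint"])
print("best eval loss: ", state["best_metric"])

# Split the log history into per-step training entries and periodic eval entries.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_log:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

# Last training step and the learning rate it finished at.
last = train_log[-1]
print(f"finished at step {last['step']} with lr={last['learning_rate']:.2e}")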