{
"best_metric": 0.4325978755950928,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.007839909057054938,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 3.919954528527469e-05,
"grad_norm": 0.16042552888393402,
"learning_rate": 5e-06,
"loss": 0.4326,
"step": 1
},
{
"epoch": 3.919954528527469e-05,
"eval_loss": 0.5350391268730164,
"eval_runtime": 2393.8126,
"eval_samples_per_second": 17.948,
"eval_steps_per_second": 8.974,
"step": 1
},
{
"epoch": 7.839909057054938e-05,
"grad_norm": 0.15639245510101318,
"learning_rate": 1e-05,
"loss": 0.5717,
"step": 2
},
{
"epoch": 0.00011759863585582407,
"grad_norm": 0.14500007033348083,
"learning_rate": 1.5e-05,
"loss": 0.5217,
"step": 3
},
{
"epoch": 0.00015679818114109877,
"grad_norm": 0.13646380603313446,
"learning_rate": 2e-05,
"loss": 0.4166,
"step": 4
},
{
"epoch": 0.00019599772642637345,
"grad_norm": 0.14864464104175568,
"learning_rate": 2.5e-05,
"loss": 0.4906,
"step": 5
},
{
"epoch": 0.00023519727171164814,
"grad_norm": 0.15161864459514618,
"learning_rate": 3e-05,
"loss": 0.568,
"step": 6
},
{
"epoch": 0.00027439681699692285,
"grad_norm": 0.14943252503871918,
"learning_rate": 3.5e-05,
"loss": 0.597,
"step": 7
},
{
"epoch": 0.00031359636228219754,
"grad_norm": 0.14887230098247528,
"learning_rate": 4e-05,
"loss": 0.4819,
"step": 8
},
{
"epoch": 0.0003527959075674722,
"grad_norm": 0.18022413551807404,
"learning_rate": 4.5e-05,
"loss": 0.5437,
"step": 9
},
{
"epoch": 0.0003919954528527469,
"grad_norm": 0.18863749504089355,
"learning_rate": 5e-05,
"loss": 0.5148,
"step": 10
},
{
"epoch": 0.0004311949981380216,
"grad_norm": 0.19053728878498077,
"learning_rate": 5.500000000000001e-05,
"loss": 0.6177,
"step": 11
},
{
"epoch": 0.0004703945434232963,
"grad_norm": 0.16379624605178833,
"learning_rate": 6e-05,
"loss": 0.6024,
"step": 12
},
{
"epoch": 0.000509594088708571,
"grad_norm": 0.17053860425949097,
"learning_rate": 6.500000000000001e-05,
"loss": 0.5195,
"step": 13
},
{
"epoch": 0.0005487936339938457,
"grad_norm": 0.1678755134344101,
"learning_rate": 7e-05,
"loss": 0.5825,
"step": 14
},
{
"epoch": 0.0005879931792791204,
"grad_norm": 0.20500046014785767,
"learning_rate": 7.500000000000001e-05,
"loss": 0.6271,
"step": 15
},
{
"epoch": 0.0006271927245643951,
"grad_norm": 0.17075373232364655,
"learning_rate": 8e-05,
"loss": 0.5008,
"step": 16
},
{
"epoch": 0.0006663922698496698,
"grad_norm": 0.15935742855072021,
"learning_rate": 8.5e-05,
"loss": 0.4639,
"step": 17
},
{
"epoch": 0.0007055918151349444,
"grad_norm": 0.182022824883461,
"learning_rate": 9e-05,
"loss": 0.4297,
"step": 18
},
{
"epoch": 0.0007447913604202191,
"grad_norm": 0.16236171126365662,
"learning_rate": 9.5e-05,
"loss": 0.4011,
"step": 19
},
{
"epoch": 0.0007839909057054938,
"grad_norm": 0.18881011009216309,
"learning_rate": 0.0001,
"loss": 0.5249,
"step": 20
},
{
"epoch": 0.0008231904509907685,
"grad_norm": 0.18972285091876984,
"learning_rate": 9.999238475781957e-05,
"loss": 0.4455,
"step": 21
},
{
"epoch": 0.0008623899962760432,
"grad_norm": 0.18663617968559265,
"learning_rate": 9.99695413509548e-05,
"loss": 0.4337,
"step": 22
},
{
"epoch": 0.0009015895415613179,
"grad_norm": 0.1874244064092636,
"learning_rate": 9.99314767377287e-05,
"loss": 0.4353,
"step": 23
},
{
"epoch": 0.0009407890868465926,
"grad_norm": 0.1862955540418625,
"learning_rate": 9.987820251299122e-05,
"loss": 0.5082,
"step": 24
},
{
"epoch": 0.0009799886321318672,
"grad_norm": 0.17161045968532562,
"learning_rate": 9.980973490458728e-05,
"loss": 0.4783,
"step": 25
},
{
"epoch": 0.001019188177417142,
"grad_norm": 0.17733538150787354,
"learning_rate": 9.972609476841367e-05,
"loss": 0.4794,
"step": 26
},
{
"epoch": 0.0010583877227024166,
"grad_norm": 0.17916053533554077,
"learning_rate": 9.962730758206611e-05,
"loss": 0.4799,
"step": 27
},
{
"epoch": 0.0010975872679876914,
"grad_norm": 0.17826522886753082,
"learning_rate": 9.951340343707852e-05,
"loss": 0.4,
"step": 28
},
{
"epoch": 0.001136786813272966,
"grad_norm": 0.19639909267425537,
"learning_rate": 9.938441702975689e-05,
"loss": 0.372,
"step": 29
},
{
"epoch": 0.0011759863585582408,
"grad_norm": 0.19923311471939087,
"learning_rate": 9.924038765061042e-05,
"loss": 0.423,
"step": 30
},
{
"epoch": 0.0012151859038435154,
"grad_norm": 0.20051322877407074,
"learning_rate": 9.908135917238321e-05,
"loss": 0.4233,
"step": 31
},
{
"epoch": 0.0012543854491287902,
"grad_norm": 0.20958496630191803,
"learning_rate": 9.890738003669029e-05,
"loss": 0.4288,
"step": 32
},
{
"epoch": 0.0012935849944140647,
"grad_norm": 0.20028801262378693,
"learning_rate": 9.871850323926177e-05,
"loss": 0.3726,
"step": 33
},
{
"epoch": 0.0013327845396993395,
"grad_norm": 0.20952534675598145,
"learning_rate": 9.851478631379982e-05,
"loss": 0.5309,
"step": 34
},
{
"epoch": 0.001371984084984614,
"grad_norm": 0.21508319675922394,
"learning_rate": 9.829629131445342e-05,
"loss": 0.4658,
"step": 35
},
{
"epoch": 0.001411183630269889,
"grad_norm": 0.2109171450138092,
"learning_rate": 9.806308479691595e-05,
"loss": 0.4026,
"step": 36
},
{
"epoch": 0.0014503831755551635,
"grad_norm": 0.2147839516401291,
"learning_rate": 9.781523779815179e-05,
"loss": 0.3535,
"step": 37
},
{
"epoch": 0.0014895827208404383,
"grad_norm": 0.20986714959144592,
"learning_rate": 9.755282581475769e-05,
"loss": 0.3854,
"step": 38
},
{
"epoch": 0.0015287822661257128,
"grad_norm": 0.22886401414871216,
"learning_rate": 9.727592877996585e-05,
"loss": 0.3524,
"step": 39
},
{
"epoch": 0.0015679818114109876,
"grad_norm": 0.2274533212184906,
"learning_rate": 9.698463103929542e-05,
"loss": 0.3901,
"step": 40
},
{
"epoch": 0.0016071813566962624,
"grad_norm": 0.2675640285015106,
"learning_rate": 9.667902132486009e-05,
"loss": 0.3687,
"step": 41
},
{
"epoch": 0.001646380901981537,
"grad_norm": 0.23128102719783783,
"learning_rate": 9.635919272833938e-05,
"loss": 0.4233,
"step": 42
},
{
"epoch": 0.0016855804472668118,
"grad_norm": 0.22661566734313965,
"learning_rate": 9.602524267262203e-05,
"loss": 0.4152,
"step": 43
},
{
"epoch": 0.0017247799925520864,
"grad_norm": 0.2415810078382492,
"learning_rate": 9.567727288213005e-05,
"loss": 0.3817,
"step": 44
},
{
"epoch": 0.0017639795378373612,
"grad_norm": 0.2653232216835022,
"learning_rate": 9.53153893518325e-05,
"loss": 0.4235,
"step": 45
},
{
"epoch": 0.0018031790831226358,
"grad_norm": 0.2872054874897003,
"learning_rate": 9.493970231495835e-05,
"loss": 0.4208,
"step": 46
},
{
"epoch": 0.0018423786284079105,
"grad_norm": 0.2797979712486267,
"learning_rate": 9.45503262094184e-05,
"loss": 0.4168,
"step": 47
},
{
"epoch": 0.0018815781736931851,
"grad_norm": 0.2640951871871948,
"learning_rate": 9.414737964294636e-05,
"loss": 0.3265,
"step": 48
},
{
"epoch": 0.00192077771897846,
"grad_norm": 0.33999350666999817,
"learning_rate": 9.373098535696979e-05,
"loss": 0.3443,
"step": 49
},
{
"epoch": 0.0019599772642637345,
"grad_norm": 0.49079790711402893,
"learning_rate": 9.330127018922194e-05,
"loss": 0.3703,
"step": 50
},
{
"epoch": 0.0019599772642637345,
"eval_loss": 0.44784459471702576,
"eval_runtime": 2398.5451,
"eval_samples_per_second": 17.913,
"eval_steps_per_second": 8.957,
"step": 50
},
{
"epoch": 0.001999176809549009,
"grad_norm": 0.1652698665857315,
"learning_rate": 9.285836503510562e-05,
"loss": 0.4374,
"step": 51
},
{
"epoch": 0.002038376354834284,
"grad_norm": 0.15927405655384064,
"learning_rate": 9.24024048078213e-05,
"loss": 0.5079,
"step": 52
},
{
"epoch": 0.0020775759001195587,
"grad_norm": 0.13452063500881195,
"learning_rate": 9.193352839727121e-05,
"loss": 0.4709,
"step": 53
},
{
"epoch": 0.0021167754454048332,
"grad_norm": 0.12576071918010712,
"learning_rate": 9.145187862775209e-05,
"loss": 0.5341,
"step": 54
},
{
"epoch": 0.002155974990690108,
"grad_norm": 0.11464632302522659,
"learning_rate": 9.09576022144496e-05,
"loss": 0.4928,
"step": 55
},
{
"epoch": 0.002195174535975383,
"grad_norm": 0.12085549533367157,
"learning_rate": 9.045084971874738e-05,
"loss": 0.5811,
"step": 56
},
{
"epoch": 0.0022343740812606574,
"grad_norm": 0.12558674812316895,
"learning_rate": 8.993177550236464e-05,
"loss": 0.596,
"step": 57
},
{
"epoch": 0.002273573626545932,
"grad_norm": 0.12457916140556335,
"learning_rate": 8.940053768033609e-05,
"loss": 0.5087,
"step": 58
},
{
"epoch": 0.0023127731718312066,
"grad_norm": 0.12604419887065887,
"learning_rate": 8.885729807284856e-05,
"loss": 0.5619,
"step": 59
},
{
"epoch": 0.0023519727171164816,
"grad_norm": 0.1314886063337326,
"learning_rate": 8.83022221559489e-05,
"loss": 0.5879,
"step": 60
},
{
"epoch": 0.002391172262401756,
"grad_norm": 0.12995365262031555,
"learning_rate": 8.773547901113862e-05,
"loss": 0.4307,
"step": 61
},
{
"epoch": 0.0024303718076870307,
"grad_norm": 0.13908788561820984,
"learning_rate": 8.715724127386972e-05,
"loss": 0.538,
"step": 62
},
{
"epoch": 0.0024695713529723057,
"grad_norm": 0.13697293400764465,
"learning_rate": 8.656768508095853e-05,
"loss": 0.4955,
"step": 63
},
{
"epoch": 0.0025087708982575803,
"grad_norm": 0.12941715121269226,
"learning_rate": 8.596699001693255e-05,
"loss": 0.5089,
"step": 64
},
{
"epoch": 0.002547970443542855,
"grad_norm": 0.1320359855890274,
"learning_rate": 8.535533905932738e-05,
"loss": 0.4805,
"step": 65
},
{
"epoch": 0.0025871699888281295,
"grad_norm": 0.13320119678974152,
"learning_rate": 8.473291852294987e-05,
"loss": 0.4316,
"step": 66
},
{
"epoch": 0.0026263695341134045,
"grad_norm": 0.13988560438156128,
"learning_rate": 8.409991800312493e-05,
"loss": 0.5421,
"step": 67
},
{
"epoch": 0.002665569079398679,
"grad_norm": 0.13911867141723633,
"learning_rate": 8.345653031794292e-05,
"loss": 0.4276,
"step": 68
},
{
"epoch": 0.0027047686246839536,
"grad_norm": 0.14803774654865265,
"learning_rate": 8.280295144952536e-05,
"loss": 0.3929,
"step": 69
},
{
"epoch": 0.002743968169969228,
"grad_norm": 0.14860422909259796,
"learning_rate": 8.213938048432697e-05,
"loss": 0.4329,
"step": 70
},
{
"epoch": 0.002783167715254503,
"grad_norm": 0.1481442004442215,
"learning_rate": 8.146601955249188e-05,
"loss": 0.4106,
"step": 71
},
{
"epoch": 0.002822367260539778,
"grad_norm": 0.1563064306974411,
"learning_rate": 8.07830737662829e-05,
"loss": 0.482,
"step": 72
},
{
"epoch": 0.0028615668058250524,
"grad_norm": 0.146785169839859,
"learning_rate": 8.009075115760243e-05,
"loss": 0.4506,
"step": 73
},
{
"epoch": 0.002900766351110327,
"grad_norm": 0.15569761395454407,
"learning_rate": 7.938926261462366e-05,
"loss": 0.4138,
"step": 74
},
{
"epoch": 0.002939965896395602,
"grad_norm": 0.16449235379695892,
"learning_rate": 7.86788218175523e-05,
"loss": 0.5195,
"step": 75
},
{
"epoch": 0.0029791654416808765,
"grad_norm": 0.15461541712284088,
"learning_rate": 7.795964517353735e-05,
"loss": 0.4168,
"step": 76
},
{
"epoch": 0.003018364986966151,
"grad_norm": 0.16411833465099335,
"learning_rate": 7.723195175075136e-05,
"loss": 0.4094,
"step": 77
},
{
"epoch": 0.0030575645322514257,
"grad_norm": 0.1622595489025116,
"learning_rate": 7.649596321166024e-05,
"loss": 0.4647,
"step": 78
},
{
"epoch": 0.0030967640775367007,
"grad_norm": 0.16740792989730835,
"learning_rate": 7.575190374550272e-05,
"loss": 0.4236,
"step": 79
},
{
"epoch": 0.0031359636228219753,
"grad_norm": 0.17070157825946808,
"learning_rate": 7.500000000000001e-05,
"loss": 0.4204,
"step": 80
},
{
"epoch": 0.00317516316810725,
"grad_norm": 0.1682911217212677,
"learning_rate": 7.424048101231686e-05,
"loss": 0.3919,
"step": 81
},
{
"epoch": 0.003214362713392525,
"grad_norm": 0.17516714334487915,
"learning_rate": 7.347357813929454e-05,
"loss": 0.4194,
"step": 82
},
{
"epoch": 0.0032535622586777994,
"grad_norm": 0.17585694789886475,
"learning_rate": 7.269952498697734e-05,
"loss": 0.3488,
"step": 83
},
{
"epoch": 0.003292761803963074,
"grad_norm": 0.16620786488056183,
"learning_rate": 7.191855733945387e-05,
"loss": 0.3378,
"step": 84
},
{
"epoch": 0.0033319613492483486,
"grad_norm": 0.17664961516857147,
"learning_rate": 7.113091308703498e-05,
"loss": 0.3861,
"step": 85
},
{
"epoch": 0.0033711608945336236,
"grad_norm": 0.1843758374452591,
"learning_rate": 7.033683215379002e-05,
"loss": 0.3457,
"step": 86
},
{
"epoch": 0.003410360439818898,
"grad_norm": 0.19849351048469543,
"learning_rate": 6.953655642446368e-05,
"loss": 0.3744,
"step": 87
},
{
"epoch": 0.0034495599851041728,
"grad_norm": 0.1878618746995926,
"learning_rate": 6.873032967079561e-05,
"loss": 0.3847,
"step": 88
},
{
"epoch": 0.0034887595303894473,
"grad_norm": 0.19447298347949982,
"learning_rate": 6.7918397477265e-05,
"loss": 0.425,
"step": 89
},
{
"epoch": 0.0035279590756747223,
"grad_norm": 0.1870015263557434,
"learning_rate": 6.710100716628344e-05,
"loss": 0.3633,
"step": 90
},
{
"epoch": 0.003567158620959997,
"grad_norm": 0.19948124885559082,
"learning_rate": 6.627840772285784e-05,
"loss": 0.3673,
"step": 91
},
{
"epoch": 0.0036063581662452715,
"grad_norm": 0.23523126542568207,
"learning_rate": 6.545084971874738e-05,
"loss": 0.4524,
"step": 92
},
{
"epoch": 0.003645557711530546,
"grad_norm": 0.21212515234947205,
"learning_rate": 6.461858523613684e-05,
"loss": 0.3936,
"step": 93
},
{
"epoch": 0.003684757256815821,
"grad_norm": 0.2363279014825821,
"learning_rate": 6.378186779084995e-05,
"loss": 0.4356,
"step": 94
},
{
"epoch": 0.0037239568021010957,
"grad_norm": 0.21925963461399078,
"learning_rate": 6.294095225512603e-05,
"loss": 0.3833,
"step": 95
},
{
"epoch": 0.0037631563473863702,
"grad_norm": 0.20751087367534637,
"learning_rate": 6.209609477998338e-05,
"loss": 0.336,
"step": 96
},
{
"epoch": 0.003802355892671645,
"grad_norm": 0.2536580264568329,
"learning_rate": 6.124755271719325e-05,
"loss": 0.4993,
"step": 97
},
{
"epoch": 0.00384155543795692,
"grad_norm": 0.25841864943504333,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.3797,
"step": 98
},
{
"epoch": 0.0038807549832421944,
"grad_norm": 0.2994214594364166,
"learning_rate": 5.9540449768827246e-05,
"loss": 0.332,
"step": 99
},
{
"epoch": 0.003919954528527469,
"grad_norm": 0.3704180121421814,
"learning_rate": 5.868240888334653e-05,
"loss": 0.4043,
"step": 100
},
{
"epoch": 0.003919954528527469,
"eval_loss": 0.43801045417785645,
"eval_runtime": 2400.0319,
"eval_samples_per_second": 17.902,
"eval_steps_per_second": 8.951,
"step": 100
},
{
"epoch": 0.0039591540738127436,
"grad_norm": 0.10259481519460678,
"learning_rate": 5.782172325201155e-05,
"loss": 0.4114,
"step": 101
},
{
"epoch": 0.003998353619098018,
"grad_norm": 0.1059873104095459,
"learning_rate": 5.695865504800327e-05,
"loss": 0.4391,
"step": 102
},
{
"epoch": 0.004037553164383294,
"grad_norm": 0.10306891053915024,
"learning_rate": 5.6093467170257374e-05,
"loss": 0.4589,
"step": 103
},
{
"epoch": 0.004076752709668568,
"grad_norm": 0.09604241698980331,
"learning_rate": 5.522642316338268e-05,
"loss": 0.4462,
"step": 104
},
{
"epoch": 0.004115952254953843,
"grad_norm": 0.1092570349574089,
"learning_rate": 5.435778713738292e-05,
"loss": 0.4617,
"step": 105
},
{
"epoch": 0.004155151800239117,
"grad_norm": 0.10691061615943909,
"learning_rate": 5.348782368720626e-05,
"loss": 0.5161,
"step": 106
},
{
"epoch": 0.004194351345524392,
"grad_norm": 0.10758673399686813,
"learning_rate": 5.26167978121472e-05,
"loss": 0.6515,
"step": 107
},
{
"epoch": 0.0042335508908096665,
"grad_norm": 0.10809138417243958,
"learning_rate": 5.174497483512506e-05,
"loss": 0.4775,
"step": 108
},
{
"epoch": 0.004272750436094941,
"grad_norm": 0.10625655204057693,
"learning_rate": 5.0872620321864185e-05,
"loss": 0.534,
"step": 109
},
{
"epoch": 0.004311949981380216,
"grad_norm": 0.11111942678689957,
"learning_rate": 5e-05,
"loss": 0.5024,
"step": 110
},
{
"epoch": 0.004351149526665491,
"grad_norm": 0.10645933449268341,
"learning_rate": 4.912737967813583e-05,
"loss": 0.459,
"step": 111
},
{
"epoch": 0.004390349071950766,
"grad_norm": 0.11491873860359192,
"learning_rate": 4.825502516487497e-05,
"loss": 0.6275,
"step": 112
},
{
"epoch": 0.00442954861723604,
"grad_norm": 0.11639122664928436,
"learning_rate": 4.738320218785281e-05,
"loss": 0.5297,
"step": 113
},
{
"epoch": 0.004468748162521315,
"grad_norm": 0.12297159433364868,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.4937,
"step": 114
},
{
"epoch": 0.004507947707806589,
"grad_norm": 0.11242052167654037,
"learning_rate": 4.564221286261709e-05,
"loss": 0.4692,
"step": 115
},
{
"epoch": 0.004547147253091864,
"grad_norm": 0.1251300573348999,
"learning_rate": 4.477357683661734e-05,
"loss": 0.5091,
"step": 116
},
{
"epoch": 0.0045863467983771385,
"grad_norm": 0.12641409039497375,
"learning_rate": 4.390653282974264e-05,
"loss": 0.539,
"step": 117
},
{
"epoch": 0.004625546343662413,
"grad_norm": 0.1277226060628891,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.5769,
"step": 118
},
{
"epoch": 0.0046647458889476886,
"grad_norm": 0.12659917771816254,
"learning_rate": 4.2178276747988446e-05,
"loss": 0.3997,
"step": 119
},
{
"epoch": 0.004703945434232963,
"grad_norm": 0.1406412273645401,
"learning_rate": 4.131759111665349e-05,
"loss": 0.4552,
"step": 120
},
{
"epoch": 0.004743144979518238,
"grad_norm": 0.1345863938331604,
"learning_rate": 4.045955023117276e-05,
"loss": 0.4727,
"step": 121
},
{
"epoch": 0.004782344524803512,
"grad_norm": 0.13614259660243988,
"learning_rate": 3.960441545911204e-05,
"loss": 0.4239,
"step": 122
},
{
"epoch": 0.004821544070088787,
"grad_norm": 0.13695776462554932,
"learning_rate": 3.875244728280676e-05,
"loss": 0.3773,
"step": 123
},
{
"epoch": 0.0048607436153740614,
"grad_norm": 0.14491674304008484,
"learning_rate": 3.790390522001662e-05,
"loss": 0.39,
"step": 124
},
{
"epoch": 0.004899943160659336,
"grad_norm": 0.14301298558712006,
"learning_rate": 3.705904774487396e-05,
"loss": 0.3741,
"step": 125
},
{
"epoch": 0.0049391427059446115,
"grad_norm": 0.14596934616565704,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.4123,
"step": 126
},
{
"epoch": 0.004978342251229886,
"grad_norm": 0.12781678140163422,
"learning_rate": 3.5381414763863166e-05,
"loss": 0.3334,
"step": 127
},
{
"epoch": 0.005017541796515161,
"grad_norm": 0.1531793177127838,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.3903,
"step": 128
},
{
"epoch": 0.005056741341800435,
"grad_norm": 0.1685943901538849,
"learning_rate": 3.372159227714218e-05,
"loss": 0.514,
"step": 129
},
{
"epoch": 0.00509594088708571,
"grad_norm": 0.15798459947109222,
"learning_rate": 3.289899283371657e-05,
"loss": 0.4733,
"step": 130
},
{
"epoch": 0.005135140432370984,
"grad_norm": 0.16000813245773315,
"learning_rate": 3.2081602522734986e-05,
"loss": 0.4515,
"step": 131
},
{
"epoch": 0.005174339977656259,
"grad_norm": 0.15483549237251282,
"learning_rate": 3.12696703292044e-05,
"loss": 0.3787,
"step": 132
},
{
"epoch": 0.0052135395229415335,
"grad_norm": 0.18001963198184967,
"learning_rate": 3.046344357553632e-05,
"loss": 0.4393,
"step": 133
},
{
"epoch": 0.005252739068226809,
"grad_norm": 0.1790371835231781,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.3586,
"step": 134
},
{
"epoch": 0.0052919386135120835,
"grad_norm": 0.1700044572353363,
"learning_rate": 2.886908691296504e-05,
"loss": 0.4281,
"step": 135
},
{
"epoch": 0.005331138158797358,
"grad_norm": 0.1701839715242386,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.2861,
"step": 136
},
{
"epoch": 0.005370337704082633,
"grad_norm": 0.17230786383152008,
"learning_rate": 2.7300475013022663e-05,
"loss": 0.3672,
"step": 137
},
{
"epoch": 0.005409537249367907,
"grad_norm": 0.19381862878799438,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.4293,
"step": 138
},
{
"epoch": 0.005448736794653182,
"grad_norm": 0.17442116141319275,
"learning_rate": 2.575951898768315e-05,
"loss": 0.356,
"step": 139
},
{
"epoch": 0.005487936339938456,
"grad_norm": 0.18775367736816406,
"learning_rate": 2.500000000000001e-05,
"loss": 0.3817,
"step": 140
},
{
"epoch": 0.005527135885223731,
"grad_norm": 0.20612426102161407,
"learning_rate": 2.4248096254497288e-05,
"loss": 0.433,
"step": 141
},
{
"epoch": 0.005566335430509006,
"grad_norm": 0.19330035150051117,
"learning_rate": 2.350403678833976e-05,
"loss": 0.4553,
"step": 142
},
{
"epoch": 0.005605534975794281,
"grad_norm": 0.19368550181388855,
"learning_rate": 2.2768048249248648e-05,
"loss": 0.4106,
"step": 143
},
{
"epoch": 0.005644734521079556,
"grad_norm": 0.21053840219974518,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.3231,
"step": 144
},
{
"epoch": 0.00568393406636483,
"grad_norm": 0.2086849808692932,
"learning_rate": 2.132117818244771e-05,
"loss": 0.368,
"step": 145
},
{
"epoch": 0.005723133611650105,
"grad_norm": 0.20636354386806488,
"learning_rate": 2.061073738537635e-05,
"loss": 0.3489,
"step": 146
},
{
"epoch": 0.005762333156935379,
"grad_norm": 0.23584240674972534,
"learning_rate": 1.9909248842397584e-05,
"loss": 0.4045,
"step": 147
},
{
"epoch": 0.005801532702220654,
"grad_norm": 0.2612581253051758,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.39,
"step": 148
},
{
"epoch": 0.005840732247505929,
"grad_norm": 0.2916366159915924,
"learning_rate": 1.8533980447508137e-05,
"loss": 0.4611,
"step": 149
},
{
"epoch": 0.005879931792791204,
"grad_norm": 0.429953396320343,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.3297,
"step": 150
},
{
"epoch": 0.005879931792791204,
"eval_loss": 0.43333882093429565,
"eval_runtime": 2400.7314,
"eval_samples_per_second": 17.897,
"eval_steps_per_second": 8.949,
"step": 150
},
{
"epoch": 0.0059191313380764785,
"grad_norm": 0.08383464813232422,
"learning_rate": 1.7197048550474643e-05,
"loss": 0.4218,
"step": 151
},
{
"epoch": 0.005958330883361753,
"grad_norm": 0.08397100120782852,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.4163,
"step": 152
},
{
"epoch": 0.005997530428647028,
"grad_norm": 0.08535988628864288,
"learning_rate": 1.5900081996875083e-05,
"loss": 0.4975,
"step": 153
},
{
"epoch": 0.006036729973932302,
"grad_norm": 0.08967166393995285,
"learning_rate": 1.526708147705013e-05,
"loss": 0.4722,
"step": 154
},
{
"epoch": 0.006075929519217577,
"grad_norm": 0.09084314107894897,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.428,
"step": 155
},
{
"epoch": 0.006115129064502851,
"grad_norm": 0.10152707993984222,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.5041,
"step": 156
},
{
"epoch": 0.006154328609788127,
"grad_norm": 0.10302560031414032,
"learning_rate": 1.3432314919041478e-05,
"loss": 0.4389,
"step": 157
},
{
"epoch": 0.006193528155073401,
"grad_norm": 0.10672393441200256,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.5093,
"step": 158
},
{
"epoch": 0.006232727700358676,
"grad_norm": 0.11030876636505127,
"learning_rate": 1.22645209888614e-05,
"loss": 0.5054,
"step": 159
},
{
"epoch": 0.0062719272456439506,
"grad_norm": 0.12015864998102188,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.5693,
"step": 160
},
{
"epoch": 0.006311126790929225,
"grad_norm": 0.1129220575094223,
"learning_rate": 1.1142701927151456e-05,
"loss": 0.5293,
"step": 161
},
{
"epoch": 0.0063503263362145,
"grad_norm": 0.11352799832820892,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.4903,
"step": 162
},
{
"epoch": 0.006389525881499774,
"grad_norm": 0.13408681750297546,
"learning_rate": 1.006822449763537e-05,
"loss": 0.5941,
"step": 163
},
{
"epoch": 0.00642872542678505,
"grad_norm": 0.12811416387557983,
"learning_rate": 9.549150281252633e-06,
"loss": 0.5844,
"step": 164
},
{
"epoch": 0.006467924972070324,
"grad_norm": 0.12660935521125793,
"learning_rate": 9.042397785550405e-06,
"loss": 0.4857,
"step": 165
},
{
"epoch": 0.006507124517355599,
"grad_norm": 0.12374024838209152,
"learning_rate": 8.548121372247918e-06,
"loss": 0.5069,
"step": 166
},
{
"epoch": 0.0065463240626408735,
"grad_norm": 0.1355339139699936,
"learning_rate": 8.066471602728803e-06,
"loss": 0.616,
"step": 167
},
{
"epoch": 0.006585523607926148,
"grad_norm": 0.1416226476430893,
"learning_rate": 7.597595192178702e-06,
"loss": 0.5935,
"step": 168
},
{
"epoch": 0.006624723153211423,
"grad_norm": 0.12920598685741425,
"learning_rate": 7.1416349648943894e-06,
"loss": 0.5472,
"step": 169
},
{
"epoch": 0.006663922698496697,
"grad_norm": 0.1332990676164627,
"learning_rate": 6.698729810778065e-06,
"loss": 0.3977,
"step": 170
},
{
"epoch": 0.006703122243781972,
"grad_norm": 0.1304275095462799,
"learning_rate": 6.269014643030213e-06,
"loss": 0.4423,
"step": 171
},
{
"epoch": 0.006742321789067247,
"grad_norm": 0.13337342441082,
"learning_rate": 5.852620357053651e-06,
"loss": 0.3449,
"step": 172
},
{
"epoch": 0.006781521334352522,
"grad_norm": 0.14355811476707458,
"learning_rate": 5.449673790581611e-06,
"loss": 0.3999,
"step": 173
},
{
"epoch": 0.006820720879637796,
"grad_norm": 0.1402300000190735,
"learning_rate": 5.060297685041659e-06,
"loss": 0.4032,
"step": 174
},
{
"epoch": 0.006859920424923071,
"grad_norm": 0.14491398632526398,
"learning_rate": 4.684610648167503e-06,
"loss": 0.3728,
"step": 175
},
{
"epoch": 0.0068991199702083455,
"grad_norm": 0.1472439020872116,
"learning_rate": 4.322727117869951e-06,
"loss": 0.3707,
"step": 176
},
{
"epoch": 0.00693831951549362,
"grad_norm": 0.14669029414653778,
"learning_rate": 3.974757327377981e-06,
"loss": 0.4789,
"step": 177
},
{
"epoch": 0.006977519060778895,
"grad_norm": 0.15367992222309113,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.3934,
"step": 178
},
{
"epoch": 0.007016718606064169,
"grad_norm": 0.14292077720165253,
"learning_rate": 3.3209786751399187e-06,
"loss": 0.3769,
"step": 179
},
{
"epoch": 0.007055918151349445,
"grad_norm": 0.14761218428611755,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.3591,
"step": 180
},
{
"epoch": 0.007095117696634719,
"grad_norm": 0.16149510443210602,
"learning_rate": 2.724071220034158e-06,
"loss": 0.4567,
"step": 181
},
{
"epoch": 0.007134317241919994,
"grad_norm": 0.1627466082572937,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.3955,
"step": 182
},
{
"epoch": 0.007173516787205268,
"grad_norm": 0.16270063817501068,
"learning_rate": 2.1847622018482283e-06,
"loss": 0.2732,
"step": 183
},
{
"epoch": 0.007212716332490543,
"grad_norm": 0.17279773950576782,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.3798,
"step": 184
},
{
"epoch": 0.007251915877775818,
"grad_norm": 0.17936405539512634,
"learning_rate": 1.70370868554659e-06,
"loss": 0.409,
"step": 185
},
{
"epoch": 0.007291115423061092,
"grad_norm": 0.1672297567129135,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.3581,
"step": 186
},
{
"epoch": 0.007330314968346368,
"grad_norm": 0.1915307492017746,
"learning_rate": 1.2814967607382432e-06,
"loss": 0.4487,
"step": 187
},
{
"epoch": 0.007369514513631642,
"grad_norm": 0.17655663192272186,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.3467,
"step": 188
},
{
"epoch": 0.007408714058916917,
"grad_norm": 0.17965379357337952,
"learning_rate": 9.186408276168013e-07,
"loss": 0.3978,
"step": 189
},
{
"epoch": 0.007447913604202191,
"grad_norm": 0.1944323629140854,
"learning_rate": 7.596123493895991e-07,
"loss": 0.3575,
"step": 190
},
{
"epoch": 0.007487113149487466,
"grad_norm": 0.2105947881937027,
"learning_rate": 6.15582970243117e-07,
"loss": 0.3813,
"step": 191
},
{
"epoch": 0.0075263126947727405,
"grad_norm": 0.19838747382164001,
"learning_rate": 4.865965629214819e-07,
"loss": 0.4062,
"step": 192
},
{
"epoch": 0.007565512240058015,
"grad_norm": 0.18956390023231506,
"learning_rate": 3.7269241793390085e-07,
"loss": 0.3249,
"step": 193
},
{
"epoch": 0.00760471178534329,
"grad_norm": 0.20137059688568115,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.3665,
"step": 194
},
{
"epoch": 0.007643911330628565,
"grad_norm": 0.22965259850025177,
"learning_rate": 1.9026509541272275e-07,
"loss": 0.3771,
"step": 195
},
{
"epoch": 0.00768311087591384,
"grad_norm": 0.23276779055595398,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.3388,
"step": 196
},
{
"epoch": 0.007722310421199114,
"grad_norm": 0.23765169084072113,
"learning_rate": 6.852326227130834e-08,
"loss": 0.3759,
"step": 197
},
{
"epoch": 0.007761509966484389,
"grad_norm": 0.3065294027328491,
"learning_rate": 3.04586490452119e-08,
"loss": 0.3336,
"step": 198
},
{
"epoch": 0.007800709511769663,
"grad_norm": 0.2954770028591156,
"learning_rate": 7.615242180436522e-09,
"loss": 0.3031,
"step": 199
},
{
"epoch": 0.007839909057054938,
"grad_norm": 0.38077908754348755,
"learning_rate": 0.0,
"loss": 0.3565,
"step": 200
},
{
"epoch": 0.007839909057054938,
"eval_loss": 0.4325978755950928,
"eval_runtime": 2401.2634,
"eval_samples_per_second": 17.893,
"eval_steps_per_second": 8.947,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.2178385512326758e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}