{
  "best_metric": 0.6350438594818115,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.48661800486618007,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006488240064882401,
      "grad_norm": 4.320489406585693,
      "learning_rate": 5e-06,
      "loss": 2.9741,
      "step": 1
    },
    {
      "epoch": 0.006488240064882401,
      "eval_loss": 3.211296558380127,
      "eval_runtime": 82.1902,
      "eval_samples_per_second": 6.315,
      "eval_steps_per_second": 0.791,
      "step": 1
    },
    {
      "epoch": 0.012976480129764802,
      "grad_norm": 4.5113701820373535,
      "learning_rate": 1e-05,
      "loss": 3.0388,
      "step": 2
    },
    {
      "epoch": 0.019464720194647202,
      "grad_norm": 4.6927714347839355,
      "learning_rate": 1.5e-05,
      "loss": 2.9837,
      "step": 3
    },
    {
      "epoch": 0.025952960259529603,
      "grad_norm": 4.680801868438721,
      "learning_rate": 2e-05,
      "loss": 3.2726,
      "step": 4
    },
    {
      "epoch": 0.032441200324412,
      "grad_norm": 5.522418022155762,
      "learning_rate": 2.5e-05,
      "loss": 3.4348,
      "step": 5
    },
    {
      "epoch": 0.038929440389294405,
      "grad_norm": 4.993494987487793,
      "learning_rate": 3e-05,
      "loss": 3.0498,
      "step": 6
    },
    {
      "epoch": 0.0454176804541768,
      "grad_norm": 4.6689133644104,
      "learning_rate": 3.5e-05,
      "loss": 2.6851,
      "step": 7
    },
    {
      "epoch": 0.05190592051905921,
      "grad_norm": 5.748671054840088,
      "learning_rate": 4e-05,
      "loss": 2.7684,
      "step": 8
    },
    {
      "epoch": 0.058394160583941604,
      "grad_norm": 6.775564670562744,
      "learning_rate": 4.5e-05,
      "loss": 2.3901,
      "step": 9
    },
    {
      "epoch": 0.064882400648824,
      "grad_norm": 6.568486213684082,
      "learning_rate": 5e-05,
      "loss": 1.9956,
      "step": 10
    },
    {
      "epoch": 0.07137064071370641,
      "grad_norm": 4.80515718460083,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.5494,
      "step": 11
    },
    {
      "epoch": 0.07785888077858881,
      "grad_norm": 4.092294692993164,
      "learning_rate": 6e-05,
      "loss": 1.2444,
      "step": 12
    },
    {
      "epoch": 0.08434712084347121,
      "grad_norm": 2.87087345123291,
      "learning_rate": 6.500000000000001e-05,
      "loss": 1.0718,
      "step": 13
    },
    {
      "epoch": 0.0908353609083536,
      "grad_norm": 4.096825122833252,
      "learning_rate": 7e-05,
      "loss": 1.0977,
      "step": 14
    },
    {
      "epoch": 0.09732360097323602,
      "grad_norm": 4.781303405761719,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.98,
      "step": 15
    },
    {
      "epoch": 0.10381184103811841,
      "grad_norm": 3.2860639095306396,
      "learning_rate": 8e-05,
      "loss": 0.9196,
      "step": 16
    },
    {
      "epoch": 0.11030008110300081,
      "grad_norm": 2.540112257003784,
      "learning_rate": 8.5e-05,
      "loss": 0.7389,
      "step": 17
    },
    {
      "epoch": 0.11678832116788321,
      "grad_norm": 2.675224542617798,
      "learning_rate": 9e-05,
      "loss": 0.6798,
      "step": 18
    },
    {
      "epoch": 0.12327656123276562,
      "grad_norm": 2.5746631622314453,
      "learning_rate": 9.5e-05,
      "loss": 0.6886,
      "step": 19
    },
    {
      "epoch": 0.129764801297648,
      "grad_norm": 2.6324894428253174,
      "learning_rate": 0.0001,
      "loss": 0.8362,
      "step": 20
    },
    {
      "epoch": 0.1362530413625304,
      "grad_norm": 2.355058193206787,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.8848,
      "step": 21
    },
    {
      "epoch": 0.14274128142741282,
      "grad_norm": 2.648463010787964,
      "learning_rate": 9.967408676742751e-05,
      "loss": 0.6325,
      "step": 22
    },
    {
      "epoch": 0.1492295214922952,
      "grad_norm": 2.4602627754211426,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.7505,
      "step": 23
    },
    {
      "epoch": 0.15571776155717762,
      "grad_norm": 1.898608922958374,
      "learning_rate": 9.870059584711668e-05,
      "loss": 0.6155,
      "step": 24
    },
    {
      "epoch": 0.16220600162206,
      "grad_norm": 1.9833636283874512,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.5744,
      "step": 25
    },
    {
      "epoch": 0.16869424168694241,
      "grad_norm": 2.158081293106079,
      "learning_rate": 9.709221818197624e-05,
      "loss": 0.695,
      "step": 26
    },
    {
      "epoch": 0.17518248175182483,
      "grad_norm": 1.8228775262832642,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.638,
      "step": 27
    },
    {
      "epoch": 0.1816707218167072,
      "grad_norm": 1.7093753814697266,
      "learning_rate": 9.486992143456792e-05,
      "loss": 0.6839,
      "step": 28
    },
    {
      "epoch": 0.18815896188158962,
      "grad_norm": 1.9845768213272095,
      "learning_rate": 9.353730385598887e-05,
      "loss": 0.6063,
      "step": 29
    },
    {
      "epoch": 0.19464720194647203,
      "grad_norm": 2.0481371879577637,
      "learning_rate": 9.206267664155907e-05,
      "loss": 0.7076,
      "step": 30
    },
    {
      "epoch": 0.20113544201135442,
      "grad_norm": 1.6540613174438477,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.6592,
      "step": 31
    },
    {
      "epoch": 0.20762368207623683,
      "grad_norm": 1.7876536846160889,
      "learning_rate": 8.870708053195413e-05,
      "loss": 0.6407,
      "step": 32
    },
    {
      "epoch": 0.2141119221411192,
      "grad_norm": 1.8859717845916748,
      "learning_rate": 8.683705689382024e-05,
      "loss": 0.7916,
      "step": 33
    },
    {
      "epoch": 0.22060016220600162,
      "grad_norm": 1.7074021100997925,
      "learning_rate": 8.484687843276469e-05,
      "loss": 0.482,
      "step": 34
    },
    {
      "epoch": 0.22708840227088403,
      "grad_norm": 1.5708434581756592,
      "learning_rate": 8.274303669726426e-05,
      "loss": 0.5889,
      "step": 35
    },
    {
      "epoch": 0.23357664233576642,
      "grad_norm": 1.7154409885406494,
      "learning_rate": 8.053239398177191e-05,
      "loss": 0.8398,
      "step": 36
    },
    {
      "epoch": 0.24006488240064883,
      "grad_norm": 1.9261146783828735,
      "learning_rate": 7.822216094333847e-05,
      "loss": 0.8207,
      "step": 37
    },
    {
      "epoch": 0.24655312246553124,
      "grad_norm": 1.4875516891479492,
      "learning_rate": 7.58198730819481e-05,
      "loss": 0.5842,
      "step": 38
    },
    {
      "epoch": 0.25304136253041365,
      "grad_norm": 1.426688551902771,
      "learning_rate": 7.333336616128369e-05,
      "loss": 0.5933,
      "step": 39
    },
    {
      "epoch": 0.259529602595296,
      "grad_norm": 1.401323914527893,
      "learning_rate": 7.077075065009433e-05,
      "loss": 0.5723,
      "step": 40
    },
    {
      "epoch": 0.2660178426601784,
      "grad_norm": 1.319161057472229,
      "learning_rate": 6.814038526753205e-05,
      "loss": 0.5813,
      "step": 41
    },
    {
      "epoch": 0.2725060827250608,
      "grad_norm": 1.4045952558517456,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.5935,
      "step": 42
    },
    {
      "epoch": 0.27899432278994324,
      "grad_norm": 1.974134087562561,
      "learning_rate": 6.271091670967436e-05,
      "loss": 0.6298,
      "step": 43
    },
    {
      "epoch": 0.28548256285482565,
      "grad_norm": 1.4510246515274048,
      "learning_rate": 5.992952333228728e-05,
      "loss": 0.5674,
      "step": 44
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 1.4368536472320557,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 0.7143,
      "step": 45
    },
    {
      "epoch": 0.2984590429845904,
      "grad_norm": 1.5513255596160889,
      "learning_rate": 5.427875042394199e-05,
      "loss": 0.5459,
      "step": 46
    },
    {
      "epoch": 0.30494728304947283,
      "grad_norm": 1.2531628608703613,
      "learning_rate": 5.142780253968481e-05,
      "loss": 0.6001,
      "step": 47
    },
    {
      "epoch": 0.31143552311435524,
      "grad_norm": 1.6614936590194702,
      "learning_rate": 4.85721974603152e-05,
      "loss": 0.7361,
      "step": 48
    },
    {
      "epoch": 0.31792376317923765,
      "grad_norm": 1.7235716581344604,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 0.7437,
      "step": 49
    },
    {
      "epoch": 0.32441200324412,
      "grad_norm": 1.3541579246520996,
      "learning_rate": 4.288425808633575e-05,
      "loss": 0.6751,
      "step": 50
    },
    {
      "epoch": 0.32441200324412,
      "eval_loss": 0.6350438594818115,
      "eval_runtime": 82.5647,
      "eval_samples_per_second": 6.286,
      "eval_steps_per_second": 0.787,
      "step": 50
    },
    {
      "epoch": 0.3309002433090024,
      "grad_norm": 1.1093209981918335,
      "learning_rate": 4.007047666771274e-05,
      "loss": 0.4294,
      "step": 51
    },
    {
      "epoch": 0.33738848337388483,
      "grad_norm": 1.7794939279556274,
      "learning_rate": 3.728908329032567e-05,
      "loss": 0.5775,
      "step": 52
    },
    {
      "epoch": 0.34387672343876724,
      "grad_norm": 1.381375789642334,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.6621,
      "step": 53
    },
    {
      "epoch": 0.35036496350364965,
      "grad_norm": 1.4836828708648682,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 0.7448,
      "step": 54
    },
    {
      "epoch": 0.35685320356853206,
      "grad_norm": 1.253775954246521,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.5938,
      "step": 55
    },
    {
      "epoch": 0.3633414436334144,
      "grad_norm": 1.5513309240341187,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 0.6217,
      "step": 56
    },
    {
      "epoch": 0.36982968369829683,
      "grad_norm": 1.777928352355957,
      "learning_rate": 2.418012691805191e-05,
      "loss": 0.5749,
      "step": 57
    },
    {
      "epoch": 0.37631792376317924,
      "grad_norm": 1.605820655822754,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 0.6827,
      "step": 58
    },
    {
      "epoch": 0.38280616382806165,
      "grad_norm": 1.2288376092910767,
      "learning_rate": 1.946760601822809e-05,
      "loss": 0.5258,
      "step": 59
    },
    {
      "epoch": 0.38929440389294406,
      "grad_norm": 1.3616653680801392,
      "learning_rate": 1.725696330273575e-05,
      "loss": 0.5963,
      "step": 60
    },
    {
      "epoch": 0.3957826439578264,
      "grad_norm": 1.4875752925872803,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 0.6351,
      "step": 61
    },
    {
      "epoch": 0.40227088402270883,
      "grad_norm": 1.4477332830429077,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 0.5471,
      "step": 62
    },
    {
      "epoch": 0.40875912408759124,
      "grad_norm": 1.8846336603164673,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 0.7482,
      "step": 63
    },
    {
      "epoch": 0.41524736415247365,
      "grad_norm": 1.851898431777954,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.6356,
      "step": 64
    },
    {
      "epoch": 0.42173560421735606,
      "grad_norm": 1.6405341625213623,
      "learning_rate": 7.937323358440935e-06,
      "loss": 0.7012,
      "step": 65
    },
    {
      "epoch": 0.4282238442822384,
      "grad_norm": 1.5401231050491333,
      "learning_rate": 6.462696144011149e-06,
      "loss": 0.5817,
      "step": 66
    },
    {
      "epoch": 0.43471208434712083,
      "grad_norm": 1.2834601402282715,
      "learning_rate": 5.13007856543209e-06,
      "loss": 0.6662,
      "step": 67
    },
    {
      "epoch": 0.44120032441200324,
      "grad_norm": 1.7023755311965942,
      "learning_rate": 3.9438173442575e-06,
      "loss": 0.7074,
      "step": 68
    },
    {
      "epoch": 0.44768856447688565,
      "grad_norm": 1.2349250316619873,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 0.5058,
      "step": 69
    },
    {
      "epoch": 0.45417680454176806,
      "grad_norm": 1.5026576519012451,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 0.7246,
      "step": 70
    },
    {
      "epoch": 0.4606650446066504,
      "grad_norm": 1.2308121919631958,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 0.7091,
      "step": 71
    },
    {
      "epoch": 0.46715328467153283,
      "grad_norm": 1.6818972826004028,
      "learning_rate": 7.323082076153509e-07,
      "loss": 0.6945,
      "step": 72
    },
    {
      "epoch": 0.47364152473641524,
      "grad_norm": 1.253867268562317,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 0.5318,
      "step": 73
    },
    {
      "epoch": 0.48012976480129765,
      "grad_norm": 1.5630402565002441,
      "learning_rate": 8.15448036932176e-08,
      "loss": 0.7055,
      "step": 74
    },
    {
      "epoch": 0.48661800486618007,
      "grad_norm": 1.377374291419983,
      "learning_rate": 0.0,
      "loss": 0.5693,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.451323701362688e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}