|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9955156950672643,
  "eval_steps": 500,
  "global_step": 1002,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029895366218236172,
      "grad_norm": 1.7227757373325907,
      "learning_rate": 5e-06,
      "loss": 0.7996,
      "step": 10
    },
    {
      "epoch": 0.059790732436472344,
      "grad_norm": 0.9611203507591377,
      "learning_rate": 5e-06,
      "loss": 0.7135,
      "step": 20
    },
    {
      "epoch": 0.08968609865470852,
      "grad_norm": 0.9725569590195154,
      "learning_rate": 5e-06,
      "loss": 0.6951,
      "step": 30
    },
    {
      "epoch": 0.11958146487294469,
      "grad_norm": 3.2811538218803395,
      "learning_rate": 5e-06,
      "loss": 0.6797,
      "step": 40
    },
    {
      "epoch": 0.14947683109118087,
      "grad_norm": 0.8573391570675103,
      "learning_rate": 5e-06,
      "loss": 0.6784,
      "step": 50
    },
    {
      "epoch": 0.17937219730941703,
      "grad_norm": 0.868925104854094,
      "learning_rate": 5e-06,
      "loss": 0.6581,
      "step": 60
    },
    {
      "epoch": 0.20926756352765322,
      "grad_norm": 0.8638395884318166,
      "learning_rate": 5e-06,
      "loss": 0.6555,
      "step": 70
    },
    {
      "epoch": 0.23916292974588937,
      "grad_norm": 0.8066338987622645,
      "learning_rate": 5e-06,
      "loss": 0.648,
      "step": 80
    },
    {
      "epoch": 0.26905829596412556,
      "grad_norm": 0.959241666467243,
      "learning_rate": 5e-06,
      "loss": 0.6408,
      "step": 90
    },
    {
      "epoch": 0.29895366218236175,
      "grad_norm": 0.9081144806308692,
      "learning_rate": 5e-06,
      "loss": 0.6316,
      "step": 100
    },
    {
      "epoch": 0.32884902840059793,
      "grad_norm": 0.527888295759615,
      "learning_rate": 5e-06,
      "loss": 0.6291,
      "step": 110
    },
    {
      "epoch": 0.35874439461883406,
      "grad_norm": 0.530904898441949,
      "learning_rate": 5e-06,
      "loss": 0.6306,
      "step": 120
    },
    {
      "epoch": 0.38863976083707025,
      "grad_norm": 1.340210203403077,
      "learning_rate": 5e-06,
      "loss": 0.6297,
      "step": 130
    },
    {
      "epoch": 0.41853512705530643,
      "grad_norm": 0.6798935279587197,
      "learning_rate": 5e-06,
      "loss": 0.6174,
      "step": 140
    },
    {
      "epoch": 0.4484304932735426,
      "grad_norm": 0.5563075616885166,
      "learning_rate": 5e-06,
      "loss": 0.6267,
      "step": 150
    },
    {
      "epoch": 0.47832585949177875,
      "grad_norm": 0.5882371698166489,
      "learning_rate": 5e-06,
      "loss": 0.6287,
      "step": 160
    },
    {
      "epoch": 0.5082212257100149,
      "grad_norm": 0.8190976883281922,
      "learning_rate": 5e-06,
      "loss": 0.6291,
      "step": 170
    },
    {
      "epoch": 0.5381165919282511,
      "grad_norm": 1.1616952378221264,
      "learning_rate": 5e-06,
      "loss": 0.622,
      "step": 180
    },
    {
      "epoch": 0.5680119581464873,
      "grad_norm": 0.9342456317602823,
      "learning_rate": 5e-06,
      "loss": 0.6223,
      "step": 190
    },
    {
      "epoch": 0.5979073243647235,
      "grad_norm": 0.5634508480434365,
      "learning_rate": 5e-06,
      "loss": 0.6224,
      "step": 200
    },
    {
      "epoch": 0.6278026905829597,
      "grad_norm": 0.5687247783350089,
      "learning_rate": 5e-06,
      "loss": 0.6165,
      "step": 210
    },
    {
      "epoch": 0.6576980568011959,
      "grad_norm": 0.4883258194508368,
      "learning_rate": 5e-06,
      "loss": 0.6152,
      "step": 220
    },
    {
      "epoch": 0.6875934230194319,
      "grad_norm": 0.8486304132789366,
      "learning_rate": 5e-06,
      "loss": 0.6159,
      "step": 230
    },
    {
      "epoch": 0.7174887892376681,
      "grad_norm": 0.6238248908563286,
      "learning_rate": 5e-06,
      "loss": 0.6162,
      "step": 240
    },
    {
      "epoch": 0.7473841554559043,
      "grad_norm": 0.6044745525104007,
      "learning_rate": 5e-06,
      "loss": 0.6234,
      "step": 250
    },
    {
      "epoch": 0.7772795216741405,
      "grad_norm": 0.9451375549677865,
      "learning_rate": 5e-06,
      "loss": 0.6111,
      "step": 260
    },
    {
      "epoch": 0.8071748878923767,
      "grad_norm": 0.45825630250172444,
      "learning_rate": 5e-06,
      "loss": 0.6192,
      "step": 270
    },
    {
      "epoch": 0.8370702541106129,
      "grad_norm": 0.6319201283396348,
      "learning_rate": 5e-06,
      "loss": 0.6096,
      "step": 280
    },
    {
      "epoch": 0.866965620328849,
      "grad_norm": 0.7657293379708856,
      "learning_rate": 5e-06,
      "loss": 0.6141,
      "step": 290
    },
    {
      "epoch": 0.8968609865470852,
      "grad_norm": 0.9893263705993499,
      "learning_rate": 5e-06,
      "loss": 0.6089,
      "step": 300
    },
    {
      "epoch": 0.9267563527653214,
      "grad_norm": 0.7173372190639082,
      "learning_rate": 5e-06,
      "loss": 0.6111,
      "step": 310
    },
    {
      "epoch": 0.9566517189835575,
      "grad_norm": 0.9811853051168589,
      "learning_rate": 5e-06,
      "loss": 0.6102,
      "step": 320
    },
    {
      "epoch": 0.9865470852017937,
      "grad_norm": 0.7112900733743598,
      "learning_rate": 5e-06,
      "loss": 0.61,
      "step": 330
    },
    {
      "epoch": 0.9985052316890882,
      "eval_loss": 0.6036205291748047,
      "eval_runtime": 179.7257,
      "eval_samples_per_second": 50.143,
      "eval_steps_per_second": 0.395,
      "step": 334
    },
    {
      "epoch": 1.0164424514200299,
      "grad_norm": 0.798448788433312,
      "learning_rate": 5e-06,
      "loss": 0.5844,
      "step": 340
    },
    {
      "epoch": 1.046337817638266,
      "grad_norm": 0.4862066251039951,
      "learning_rate": 5e-06,
      "loss": 0.5673,
      "step": 350
    },
    {
      "epoch": 1.0762331838565022,
      "grad_norm": 0.4993076971258802,
      "learning_rate": 5e-06,
      "loss": 0.5527,
      "step": 360
    },
    {
      "epoch": 1.1061285500747384,
      "grad_norm": 0.5150950615801339,
      "learning_rate": 5e-06,
      "loss": 0.5634,
      "step": 370
    },
    {
      "epoch": 1.1360239162929746,
      "grad_norm": 0.5048692828921545,
      "learning_rate": 5e-06,
      "loss": 0.5586,
      "step": 380
    },
    {
      "epoch": 1.1659192825112108,
      "grad_norm": 0.6753247004760012,
      "learning_rate": 5e-06,
      "loss": 0.564,
      "step": 390
    },
    {
      "epoch": 1.195814648729447,
      "grad_norm": 0.6573750836477811,
      "learning_rate": 5e-06,
      "loss": 0.5671,
      "step": 400
    },
    {
      "epoch": 1.2257100149476832,
      "grad_norm": 0.4776374496819678,
      "learning_rate": 5e-06,
      "loss": 0.5606,
      "step": 410
    },
    {
      "epoch": 1.2556053811659194,
      "grad_norm": 0.6972347006108826,
      "learning_rate": 5e-06,
      "loss": 0.5623,
      "step": 420
    },
    {
      "epoch": 1.2855007473841553,
      "grad_norm": 0.5751682932357886,
      "learning_rate": 5e-06,
      "loss": 0.5638,
      "step": 430
    },
    {
      "epoch": 1.3153961136023917,
      "grad_norm": 0.46990283221273976,
      "learning_rate": 5e-06,
      "loss": 0.5613,
      "step": 440
    },
    {
      "epoch": 1.3452914798206277,
      "grad_norm": 0.48832783516725714,
      "learning_rate": 5e-06,
      "loss": 0.5618,
      "step": 450
    },
    {
      "epoch": 1.375186846038864,
      "grad_norm": 0.577664523498894,
      "learning_rate": 5e-06,
      "loss": 0.5576,
      "step": 460
    },
    {
      "epoch": 1.4050822122571,
      "grad_norm": 1.1296329041134021,
      "learning_rate": 5e-06,
      "loss": 0.5669,
      "step": 470
    },
    {
      "epoch": 1.4349775784753362,
      "grad_norm": 0.5151995683265159,
      "learning_rate": 5e-06,
      "loss": 0.5574,
      "step": 480
    },
    {
      "epoch": 1.4648729446935724,
      "grad_norm": 0.5099370836269055,
      "learning_rate": 5e-06,
      "loss": 0.5595,
      "step": 490
    },
    {
      "epoch": 1.4947683109118086,
      "grad_norm": 1.9533464161877931,
      "learning_rate": 5e-06,
      "loss": 0.5537,
      "step": 500
    },
    {
      "epoch": 1.5246636771300448,
      "grad_norm": 0.48399021315551105,
      "learning_rate": 5e-06,
      "loss": 0.5495,
      "step": 510
    },
    {
      "epoch": 1.554559043348281,
      "grad_norm": 0.6214335190059805,
      "learning_rate": 5e-06,
      "loss": 0.5613,
      "step": 520
    },
    {
      "epoch": 1.5844544095665172,
      "grad_norm": 0.5861564517777224,
      "learning_rate": 5e-06,
      "loss": 0.5541,
      "step": 530
    },
    {
      "epoch": 1.6143497757847534,
      "grad_norm": 0.49297078946119194,
      "learning_rate": 5e-06,
      "loss": 0.5586,
      "step": 540
    },
    {
      "epoch": 1.6442451420029895,
      "grad_norm": 0.5714533139717856,
      "learning_rate": 5e-06,
      "loss": 0.5593,
      "step": 550
    },
    {
      "epoch": 1.6741405082212257,
      "grad_norm": 0.5006114767351605,
      "learning_rate": 5e-06,
      "loss": 0.5671,
      "step": 560
    },
    {
      "epoch": 1.704035874439462,
      "grad_norm": 0.46703065624186,
      "learning_rate": 5e-06,
      "loss": 0.5598,
      "step": 570
    },
    {
      "epoch": 1.733931240657698,
      "grad_norm": 0.7730998357221065,
      "learning_rate": 5e-06,
      "loss": 0.5634,
      "step": 580
    },
    {
      "epoch": 1.7638266068759343,
      "grad_norm": 0.49277366694442,
      "learning_rate": 5e-06,
      "loss": 0.5605,
      "step": 590
    },
    {
      "epoch": 1.7937219730941703,
      "grad_norm": 0.5504433736301172,
      "learning_rate": 5e-06,
      "loss": 0.5561,
      "step": 600
    },
    {
      "epoch": 1.8236173393124067,
      "grad_norm": 0.5967647274743713,
      "learning_rate": 5e-06,
      "loss": 0.566,
      "step": 610
    },
    {
      "epoch": 1.8535127055306426,
      "grad_norm": 0.48890667749726296,
      "learning_rate": 5e-06,
      "loss": 0.5605,
      "step": 620
    },
    {
      "epoch": 1.883408071748879,
      "grad_norm": 0.4629644980754441,
      "learning_rate": 5e-06,
      "loss": 0.5595,
      "step": 630
    },
    {
      "epoch": 1.913303437967115,
      "grad_norm": 0.6662601841176711,
      "learning_rate": 5e-06,
      "loss": 0.5558,
      "step": 640
    },
    {
      "epoch": 1.9431988041853514,
      "grad_norm": 0.49445892345223935,
      "learning_rate": 5e-06,
      "loss": 0.5656,
      "step": 650
    },
    {
      "epoch": 1.9730941704035874,
      "grad_norm": 0.49038356562054847,
      "learning_rate": 5e-06,
      "loss": 0.5516,
      "step": 660
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5952211022377014,
      "eval_runtime": 180.2664,
      "eval_samples_per_second": 49.993,
      "eval_steps_per_second": 0.394,
      "step": 669
    },
    {
      "epoch": 2.0029895366218238,
      "grad_norm": 0.8976719305214347,
      "learning_rate": 5e-06,
      "loss": 0.5594,
      "step": 670
    },
    {
      "epoch": 2.0328849028400597,
      "grad_norm": 0.5732430368808332,
      "learning_rate": 5e-06,
      "loss": 0.5126,
      "step": 680
    },
    {
      "epoch": 2.062780269058296,
      "grad_norm": 0.711257434778952,
      "learning_rate": 5e-06,
      "loss": 0.5066,
      "step": 690
    },
    {
      "epoch": 2.092675635276532,
      "grad_norm": 0.47578936442907804,
      "learning_rate": 5e-06,
      "loss": 0.5091,
      "step": 700
    },
    {
      "epoch": 2.1225710014947685,
      "grad_norm": 0.6502616890155823,
      "learning_rate": 5e-06,
      "loss": 0.502,
      "step": 710
    },
    {
      "epoch": 2.1524663677130045,
      "grad_norm": 0.5259070657358127,
      "learning_rate": 5e-06,
      "loss": 0.5098,
      "step": 720
    },
    {
      "epoch": 2.1823617339312404,
      "grad_norm": 0.48835956768596295,
      "learning_rate": 5e-06,
      "loss": 0.5056,
      "step": 730
    },
    {
      "epoch": 2.212257100149477,
      "grad_norm": 0.49943490023849413,
      "learning_rate": 5e-06,
      "loss": 0.5075,
      "step": 740
    },
    {
      "epoch": 2.242152466367713,
      "grad_norm": 0.5545099395398798,
      "learning_rate": 5e-06,
      "loss": 0.5119,
      "step": 750
    },
    {
      "epoch": 2.2720478325859492,
      "grad_norm": 0.49299741003621683,
      "learning_rate": 5e-06,
      "loss": 0.5117,
      "step": 760
    },
    {
      "epoch": 2.301943198804185,
      "grad_norm": 0.6118684934934306,
      "learning_rate": 5e-06,
      "loss": 0.5072,
      "step": 770
    },
    {
      "epoch": 2.3318385650224216,
      "grad_norm": 0.5626142080265026,
      "learning_rate": 5e-06,
      "loss": 0.51,
      "step": 780
    },
    {
      "epoch": 2.3617339312406576,
      "grad_norm": 0.6010691540792277,
      "learning_rate": 5e-06,
      "loss": 0.5161,
      "step": 790
    },
    {
      "epoch": 2.391629297458894,
      "grad_norm": 0.5047015995016646,
      "learning_rate": 5e-06,
      "loss": 0.5153,
      "step": 800
    },
    {
      "epoch": 2.42152466367713,
      "grad_norm": 0.6949616571712588,
      "learning_rate": 5e-06,
      "loss": 0.5149,
      "step": 810
    },
    {
      "epoch": 2.4514200298953663,
      "grad_norm": 0.5728721236291187,
      "learning_rate": 5e-06,
      "loss": 0.5152,
      "step": 820
    },
    {
      "epoch": 2.4813153961136023,
      "grad_norm": 0.6710964944926828,
      "learning_rate": 5e-06,
      "loss": 0.5101,
      "step": 830
    },
    {
      "epoch": 2.5112107623318387,
      "grad_norm": 0.5811240824837944,
      "learning_rate": 5e-06,
      "loss": 0.5178,
      "step": 840
    },
    {
      "epoch": 2.5411061285500747,
      "grad_norm": 0.4751656854800782,
      "learning_rate": 5e-06,
      "loss": 0.5093,
      "step": 850
    },
    {
      "epoch": 2.5710014947683106,
      "grad_norm": 0.5791534437598944,
      "learning_rate": 5e-06,
      "loss": 0.5172,
      "step": 860
    },
    {
      "epoch": 2.600896860986547,
      "grad_norm": 0.6285700826659869,
      "learning_rate": 5e-06,
      "loss": 0.5114,
      "step": 870
    },
    {
      "epoch": 2.6307922272047835,
      "grad_norm": 0.5891524124365762,
      "learning_rate": 5e-06,
      "loss": 0.5155,
      "step": 880
    },
    {
      "epoch": 2.6606875934230194,
      "grad_norm": 0.5554272805920556,
      "learning_rate": 5e-06,
      "loss": 0.5136,
      "step": 890
    },
    {
      "epoch": 2.6905829596412554,
      "grad_norm": 0.5212686694617318,
      "learning_rate": 5e-06,
      "loss": 0.5057,
      "step": 900
    },
    {
      "epoch": 2.720478325859492,
      "grad_norm": 0.5049518346407917,
      "learning_rate": 5e-06,
      "loss": 0.5124,
      "step": 910
    },
    {
      "epoch": 2.750373692077728,
      "grad_norm": 0.5199545471660975,
      "learning_rate": 5e-06,
      "loss": 0.511,
      "step": 920
    },
    {
      "epoch": 2.780269058295964,
      "grad_norm": 0.5504296104314776,
      "learning_rate": 5e-06,
      "loss": 0.5131,
      "step": 930
    },
    {
      "epoch": 2.8101644245142,
      "grad_norm": 0.6934304296183573,
      "learning_rate": 5e-06,
      "loss": 0.5141,
      "step": 940
    },
    {
      "epoch": 2.8400597907324365,
      "grad_norm": 0.5471886063573883,
      "learning_rate": 5e-06,
      "loss": 0.5085,
      "step": 950
    },
    {
      "epoch": 2.8699551569506725,
      "grad_norm": 0.5910070248229388,
      "learning_rate": 5e-06,
      "loss": 0.511,
      "step": 960
    },
    {
      "epoch": 2.899850523168909,
      "grad_norm": 0.7084764050035022,
      "learning_rate": 5e-06,
      "loss": 0.512,
      "step": 970
    },
    {
      "epoch": 2.929745889387145,
      "grad_norm": 0.4994611316507679,
      "learning_rate": 5e-06,
      "loss": 0.5083,
      "step": 980
    },
    {
      "epoch": 2.9596412556053813,
      "grad_norm": 0.5300263474194525,
      "learning_rate": 5e-06,
      "loss": 0.5077,
      "step": 990
    },
    {
      "epoch": 2.9895366218236172,
      "grad_norm": 0.4912770545978263,
      "learning_rate": 5e-06,
      "loss": 0.5082,
      "step": 1000
    },
    {
      "epoch": 2.9955156950672643,
      "eval_loss": 0.5980154871940613,
      "eval_runtime": 180.5721,
      "eval_samples_per_second": 49.908,
      "eval_steps_per_second": 0.393,
      "step": 1002
    },
    {
      "epoch": 2.9955156950672643,
      "step": 1002,
      "total_flos": 1677968560619520.0,
      "train_loss": 0.5696332816354291,
      "train_runtime": 30032.1545,
      "train_samples_per_second": 17.104,
      "train_steps_per_second": 0.033
    }
  ],
  "logging_steps": 10,
  "max_steps": 1002,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1677968560619520.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}