{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995419147961521,
"eval_steps": 100,
"global_step": 1091,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009161704076958314,
"grad_norm": 2.538351535797119,
"learning_rate": 9.09090909090909e-09,
"logits/chosen": -0.612647533416748,
"logits/rejected": -0.43005383014678955,
"logps/chosen": -269.1338195800781,
"logps/rejected": -265.996826171875,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.009161704076958314,
"grad_norm": 3.99981427192688,
"learning_rate": 9.09090909090909e-08,
"logits/chosen": -0.7947888374328613,
"logits/rejected": -0.8272799849510193,
"logps/chosen": -172.9255828857422,
"logps/rejected": -192.15106201171875,
"loss": 0.6942,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.012904312461614609,
"rewards/margins": -0.015133652836084366,
"rewards/rejected": 0.002229340374469757,
"step": 10
},
{
"epoch": 0.01832340815391663,
"grad_norm": 1.3273310661315918,
"learning_rate": 1.818181818181818e-07,
"logits/chosen": -0.7671724557876587,
"logits/rejected": -0.7643235921859741,
"logps/chosen": -177.4793243408203,
"logps/rejected": -201.07656860351562,
"loss": 0.6933,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.006979703903198242,
"rewards/margins": -0.00390571728348732,
"rewards/rejected": -0.003073985455557704,
"step": 20
},
{
"epoch": 0.027485112230874943,
"grad_norm": 2.06701922416687,
"learning_rate": 2.727272727272727e-07,
"logits/chosen": -0.7477768659591675,
"logits/rejected": -0.770847737789154,
"logps/chosen": -219.7743377685547,
"logps/rejected": -253.83798217773438,
"loss": 0.6933,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -0.007790957577526569,
"rewards/margins": 0.0053612953051924706,
"rewards/rejected": -0.013152251951396465,
"step": 30
},
{
"epoch": 0.03664681630783326,
"grad_norm": 2.176619052886963,
"learning_rate": 3.636363636363636e-07,
"logits/chosen": -0.6840143203735352,
"logits/rejected": -0.6969764828681946,
"logps/chosen": -159.89796447753906,
"logps/rejected": -218.1041717529297,
"loss": 0.6902,
"rewards/accuracies": 0.375,
"rewards/chosen": 0.0005666827782988548,
"rewards/margins": 0.012707608751952648,
"rewards/rejected": -0.012140927836298943,
"step": 40
},
{
"epoch": 0.04580852038479157,
"grad_norm": 2.182387590408325,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": -0.5698288083076477,
"logits/rejected": -0.7136921286582947,
"logps/chosen": -203.5851287841797,
"logps/rejected": -231.3826446533203,
"loss": 0.6878,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.01093050092458725,
"rewards/margins": 0.021884005516767502,
"rewards/rejected": -0.010953502729535103,
"step": 50
},
{
"epoch": 0.054970224461749886,
"grad_norm": 1.757957935333252,
"learning_rate": 5.454545454545454e-07,
"logits/chosen": -0.7015949487686157,
"logits/rejected": -0.7727667689323425,
"logps/chosen": -233.5804901123047,
"logps/rejected": -278.2530212402344,
"loss": 0.6876,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 0.021485041826963425,
"rewards/margins": 0.0006370929768308997,
"rewards/rejected": 0.020847950130701065,
"step": 60
},
{
"epoch": 0.0641319285387082,
"grad_norm": 2.0394017696380615,
"learning_rate": 6.363636363636363e-07,
"logits/chosen": -0.8689748644828796,
"logits/rejected": -0.808820366859436,
"logps/chosen": -147.0337371826172,
"logps/rejected": -209.3056182861328,
"loss": 0.6815,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.05839823558926582,
"rewards/margins": 0.03576263412833214,
"rewards/rejected": 0.022635603323578835,
"step": 70
},
{
"epoch": 0.07329363261566652,
"grad_norm": 3.2475733757019043,
"learning_rate": 7.272727272727272e-07,
"logits/chosen": -0.7972738742828369,
"logits/rejected": -0.8733490109443665,
"logps/chosen": -202.31976318359375,
"logps/rejected": -230.676513671875,
"loss": 0.6706,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.0555371418595314,
"rewards/margins": 0.04019620642066002,
"rewards/rejected": 0.015340929850935936,
"step": 80
},
{
"epoch": 0.08245533669262482,
"grad_norm": 2.1004042625427246,
"learning_rate": 8.181818181818182e-07,
"logits/chosen": -0.7756220698356628,
"logits/rejected": -0.7883769869804382,
"logps/chosen": -181.07911682128906,
"logps/rejected": -253.35226440429688,
"loss": 0.657,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.09145340323448181,
"rewards/margins": 0.05497417598962784,
"rewards/rejected": 0.03647923097014427,
"step": 90
},
{
"epoch": 0.09161704076958314,
"grad_norm": 1.711387276649475,
"learning_rate": 9.09090909090909e-07,
"logits/chosen": -0.6988880634307861,
"logits/rejected": -0.695970356464386,
"logps/chosen": -210.9344940185547,
"logps/rejected": -292.63885498046875,
"loss": 0.6437,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.2389262169599533,
"rewards/margins": 0.13863424956798553,
"rewards/rejected": 0.10029196739196777,
"step": 100
},
{
"epoch": 0.09161704076958314,
"eval_logits/chosen": -0.7206078171730042,
"eval_logits/rejected": -0.7319283485412598,
"eval_logps/chosen": -228.31155395507812,
"eval_logps/rejected": -265.4963073730469,
"eval_loss": 0.6128209829330444,
"eval_rewards/accuracies": 0.7254335284233093,
"eval_rewards/chosen": 0.30496734380722046,
"eval_rewards/margins": 0.23110172152519226,
"eval_rewards/rejected": 0.0738656222820282,
"eval_runtime": 264.1865,
"eval_samples_per_second": 10.459,
"eval_steps_per_second": 1.31,
"step": 100
},
{
"epoch": 0.10077874484654145,
"grad_norm": 2.3657712936401367,
"learning_rate": 1e-06,
"logits/chosen": -0.9458168745040894,
"logits/rejected": -0.912136435508728,
"logps/chosen": -266.46087646484375,
"logps/rejected": -255.1190643310547,
"loss": 0.6226,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.24092476069927216,
"rewards/margins": 0.15844842791557312,
"rewards/rejected": 0.08247633278369904,
"step": 110
},
{
"epoch": 0.10994044892349977,
"grad_norm": 1.905785083770752,
"learning_rate": 9.997436315234263e-07,
"logits/chosen": -0.7388381958007812,
"logits/rejected": -0.7873945236206055,
"logps/chosen": -167.81883239746094,
"logps/rejected": -176.68295288085938,
"loss": 0.6136,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.3122307062149048,
"rewards/margins": 0.20933406054973602,
"rewards/rejected": 0.10289661586284637,
"step": 120
},
{
"epoch": 0.11910215300045808,
"grad_norm": 1.5484569072723389,
"learning_rate": 9.989747889928883e-07,
"logits/chosen": -0.7524275183677673,
"logits/rejected": -0.8292320370674133,
"logps/chosen": -197.0650177001953,
"logps/rejected": -237.57839965820312,
"loss": 0.5938,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.5554567575454712,
"rewards/margins": 0.4092523455619812,
"rewards/rejected": 0.14620442688465118,
"step": 130
},
{
"epoch": 0.1282638570774164,
"grad_norm": 1.4368153810501099,
"learning_rate": 9.976942608363393e-07,
"logits/chosen": -0.632690966129303,
"logits/rejected": -0.7571207284927368,
"logps/chosen": -173.34666442871094,
"logps/rejected": -211.22317504882812,
"loss": 0.5709,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.58498615026474,
"rewards/margins": 0.40937310457229614,
"rewards/rejected": 0.17561307549476624,
"step": 140
},
{
"epoch": 0.1374255611543747,
"grad_norm": 1.5536640882492065,
"learning_rate": 9.9590336020199e-07,
"logits/chosen": -0.6405803561210632,
"logits/rejected": -0.7340038418769836,
"logps/chosen": -182.8199462890625,
"logps/rejected": -243.0964813232422,
"loss": 0.5731,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.6845705509185791,
"rewards/margins": 0.4665129780769348,
"rewards/rejected": 0.2180575579404831,
"step": 150
},
{
"epoch": 0.14658726523133303,
"grad_norm": 1.1569141149520874,
"learning_rate": 9.936039236117095e-07,
"logits/chosen": -0.8644550442695618,
"logits/rejected": -0.8035632967948914,
"logps/chosen": -200.20794677734375,
"logps/rejected": -239.16915893554688,
"loss": 0.544,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.826056957244873,
"rewards/margins": 0.5113142132759094,
"rewards/rejected": 0.3147428035736084,
"step": 160
},
{
"epoch": 0.15574896930829135,
"grad_norm": 1.5735410451889038,
"learning_rate": 9.907983090777206e-07,
"logits/chosen": -0.7325465083122253,
"logits/rejected": -0.7175777554512024,
"logps/chosen": -195.422607421875,
"logps/rejected": -211.19418334960938,
"loss": 0.5275,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.7453492879867554,
"rewards/margins": 0.558306872844696,
"rewards/rejected": 0.1870425045490265,
"step": 170
},
{
"epoch": 0.16491067338524965,
"grad_norm": 1.4354184865951538,
"learning_rate": 9.874893936845187e-07,
"logits/chosen": -0.6600767374038696,
"logits/rejected": -0.7184512615203857,
"logps/chosen": -214.3909149169922,
"logps/rejected": -291.69512939453125,
"loss": 0.5309,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 0.8096879124641418,
"rewards/margins": 0.5819981098175049,
"rewards/rejected": 0.2276897132396698,
"step": 180
},
{
"epoch": 0.17407237746220797,
"grad_norm": 1.9378727674484253,
"learning_rate": 9.836805706384983e-07,
"logits/chosen": -0.796898603439331,
"logits/rejected": -0.7903488874435425,
"logps/chosen": -151.75074768066406,
"logps/rejected": -193.89720153808594,
"loss": 0.4979,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.9862753748893738,
"rewards/margins": 0.6403971910476685,
"rewards/rejected": 0.3458781838417053,
"step": 190
},
{
"epoch": 0.1832340815391663,
"grad_norm": 1.368870735168457,
"learning_rate": 9.793757457883061e-07,
"logits/chosen": -0.7233944535255432,
"logits/rejected": -0.7641991972923279,
"logps/chosen": -131.52737426757812,
"logps/rejected": -177.9940185546875,
"loss": 0.5175,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.935263454914093,
"rewards/margins": 0.7129745483398438,
"rewards/rejected": 0.2222888469696045,
"step": 200
},
{
"epoch": 0.1832340815391663,
"eval_logits/chosen": -0.7067926526069641,
"eval_logits/rejected": -0.7133929133415222,
"eval_logps/chosen": -211.8815460205078,
"eval_logps/rejected": -261.1459655761719,
"eval_loss": 0.4986709654331207,
"eval_rewards/accuracies": 0.823699414730072,
"eval_rewards/chosen": 1.1264668703079224,
"eval_rewards/margins": 0.8350852131843567,
"eval_rewards/rejected": 0.29138168692588806,
"eval_runtime": 253.2388,
"eval_samples_per_second": 10.911,
"eval_steps_per_second": 1.366,
"step": 200
},
{
"epoch": 0.1923957856161246,
"grad_norm": 1.181227207183838,
"learning_rate": 9.745793336194975e-07,
"logits/chosen": -0.744937539100647,
"logits/rejected": -0.753220796585083,
"logps/chosen": -157.19473266601562,
"logps/rejected": -238.6236114501953,
"loss": 0.485,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.9487007260322571,
"rewards/margins": 0.5822451710700989,
"rewards/rejected": 0.366455614566803,
"step": 210
},
{
"epoch": 0.2015574896930829,
"grad_norm": 2.322333812713623,
"learning_rate": 9.69296252727595e-07,
"logits/chosen": -0.7686578035354614,
"logits/rejected": -0.7208544015884399,
"logps/chosen": -161.76914978027344,
"logps/rejected": -203.6949005126953,
"loss": 0.486,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.1700208187103271,
"rewards/margins": 0.8085041046142578,
"rewards/rejected": 0.36151671409606934,
"step": 220
},
{
"epoch": 0.21071919377004122,
"grad_norm": 3.2851109504699707,
"learning_rate": 9.63531920774199e-07,
"logits/chosen": -0.8623464703559875,
"logits/rejected": -0.864261269569397,
"logps/chosen": -132.91622924804688,
"logps/rejected": -190.5310516357422,
"loss": 0.4889,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.2793647050857544,
"rewards/margins": 0.8540099859237671,
"rewards/rejected": 0.4253547787666321,
"step": 230
},
{
"epoch": 0.21988089784699955,
"grad_norm": 2.092069387435913,
"learning_rate": 9.572922489313142e-07,
"logits/chosen": -0.8346965909004211,
"logits/rejected": -0.8602925539016724,
"logps/chosen": -177.9022216796875,
"logps/rejected": -209.01220703125,
"loss": 0.4294,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.2863993644714355,
"rewards/margins": 0.5576584935188293,
"rewards/rejected": 0.7287408709526062,
"step": 240
},
{
"epoch": 0.22904260192395787,
"grad_norm": 1.2932628393173218,
"learning_rate": 9.505836358195993e-07,
"logits/chosen": -0.7524776458740234,
"logits/rejected": -0.8281770944595337,
"logps/chosen": -144.74905395507812,
"logps/rejected": -226.6658172607422,
"loss": 0.4293,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.3644707202911377,
"rewards/margins": 0.8554983139038086,
"rewards/rejected": 0.5089724659919739,
"step": 250
},
{
"epoch": 0.23820430600091616,
"grad_norm": 1.3950133323669434,
"learning_rate": 9.434129609467483e-07,
"logits/chosen": -0.6876672506332397,
"logits/rejected": -0.6876662969589233,
"logps/chosen": -263.18585205078125,
"logps/rejected": -264.4542541503906,
"loss": 0.4518,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.4175243377685547,
"rewards/margins": 0.8856536746025085,
"rewards/rejected": 0.5318707227706909,
"step": 260
},
{
"epoch": 0.24736601007787448,
"grad_norm": 1.4819687604904175,
"learning_rate": 9.357875776527333e-07,
"logits/chosen": -0.6820667386054993,
"logits/rejected": -0.6322587132453918,
"logps/chosen": -173.28085327148438,
"logps/rejected": -196.96194458007812,
"loss": 0.4433,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.1709938049316406,
"rewards/margins": 0.7361399531364441,
"rewards/rejected": 0.4348538815975189,
"step": 270
},
{
"epoch": 0.2565277141548328,
"grad_norm": 1.2729604244232178,
"learning_rate": 9.27715305569148e-07,
"logits/chosen": -0.6576655507087708,
"logits/rejected": -0.6413074731826782,
"logps/chosen": -159.5121307373047,
"logps/rejected": -207.47195434570312,
"loss": 0.4029,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.8569538593292236,
"rewards/margins": 1.4031217098236084,
"rewards/rejected": 0.45383185148239136,
"step": 280
},
{
"epoch": 0.2656894182317911,
"grad_norm": 1.4309152364730835,
"learning_rate": 9.192044226003788e-07,
"logits/chosen": -0.7165778875350952,
"logits/rejected": -0.7207854390144348,
"logps/chosen": -171.72616577148438,
"logps/rejected": -212.6331024169922,
"loss": 0.4505,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.8912508487701416,
"rewards/margins": 1.2686805725097656,
"rewards/rejected": 0.6225701570510864,
"step": 290
},
{
"epoch": 0.2748511223087494,
"grad_norm": 1.5633419752120972,
"learning_rate": 9.102636564348294e-07,
"logits/chosen": -0.5978932976722717,
"logits/rejected": -0.7183943390846252,
"logps/chosen": -173.94984436035156,
"logps/rejected": -202.29295349121094,
"loss": 0.3903,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.945762276649475,
"rewards/margins": 1.3720638751983643,
"rewards/rejected": 0.5736981630325317,
"step": 300
},
{
"epoch": 0.2748511223087494,
"eval_logits/chosen": -0.6642194390296936,
"eval_logits/rejected": -0.6700440049171448,
"eval_logps/chosen": -199.8172607421875,
"eval_logps/rejected": -257.196044921875,
"eval_loss": 0.4279369115829468,
"eval_rewards/accuracies": 0.8468208312988281,
"eval_rewards/chosen": 1.7296818494796753,
"eval_rewards/margins": 1.240803837776184,
"eval_rewards/rejected": 0.48887789249420166,
"eval_runtime": 253.1385,
"eval_samples_per_second": 10.915,
"eval_steps_per_second": 1.367,
"step": 300
},
{
"epoch": 0.28401282638570774,
"grad_norm": 1.3390766382217407,
"learning_rate": 9.009021755949051e-07,
"logits/chosen": -0.6982103586196899,
"logits/rejected": -0.7174701690673828,
"logps/chosen": -159.46409606933594,
"logps/rejected": -160.19491577148438,
"loss": 0.4083,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.7644752264022827,
"rewards/margins": 1.0205423831939697,
"rewards/rejected": 0.7439330220222473,
"step": 310
},
{
"epoch": 0.29317453046266606,
"grad_norm": 1.3620954751968384,
"learning_rate": 8.911295800349314e-07,
"logits/chosen": -0.6473032832145691,
"logits/rejected": -0.6676048040390015,
"logps/chosen": -232.72787475585938,
"logps/rejected": -252.5911407470703,
"loss": 0.4178,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.7409747838974,
"rewards/margins": 1.134825348854065,
"rewards/rejected": 0.6061495542526245,
"step": 320
},
{
"epoch": 0.3023362345396244,
"grad_norm": 0.9761648774147034,
"learning_rate": 8.809558912966519e-07,
"logits/chosen": -0.6019878387451172,
"logits/rejected": -0.6780513525009155,
"logps/chosen": -134.56356811523438,
"logps/rejected": -185.90939331054688,
"loss": 0.354,
"rewards/accuracies": 0.875,
"rewards/chosen": 2.3775978088378906,
"rewards/margins": 1.7786915302276611,
"rewards/rejected": 0.5989062786102295,
"step": 330
},
{
"epoch": 0.3114979386165827,
"grad_norm": 1.6643030643463135,
"learning_rate": 8.703915422323984e-07,
"logits/chosen": -0.5226669907569885,
"logits/rejected": -0.5020606517791748,
"logps/chosen": -184.17132568359375,
"logps/rejected": -203.7013397216797,
"loss": 0.4022,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.763214111328125,
"rewards/margins": 1.1016991138458252,
"rewards/rejected": 0.661514937877655,
"step": 340
},
{
"epoch": 0.320659642693541,
"grad_norm": 1.045599102973938,
"learning_rate": 8.594473663064734e-07,
"logits/chosen": -0.7285621762275696,
"logits/rejected": -0.7740557193756104,
"logps/chosen": -133.10691833496094,
"logps/rejected": -191.98646545410156,
"loss": 0.3784,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.8301376104354858,
"rewards/margins": 1.3312700986862183,
"rewards/rejected": 0.49886733293533325,
"step": 350
},
{
"epoch": 0.3298213467704993,
"grad_norm": 2.110759973526001,
"learning_rate": 8.481345864857146e-07,
"logits/chosen": -0.5418592095375061,
"logits/rejected": -0.588280975818634,
"logps/chosen": -179.9706573486328,
"logps/rejected": -242.3267822265625,
"loss": 0.401,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.6830288171768188,
"rewards/margins": 1.2477144002914429,
"rewards/rejected": 0.43531447649002075,
"step": 360
},
{
"epoch": 0.3389830508474576,
"grad_norm": 1.335124135017395,
"learning_rate": 8.36464803730636e-07,
"logits/chosen": -0.8127607107162476,
"logits/rejected": -0.8397665023803711,
"logps/chosen": -143.43109130859375,
"logps/rejected": -185.8011932373047,
"loss": 0.3572,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.7833560705184937,
"rewards/margins": 1.1723896265029907,
"rewards/rejected": 0.6109665632247925,
"step": 370
},
{
"epoch": 0.34814475492441593,
"grad_norm": 1.2357772588729858,
"learning_rate": 8.244499850989451e-07,
"logits/chosen": -0.7481725811958313,
"logits/rejected": -0.756425678730011,
"logps/chosen": -117.90687561035156,
"logps/rejected": -198.514892578125,
"loss": 0.3691,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.9100993871688843,
"rewards/margins": 1.3691816329956055,
"rewards/rejected": 0.5409177541732788,
"step": 380
},
{
"epoch": 0.35730645900137425,
"grad_norm": 1.2337630987167358,
"learning_rate": 8.121024514736377e-07,
"logits/chosen": -0.5506582260131836,
"logits/rejected": -0.6542531251907349,
"logps/chosen": -116.30818176269531,
"logps/rejected": -186.07508850097656,
"loss": 0.3132,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.1005172729492188,
"rewards/margins": 1.9795573949813843,
"rewards/rejected": 0.12095997482538223,
"step": 390
},
{
"epoch": 0.3664681630783326,
"grad_norm": 1.261998176574707,
"learning_rate": 7.994348649282532e-07,
"logits/chosen": -0.6467943787574768,
"logits/rejected": -0.6284207701683044,
"logps/chosen": -167.55819702148438,
"logps/rejected": -237.6490936279297,
"loss": 0.3712,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.9129873514175415,
"rewards/margins": 1.4685484170913696,
"rewards/rejected": 0.4444388747215271,
"step": 400
},
{
"epoch": 0.3664681630783326,
"eval_logits/chosen": -0.6691383123397827,
"eval_logits/rejected": -0.6756234765052795,
"eval_logps/chosen": -199.86724853515625,
"eval_logps/rejected": -262.46453857421875,
"eval_loss": 0.37812188267707825,
"eval_rewards/accuracies": 0.8468208312988281,
"eval_rewards/chosen": 1.7271815538406372,
"eval_rewards/margins": 1.5017303228378296,
"eval_rewards/rejected": 0.22545117139816284,
"eval_runtime": 253.3553,
"eval_samples_per_second": 10.906,
"eval_steps_per_second": 1.366,
"step": 400
},
{
"epoch": 0.3756298671552909,
"grad_norm": 1.2864240407943726,
"learning_rate": 7.8646021574225e-07,
"logits/chosen": -0.5721285343170166,
"logits/rejected": -0.5845418572425842,
"logps/chosen": -163.110595703125,
"logps/rejected": -217.90377807617188,
"loss": 0.3708,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.0738914012908936,
"rewards/margins": 1.6767698526382446,
"rewards/rejected": 0.397121787071228,
"step": 410
},
{
"epoch": 0.3847915712322492,
"grad_norm": 1.066874623298645,
"learning_rate": 7.731918090798113e-07,
"logits/chosen": -0.6550859212875366,
"logits/rejected": -0.6960932016372681,
"logps/chosen": -151.92108154296875,
"logps/rejected": -187.91384887695312,
"loss": 0.3284,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.930013656616211,
"rewards/margins": 1.5344407558441162,
"rewards/rejected": 0.39557284116744995,
"step": 420
},
{
"epoch": 0.39395327530920754,
"grad_norm": 1.4810634851455688,
"learning_rate": 7.596432513457482e-07,
"logits/chosen": -0.7274349927902222,
"logits/rejected": -0.7099400162696838,
"logps/chosen": -145.75698852539062,
"logps/rejected": -185.64678955078125,
"loss": 0.3359,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.9275859594345093,
"rewards/margins": 1.4946165084838867,
"rewards/rejected": 0.43296942114830017,
"step": 430
},
{
"epoch": 0.4031149793861658,
"grad_norm": 1.650376319885254,
"learning_rate": 7.458284362324842e-07,
"logits/chosen": -0.5438752174377441,
"logits/rejected": -0.6026032567024231,
"logps/chosen": -132.947021484375,
"logps/rejected": -219.47055053710938,
"loss": 0.3276,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.8638055324554443,
"rewards/margins": 2.066131353378296,
"rewards/rejected": -0.20232591032981873,
"step": 440
},
{
"epoch": 0.4122766834631241,
"grad_norm": 0.992289125919342,
"learning_rate": 7.317615304724387e-07,
"logits/chosen": -0.6501020193099976,
"logits/rejected": -0.680503249168396,
"logps/chosen": -157.24925231933594,
"logps/rejected": -180.8325958251953,
"loss": 0.3249,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.7174543142318726,
"rewards/margins": 1.594322681427002,
"rewards/rejected": 0.12313172966241837,
"step": 450
},
{
"epoch": 0.42143838754008245,
"grad_norm": 1.2098772525787354,
"learning_rate": 7.174569593104108e-07,
"logits/chosen": -0.7602720260620117,
"logits/rejected": -0.7695900201797485,
"logps/chosen": -174.65504455566406,
"logps/rejected": -225.5537567138672,
"loss": 0.3396,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.8735862970352173,
"rewards/margins": 1.4029169082641602,
"rewards/rejected": 0.47066912055015564,
"step": 460
},
{
"epoch": 0.43060009161704077,
"grad_norm": 1.896801471710205,
"learning_rate": 7.029293917108677e-07,
"logits/chosen": -0.6379343271255493,
"logits/rejected": -0.629081130027771,
"logps/chosen": -264.4695739746094,
"logps/rejected": -247.133056640625,
"loss": 0.3216,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.6404482126235962,
"rewards/margins": 1.7144851684570312,
"rewards/rejected": -0.07403700053691864,
"step": 470
},
{
"epoch": 0.4397617956939991,
"grad_norm": 2.521409273147583,
"learning_rate": 6.881937253153051e-07,
"logits/chosen": -0.7469202876091003,
"logits/rejected": -0.7621224522590637,
"logps/chosen": -165.92147827148438,
"logps/rejected": -223.8385772705078,
"loss": 0.3246,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.0734469890594482,
"rewards/margins": 2.012188673019409,
"rewards/rejected": 0.061258465051651,
"step": 480
},
{
"epoch": 0.4489234997709574,
"grad_norm": 1.2489752769470215,
"learning_rate": 6.732650711651031e-07,
"logits/chosen": -0.5696443319320679,
"logits/rejected": -0.618468165397644,
"logps/chosen": -189.3000030517578,
"logps/rejected": -246.3811798095703,
"loss": 0.2909,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.7483714818954468,
"rewards/margins": 2.0019850730895996,
"rewards/rejected": -0.2536138892173767,
"step": 490
},
{
"epoch": 0.45808520384791573,
"grad_norm": 1.3916680812835693,
"learning_rate": 6.581587382055491e-07,
"logits/chosen": -0.761835515499115,
"logits/rejected": -0.7702199220657349,
"logps/chosen": -151.83370971679688,
"logps/rejected": -229.975341796875,
"loss": 0.3064,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8061788082122803,
"rewards/margins": 1.7005802392959595,
"rewards/rejected": 0.10559873282909393,
"step": 500
},
{
"epoch": 0.45808520384791573,
"eval_logits/chosen": -0.6487900018692017,
"eval_logits/rejected": -0.6642398834228516,
"eval_logps/chosen": -199.9703826904297,
"eval_logps/rejected": -267.3388671875,
"eval_loss": 0.347669780254364,
"eval_rewards/accuracies": 0.8612716794013977,
"eval_rewards/chosen": 1.7220263481140137,
"eval_rewards/margins": 1.7402905225753784,
"eval_rewards/rejected": -0.01826408877968788,
"eval_runtime": 253.4005,
"eval_samples_per_second": 10.904,
"eval_steps_per_second": 1.365,
"step": 500
},
{
"epoch": 0.467246907924874,
"grad_norm": 1.3511228561401367,
"learning_rate": 6.428902175869126e-07,
"logits/chosen": -0.680508553981781,
"logits/rejected": -0.6774734258651733,
"logps/chosen": -177.41610717773438,
"logps/rejected": -221.32138061523438,
"loss": 0.3258,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.022366762161255,
"rewards/margins": 2.05668044090271,
"rewards/rejected": -0.03431398794054985,
"step": 510
},
{
"epoch": 0.4764086120018323,
"grad_norm": 0.9841328263282776,
"learning_rate": 6.274751667786761e-07,
"logits/chosen": -0.6339150667190552,
"logits/rejected": -0.5782631635665894,
"logps/chosen": -230.2734832763672,
"logps/rejected": -311.7682800292969,
"loss": 0.3229,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7927030324935913,
"rewards/margins": 1.7812907695770264,
"rewards/rejected": 0.011412340216338634,
"step": 520
},
{
"epoch": 0.48557031607879064,
"grad_norm": 2.7643489837646484,
"learning_rate": 6.119293935132075e-07,
"logits/chosen": -0.6203776597976685,
"logits/rejected": -0.6682835817337036,
"logps/chosen": -151.23324584960938,
"logps/rejected": -187.77114868164062,
"loss": 0.3034,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8519824743270874,
"rewards/margins": 2.0718624591827393,
"rewards/rejected": -0.21987971663475037,
"step": 530
},
{
"epoch": 0.49473202015574896,
"grad_norm": 1.3088020086288452,
"learning_rate": 5.962688395753437e-07,
"logits/chosen": -0.8648042678833008,
"logits/rejected": -0.9114105105400085,
"logps/chosen": -137.6197052001953,
"logps/rejected": -205.31900024414062,
"loss": 0.2938,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.5214287042617798,
"rewards/margins": 1.6869144439697266,
"rewards/rejected": -0.16548602283000946,
"step": 540
},
{
"epoch": 0.5038937242327073,
"grad_norm": 1.9471220970153809,
"learning_rate": 5.80509564454506e-07,
"logits/chosen": -0.7066371440887451,
"logits/rejected": -0.6964636445045471,
"logps/chosen": -106.696533203125,
"logps/rejected": -200.95729064941406,
"loss": 0.311,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.6337581872940063,
"rewards/margins": 1.6309497356414795,
"rewards/rejected": 0.0028083801735192537,
"step": 550
},
{
"epoch": 0.5130554283096656,
"grad_norm": 1.3545409440994263,
"learning_rate": 5.646677288761132e-07,
"logits/chosen": -0.653856098651886,
"logits/rejected": -0.7129830121994019,
"logps/chosen": -153.89035034179688,
"logps/rejected": -214.0993194580078,
"loss": 0.3112,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.5801527500152588,
"rewards/margins": 1.731884241104126,
"rewards/rejected": -0.1517314463853836,
"step": 560
},
{
"epoch": 0.5222171323866239,
"grad_norm": 2.128307819366455,
"learning_rate": 5.487595782291784e-07,
"logits/chosen": -0.6990654468536377,
"logits/rejected": -0.7342058420181274,
"logps/chosen": -178.05380249023438,
"logps/rejected": -225.2575225830078,
"loss": 0.289,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.7597728967666626,
"rewards/margins": 1.7312393188476562,
"rewards/rejected": 0.02853367291390896,
"step": 570
},
{
"epoch": 0.5313788364635822,
"grad_norm": 1.2450644969940186,
"learning_rate": 5.328014259070878e-07,
"logits/chosen": -0.619064211845398,
"logits/rejected": -0.6336346864700317,
"logps/chosen": -188.67752075195312,
"logps/rejected": -231.69503784179688,
"loss": 0.3388,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.4915143251419067,
"rewards/margins": 1.5344158411026,
"rewards/rejected": -0.042901456356048584,
"step": 580
},
{
"epoch": 0.5405405405405406,
"grad_norm": 1.2011312246322632,
"learning_rate": 5.168096365786402e-07,
"logits/chosen": -0.7081841230392456,
"logits/rejected": -0.7206417918205261,
"logps/chosen": -163.11752319335938,
"logps/rejected": -235.26065063476562,
"loss": 0.311,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.9438568353652954,
"rewards/margins": 2.347649574279785,
"rewards/rejected": -0.4037927985191345,
"step": 590
},
{
"epoch": 0.5497022446174988,
"grad_norm": 1.7624154090881348,
"learning_rate": 5.008006094065069e-07,
"logits/chosen": -0.721166729927063,
"logits/rejected": -0.7850608229637146,
"logps/chosen": -165.5583953857422,
"logps/rejected": -213.85855102539062,
"loss": 0.3054,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.5962111949920654,
"rewards/margins": 1.4221036434173584,
"rewards/rejected": 0.174107626080513,
"step": 600
},
{
"epoch": 0.5497022446174988,
"eval_logits/chosen": -0.6406525373458862,
"eval_logits/rejected": -0.6576036214828491,
"eval_logps/chosen": -201.4723358154297,
"eval_logps/rejected": -270.9281311035156,
"eval_loss": 0.3270590603351593,
"eval_rewards/accuracies": 0.8670520186424255,
"eval_rewards/chosen": 1.6469277143478394,
"eval_rewards/margins": 1.8446547985076904,
"eval_rewards/rejected": -0.1977270245552063,
"eval_runtime": 253.7625,
"eval_samples_per_second": 10.888,
"eval_steps_per_second": 1.363,
"step": 600
},
{
"epoch": 0.5588639486944572,
"grad_norm": 1.902051329612732,
"learning_rate": 4.847907612303182e-07,
"logits/chosen": -0.7130570411682129,
"logits/rejected": -0.7378814816474915,
"logps/chosen": -188.9144287109375,
"logps/rejected": -259.7423400878906,
"loss": 0.3009,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.5585817098617554,
"rewards/margins": 1.3624755144119263,
"rewards/rejected": 0.19610631465911865,
"step": 610
},
{
"epoch": 0.5680256527714155,
"grad_norm": 1.07755446434021,
"learning_rate": 4.687965097316223e-07,
"logits/chosen": -0.5912365317344666,
"logits/rejected": -0.7409440875053406,
"logps/chosen": -126.41873931884766,
"logps/rejected": -238.76437377929688,
"loss": 0.275,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.855369210243225,
"rewards/margins": 2.4938321113586426,
"rewards/rejected": -0.638463020324707,
"step": 620
},
{
"epoch": 0.5771873568483737,
"grad_norm": 1.3470137119293213,
"learning_rate": 4.5283425659798175e-07,
"logits/chosen": -0.8164669275283813,
"logits/rejected": -0.8073943853378296,
"logps/chosen": -201.78501892089844,
"logps/rejected": -291.32183837890625,
"loss": 0.3179,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.852560043334961,
"rewards/margins": 1.8420225381851196,
"rewards/rejected": 0.010537643916904926,
"step": 630
},
{
"epoch": 0.5863490609253321,
"grad_norm": 1.2435747385025024,
"learning_rate": 4.3692037070347123e-07,
"logits/chosen": -0.6459102630615234,
"logits/rejected": -0.6654535531997681,
"logps/chosen": -139.29067993164062,
"logps/rejected": -212.83740234375,
"loss": 0.3016,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.6537023782730103,
"rewards/margins": 2.188789129257202,
"rewards/rejected": -0.5350866913795471,
"step": 640
},
{
"epoch": 0.5955107650022904,
"grad_norm": 1.0110821723937988,
"learning_rate": 4.21071171322823e-07,
"logits/chosen": -0.6093601584434509,
"logits/rejected": -0.5892384648323059,
"logps/chosen": -275.14105224609375,
"logps/rejected": -322.5516662597656,
"loss": 0.3134,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.5108485221862793,
"rewards/margins": 1.933189034461975,
"rewards/rejected": -0.4223404824733734,
"step": 650
},
{
"epoch": 0.6046724690792488,
"grad_norm": 0.9678570628166199,
"learning_rate": 4.0530291139643755e-07,
"logits/chosen": -0.8226197957992554,
"logits/rejected": -0.8122960925102234,
"logps/chosen": -143.47821044921875,
"logps/rejected": -207.33187866210938,
"loss": 0.2829,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 2.0050506591796875,
"rewards/margins": 2.1615443229675293,
"rewards/rejected": -0.1564939320087433,
"step": 660
},
{
"epoch": 0.613834173156207,
"grad_norm": 1.5877900123596191,
"learning_rate": 3.8963176086341727e-07,
"logits/chosen": -0.6404609084129333,
"logits/rejected": -0.7193113565444946,
"logps/chosen": -163.66485595703125,
"logps/rejected": -219.49063110351562,
"loss": 0.2704,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.613854169845581,
"rewards/margins": 2.1832098960876465,
"rewards/rejected": -0.5693557858467102,
"step": 670
},
{
"epoch": 0.6229958772331654,
"grad_norm": 1.301003098487854,
"learning_rate": 3.7407379007971506e-07,
"logits/chosen": -0.6765211820602417,
"logits/rejected": -0.6541970372200012,
"logps/chosen": -199.01051330566406,
"logps/rejected": -279.47454833984375,
"loss": 0.2896,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.5939905643463135,
"rewards/margins": 2.2719438076019287,
"rewards/rejected": -0.6779531240463257,
"step": 680
},
{
"epoch": 0.6321575813101237,
"grad_norm": 2.2288615703582764,
"learning_rate": 3.586449533384048e-07,
"logits/chosen": -0.6064814925193787,
"logits/rejected": -0.6105560660362244,
"logps/chosen": -137.80624389648438,
"logps/rejected": -186.99191284179688,
"loss": 0.3041,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.5289627313613892,
"rewards/margins": 1.8546040058135986,
"rewards/rejected": -0.32564133405685425,
"step": 690
},
{
"epoch": 0.641319285387082,
"grad_norm": 5.320913314819336,
"learning_rate": 3.433610725089692e-07,
"logits/chosen": -0.7031580209732056,
"logits/rejected": -0.6630051136016846,
"logps/chosen": -185.18197631835938,
"logps/rejected": -280.9058532714844,
"loss": 0.2919,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8430522680282593,
"rewards/margins": 1.9076077938079834,
"rewards/rejected": -0.06455531716346741,
"step": 700
},
{
"epoch": 0.641319285387082,
"eval_logits/chosen": -0.6672143936157227,
"eval_logits/rejected": -0.6753049492835999,
"eval_logps/chosen": -199.65896606445312,
"eval_logps/rejected": -273.0414123535156,
"eval_loss": 0.3144252896308899,
"eval_rewards/accuracies": 0.8641618490219116,
"eval_rewards/chosen": 1.7375967502593994,
"eval_rewards/margins": 2.0409882068634033,
"eval_rewards/rejected": -0.30339136719703674,
"eval_runtime": 253.1657,
"eval_samples_per_second": 10.914,
"eval_steps_per_second": 1.367,
"step": 700
},
{
"epoch": 0.6504809894640403,
"grad_norm": 1.6359617710113525,
"learning_rate": 3.2823782081238555e-07,
"logits/chosen": -0.7354801893234253,
"logits/rejected": -0.7596527338027954,
"logps/chosen": -145.60165405273438,
"logps/rejected": -207.4108123779297,
"loss": 0.3059,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.8920338153839111,
"rewards/margins": 1.903387427330017,
"rewards/rejected": -0.011353528127074242,
"step": 710
},
{
"epoch": 0.6596426935409986,
"grad_norm": 1.6316208839416504,
"learning_rate": 3.132907067486471e-07,
"logits/chosen": -0.6749696135520935,
"logits/rejected": -0.7063171863555908,
"logps/chosen": -158.86273193359375,
"logps/rejected": -211.850341796875,
"loss": 0.2837,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.6359964609146118,
"rewards/margins": 1.919615387916565,
"rewards/rejected": -0.2836189270019531,
"step": 720
},
{
"epoch": 0.668804397617957,
"grad_norm": 2.493192672729492,
"learning_rate": 2.985350581932005e-07,
"logits/chosen": -0.7640320062637329,
"logits/rejected": -0.7546281814575195,
"logps/chosen": -185.2474822998047,
"logps/rejected": -231.47119140625,
"loss": 0.3068,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.423882007598877,
"rewards/margins": 2.5569610595703125,
"rewards/rejected": -0.13307929039001465,
"step": 730
},
{
"epoch": 0.6779661016949152,
"grad_norm": 1.0160990953445435,
"learning_rate": 2.839860066786103e-07,
"logits/chosen": -0.7123746871948242,
"logits/rejected": -0.7270351648330688,
"logps/chosen": -131.58505249023438,
"logps/rejected": -189.05264282226562,
"loss": 0.2778,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8099651336669922,
"rewards/margins": 2.3398964405059814,
"rewards/rejected": -0.5299314260482788,
"step": 740
},
{
"epoch": 0.6871278057718736,
"grad_norm": 1.4675796031951904,
"learning_rate": 2.6965847187756553e-07,
"logits/chosen": -0.7339153289794922,
"logits/rejected": -0.6953274011611938,
"logps/chosen": -144.21389770507812,
"logps/rejected": -171.4956512451172,
"loss": 0.2746,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 2.1637752056121826,
"rewards/margins": 2.3990566730499268,
"rewards/rejected": -0.2352815419435501,
"step": 750
},
{
"epoch": 0.6962895098488319,
"grad_norm": 2.0078845024108887,
"learning_rate": 2.5556714630314613e-07,
"logits/chosen": -0.783401370048523,
"logits/rejected": -0.854455828666687,
"logps/chosen": -105.17523193359375,
"logps/rejected": -193.60110473632812,
"loss": 0.2727,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 2.012260913848877,
"rewards/margins": 2.8885557651519775,
"rewards/rejected": -0.8762944340705872,
"step": 760
},
{
"epoch": 0.7054512139257902,
"grad_norm": 1.2648169994354248,
"learning_rate": 2.417264802420343e-07,
"logits/chosen": -0.6513649225234985,
"logits/rejected": -0.6898313760757446,
"logps/chosen": -141.83282470703125,
"logps/rejected": -238.6311492919922,
"loss": 0.3154,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.7510058879852295,
"rewards/margins": 2.0821216106414795,
"rewards/rejected": -0.33111587166786194,
"step": 770
},
{
"epoch": 0.7146129180027485,
"grad_norm": 2.6904611587524414,
"learning_rate": 2.2815066693612117e-07,
"logits/chosen": -0.7128955721855164,
"logits/rejected": -0.7633499503135681,
"logps/chosen": -155.6310577392578,
"logps/rejected": -210.39285278320312,
"loss": 0.2793,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.7025461196899414,
"rewards/margins": 2.1338300704956055,
"rewards/rejected": -0.4312838613986969,
"step": 780
},
{
"epoch": 0.7237746220797068,
"grad_norm": 2.1170003414154053,
"learning_rate": 2.1485362802770862e-07,
"logits/chosen": -0.6805752515792847,
"logits/rejected": -0.7278204560279846,
"logps/chosen": -207.82858276367188,
"logps/rejected": -320.7551574707031,
"loss": 0.2457,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.8095613718032837,
"rewards/margins": 2.516162157058716,
"rewards/rejected": -0.7066007852554321,
"step": 790
},
{
"epoch": 0.7329363261566652,
"grad_norm": 1.6859688758850098,
"learning_rate": 2.018489992832283e-07,
"logits/chosen": -0.7043607831001282,
"logits/rejected": -0.6326649785041809,
"logps/chosen": -189.71005249023438,
"logps/rejected": -247.50723266601562,
"loss": 0.314,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.972821593284607,
"rewards/margins": 2.5004470348358154,
"rewards/rejected": -0.5276254415512085,
"step": 800
},
{
"epoch": 0.7329363261566652,
"eval_logits/chosen": -0.6573572754859924,
"eval_logits/rejected": -0.6684801578521729,
"eval_logps/chosen": -200.33786010742188,
"eval_logps/rejected": -275.4322814941406,
"eval_loss": 0.3056153357028961,
"eval_rewards/accuracies": 0.8670520186424255,
"eval_rewards/chosen": 1.703651785850525,
"eval_rewards/margins": 2.126586437225342,
"eval_rewards/rejected": -0.4229348599910736,
"eval_runtime": 253.4624,
"eval_samples_per_second": 10.901,
"eval_steps_per_second": 1.365,
"step": 800
},
{
"epoch": 0.7420980302336234,
"grad_norm": 1.46702241897583,
"learning_rate": 1.891501166101187e-07,
"logits/chosen": -0.8299296498298645,
"logits/rejected": -0.8194143176078796,
"logps/chosen": -139.70791625976562,
"logps/rejected": -179.2274932861328,
"loss": 0.2988,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.740025281906128,
"rewards/margins": 2.0235095024108887,
"rewards/rejected": -0.28348422050476074,
"step": 810
},
{
"epoch": 0.7512597343105818,
"grad_norm": 2.791546583175659,
"learning_rate": 1.767700023812e-07,
"logits/chosen": -0.7000871896743774,
"logits/rejected": -0.7028741240501404,
"logps/chosen": -174.82821655273438,
"logps/rejected": -264.07470703125,
"loss": 0.272,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 2.0946412086486816,
"rewards/margins": 2.783123016357422,
"rewards/rejected": -0.6884818077087402,
"step": 820
},
{
"epoch": 0.7604214383875401,
"grad_norm": 1.3349977731704712,
"learning_rate": 1.6472135208057125e-07,
"logits/chosen": -0.6306108832359314,
"logits/rejected": -0.698475182056427,
"logps/chosen": -155.9135284423828,
"logps/rejected": -198.2060546875,
"loss": 0.3032,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.55352783203125,
"rewards/margins": 2.126960515975952,
"rewards/rejected": -0.5734325647354126,
"step": 830
},
{
"epoch": 0.7695831424644984,
"grad_norm": 2.1777162551879883,
"learning_rate": 1.530165212847217e-07,
"logits/chosen": -0.7774958610534668,
"logits/rejected": -0.7312562465667725,
"logps/chosen": -141.79075622558594,
"logps/rejected": -190.49310302734375,
"loss": 0.2807,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.8885326385498047,
"rewards/margins": 2.229785442352295,
"rewards/rejected": -0.3412528932094574,
"step": 840
},
{
"epoch": 0.7787448465414567,
"grad_norm": 1.2950890064239502,
"learning_rate": 1.4166751299221003e-07,
"logits/chosen": -0.6935632228851318,
"logits/rejected": -0.681289553642273,
"logps/chosen": -166.78561401367188,
"logps/rejected": -229.1016387939453,
"loss": 0.2802,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.8389393091201782,
"rewards/margins": 3.066588878631592,
"rewards/rejected": -1.2276496887207031,
"step": 850
},
{
"epoch": 0.7879065506184151,
"grad_norm": 1.1921981573104858,
"learning_rate": 1.306859653149025e-07,
"logits/chosen": -0.7318634390830994,
"logits/rejected": -0.7256627082824707,
"logps/chosen": -171.42092895507812,
"logps/rejected": -247.11831665039062,
"loss": 0.2934,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.7239230871200562,
"rewards/margins": 2.5621562004089355,
"rewards/rejected": -0.8382335901260376,
"step": 860
},
{
"epoch": 0.7970682546953733,
"grad_norm": 1.4422613382339478,
"learning_rate": 1.2008313954339305e-07,
"logits/chosen": -0.586450457572937,
"logits/rejected": -0.5815967321395874,
"logps/chosen": -215.48007202148438,
"logps/rejected": -242.58511352539062,
"loss": 0.2549,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.6724342107772827,
"rewards/margins": 1.982318639755249,
"rewards/rejected": -0.3098844885826111,
"step": 870
},
{
"epoch": 0.8062299587723316,
"grad_norm": 1.141564130783081,
"learning_rate": 1.098699085988432e-07,
"logits/chosen": -0.7615233659744263,
"logits/rejected": -0.8405082821846008,
"logps/chosen": -163.58689880371094,
"logps/rejected": -256.6318054199219,
"loss": 0.2998,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.7078421115875244,
"rewards/margins": 1.9117094278335571,
"rewards/rejected": -0.20386750996112823,
"step": 880
},
{
"epoch": 0.81539166284929,
"grad_norm": 1.329354166984558,
"learning_rate": 1.0005674588308566e-07,
"logits/chosen": -0.7380274534225464,
"logits/rejected": -0.75429767370224,
"logps/chosen": -136.90902709960938,
"logps/rejected": -224.83151245117188,
"loss": 0.2231,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7314188480377197,
"rewards/margins": 2.723328113555908,
"rewards/rejected": -0.9919096231460571,
"step": 890
},
{
"epoch": 0.8245533669262483,
"grad_norm": 1.717936396598816,
"learning_rate": 9.065371453842358e-08,
"logits/chosen": -0.670835018157959,
"logits/rejected": -0.6879727244377136,
"logps/chosen": -138.87889099121094,
"logps/rejected": -184.62257385253906,
"loss": 0.3014,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 2.0553038120269775,
"rewards/margins": 2.568469524383545,
"rewards/rejected": -0.5131659507751465,
"step": 900
},
{
"epoch": 0.8245533669262483,
"eval_logits/chosen": -0.6641213893890381,
"eval_logits/rejected": -0.670166015625,
"eval_logps/chosen": -200.79710388183594,
"eval_logps/rejected": -276.2374267578125,
"eval_loss": 0.3019951581954956,
"eval_rewards/accuracies": 0.8699421882629395,
"eval_rewards/chosen": 1.6806890964508057,
"eval_rewards/margins": 2.1438791751861572,
"eval_rewards/rejected": -0.4631901979446411,
"eval_runtime": 253.6623,
"eval_samples_per_second": 10.892,
"eval_steps_per_second": 1.364,
"step": 900
},
{
"epoch": 0.8337150710032066,
"grad_norm": 1.479974627494812,
"learning_rate": 8.167045712814108e-08,
"logits/chosen": -0.6214216351509094,
"logits/rejected": -0.641666054725647,
"logps/chosen": -177.58828735351562,
"logps/rejected": -273.30755615234375,
"loss": 0.2533,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.7214215993881226,
"rewards/margins": 1.9738502502441406,
"rewards/rejected": -0.2524286210536957,
"step": 910
},
{
"epoch": 0.8428767750801649,
"grad_norm": 2.062375783920288,
"learning_rate": 7.311618574830569e-08,
"logits/chosen": -0.5943703651428223,
"logits/rejected": -0.6128894686698914,
"logps/chosen": -163.90029907226562,
"logps/rejected": -248.54415893554688,
"loss": 0.2868,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 2.0122509002685547,
"rewards/margins": 2.265479803085327,
"rewards/rejected": -0.25322893261909485,
"step": 920
},
{
"epoch": 0.8520384791571233,
"grad_norm": 1.7314789295196533,
"learning_rate": 6.499967258100514e-08,
"logits/chosen": -0.6944621801376343,
"logits/rejected": -0.7938731908798218,
"logps/chosen": -163.20828247070312,
"logps/rejected": -237.55078125,
"loss": 0.2645,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.7433500289916992,
"rewards/margins": 2.302280902862549,
"rewards/rejected": -0.5589307546615601,
"step": 930
},
{
"epoch": 0.8612001832340815,
"grad_norm": 0.9841431379318237,
"learning_rate": 5.732924089870245e-08,
"logits/chosen": -0.46664732694625854,
"logits/rejected": -0.6183963418006897,
"logps/chosen": -208.5711669921875,
"logps/rejected": -259.8545837402344,
"loss": 0.2725,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.87375009059906,
"rewards/margins": 2.2145848274230957,
"rewards/rejected": -0.34083452820777893,
"step": 940
},
{
"epoch": 0.8703618873110398,
"grad_norm": 1.0470377206802368,
"learning_rate": 5.011275652893782e-08,
"logits/chosen": -0.6211365461349487,
"logits/rejected": -0.6796506643295288,
"logps/chosen": -152.32177734375,
"logps/rejected": -218.6947479248047,
"loss": 0.2395,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.6267896890640259,
"rewards/margins": 2.076831102371216,
"rewards/rejected": -0.45004144310951233,
"step": 950
},
{
"epoch": 0.8795235913879982,
"grad_norm": 1.149141550064087,
"learning_rate": 4.3357619788127634e-08,
"logits/chosen": -0.6848306655883789,
"logits/rejected": -0.7928074598312378,
"logps/chosen": -200.9459228515625,
"logps/rejected": -254.53952026367188,
"loss": 0.2671,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.663435697555542,
"rewards/margins": 1.9824724197387695,
"rewards/rejected": -0.3190363943576813,
"step": 960
},
{
"epoch": 0.8886852954649564,
"grad_norm": 1.5327177047729492,
"learning_rate": 3.707075789273306e-08,
"logits/chosen": -0.6709850430488586,
"logits/rejected": -0.6786261796951294,
"logps/chosen": -146.47488403320312,
"logps/rejected": -247.5612335205078,
"loss": 0.2843,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": 1.8335565328598022,
"rewards/margins": 2.448019504547119,
"rewards/rejected": -0.6144627332687378,
"step": 970
},
{
"epoch": 0.8978469995419148,
"grad_norm": 1.4160970449447632,
"learning_rate": 3.125861785558015e-08,
"logits/chosen": -0.7669690251350403,
"logits/rejected": -0.8644639849662781,
"logps/chosen": -169.50274658203125,
"logps/rejected": -304.95098876953125,
"loss": 0.2834,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.8083531856536865,
"rewards/margins": 2.9228787422180176,
"rewards/rejected": -1.114525556564331,
"step": 980
},
{
"epoch": 0.9070087036188731,
"grad_norm": 2.2089922428131104,
"learning_rate": 2.592715987461702e-08,
"logits/chosen": -0.7596947550773621,
"logits/rejected": -0.7801377177238464,
"logps/chosen": -217.3747100830078,
"logps/rejected": -258.47796630859375,
"loss": 0.3283,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.5211076736450195,
"rewards/margins": 1.6776663064956665,
"rewards/rejected": -0.15655846893787384,
"step": 990
},
{
"epoch": 0.9161704076958315,
"grad_norm": 1.1835498809814453,
"learning_rate": 2.108185122088546e-08,
"logits/chosen": -0.7457928657531738,
"logits/rejected": -0.6937334537506104,
"logps/chosen": -169.40447998046875,
"logps/rejected": -247.85061645507812,
"loss": 0.268,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.8612730503082275,
"rewards/margins": 2.29799747467041,
"rewards/rejected": -0.43672457337379456,
"step": 1000
},
{
"epoch": 0.9161704076958315,
"eval_logits/chosen": -0.6635109186172485,
"eval_logits/rejected": -0.6690148711204529,
"eval_logps/chosen": -200.81570434570312,
"eval_logps/rejected": -276.8312072753906,
"eval_loss": 0.29993191361427307,
"eval_rewards/accuracies": 0.884393036365509,
"eval_rewards/chosen": 1.6797596216201782,
"eval_rewards/margins": 2.1726391315460205,
"eval_rewards/rejected": -0.4928795397281647,
"eval_runtime": 253.6729,
"eval_samples_per_second": 10.892,
"eval_steps_per_second": 1.364,
"step": 1000
},
{
"epoch": 0.9253321117727897,
"grad_norm": 1.9540088176727295,
"learning_rate": 1.672766063197789e-08,
"logits/chosen": -0.6848675608634949,
"logits/rejected": -0.6749913692474365,
"logps/chosen": -182.75892639160156,
"logps/rejected": -234.47232055664062,
"loss": 0.2621,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.729029655456543,
"rewards/margins": 2.2015597820281982,
"rewards/rejected": -0.47253018617630005,
"step": 1010
},
{
"epoch": 0.934493815849748,
"grad_norm": 1.5091657638549805,
"learning_rate": 1.286905321672621e-08,
"logits/chosen": -0.6353659629821777,
"logits/rejected": -0.6381145715713501,
"logps/chosen": -118.5170669555664,
"logps/rejected": -205.03689575195312,
"loss": 0.2602,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.9368598461151123,
"rewards/margins": 2.6814284324645996,
"rewards/rejected": -0.744568407535553,
"step": 1020
},
{
"epoch": 0.9436555199267064,
"grad_norm": 1.7073416709899902,
"learning_rate": 9.509985876349491e-09,
"logits/chosen": -0.61668461561203,
"logits/rejected": -0.6262907385826111,
"logps/chosen": -143.79531860351562,
"logps/rejected": -229.5824737548828,
"loss": 0.2564,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.7922176122665405,
"rewards/margins": 2.186096429824829,
"rewards/rejected": -0.3938787579536438,
"step": 1030
},
{
"epoch": 0.9528172240036646,
"grad_norm": 1.6124757528305054,
"learning_rate": 6.6539032467546885e-09,
"logits/chosen": -0.790496289730072,
"logits/rejected": -0.7677043080329895,
"logps/chosen": -256.6092834472656,
"logps/rejected": -325.1084289550781,
"loss": 0.262,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.504821538925171,
"rewards/margins": 2.0350840091705322,
"rewards/rejected": -0.5302623510360718,
"step": 1040
},
{
"epoch": 0.961978928080623,
"grad_norm": 1.0290203094482422,
"learning_rate": 4.303734166152706e-09,
"logits/chosen": -0.7079821825027466,
"logits/rejected": -0.6711887121200562,
"logps/chosen": -177.19007873535156,
"logps/rejected": -210.00613403320312,
"loss": 0.3015,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.794146180152893,
"rewards/margins": 2.358898639678955,
"rewards/rejected": -0.564752459526062,
"step": 1050
},
{
"epoch": 0.9711406321575813,
"grad_norm": 1.0389478206634521,
"learning_rate": 2.4618886716110676e-09,
"logits/chosen": -0.6768301725387573,
"logits/rejected": -0.6700726747512817,
"logps/chosen": -167.974609375,
"logps/rejected": -247.9666290283203,
"loss": 0.2928,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.5335534811019897,
"rewards/margins": 1.9949595928192139,
"rewards/rejected": -0.4614059329032898,
"step": 1060
},
{
"epoch": 0.9803023362345397,
"grad_norm": 0.8039052486419678,
"learning_rate": 1.1302555276238579e-09,
"logits/chosen": -0.6699908375740051,
"logits/rejected": -0.7130982279777527,
"logps/chosen": -109.6313247680664,
"logps/rejected": -177.20127868652344,
"loss": 0.232,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.9235477447509766,
"rewards/margins": 2.2871975898742676,
"rewards/rejected": -0.3636501729488373,
"step": 1070
},
{
"epoch": 0.9894640403114979,
"grad_norm": 2.4051105976104736,
"learning_rate": 3.102002892329536e-10,
"logits/chosen": -0.6233320832252502,
"logits/rejected": -0.6594210267066956,
"logps/chosen": -142.9510040283203,
"logps/rejected": -230.3099365234375,
"loss": 0.2467,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.8195747137069702,
"rewards/margins": 2.5644664764404297,
"rewards/rejected": -0.7448917031288147,
"step": 1080
},
{
"epoch": 0.9986257443884563,
"grad_norm": 1.9035027027130127,
"learning_rate": 2.5639016871248366e-12,
"logits/chosen": -0.608766496181488,
"logits/rejected": -0.5473885536193848,
"logps/chosen": -232.18392944335938,
"logps/rejected": -249.3759002685547,
"loss": 0.2929,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.7724415063858032,
"rewards/margins": 2.1429972648620605,
"rewards/rejected": -0.3705558478832245,
"step": 1090
},
{
"epoch": 0.9995419147961521,
"step": 1091,
"total_flos": 0.0,
"train_loss": 0.37405444834616947,
"train_runtime": 8738.9435,
"train_samples_per_second": 3.996,
"train_steps_per_second": 0.125
}
],
"logging_steps": 10,
"max_steps": 1091,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}