{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9996824388694824,
"eval_steps": 100,
"global_step": 787,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0012702445220704986,
"grad_norm": 2.221076726913452,
"learning_rate": 6.329113924050633e-09,
"logits/chosen": -1.009307622909546,
"logits/rejected": -0.8146543502807617,
"logps/chosen": -239.11328125,
"logps/rejected": -496.86553955078125,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.012702445220704985,
"grad_norm": 2.367328405380249,
"learning_rate": 6.329113924050633e-08,
"logits/chosen": -0.6964383125305176,
"logits/rejected": -0.7254159450531006,
"logps/chosen": -208.61737060546875,
"logps/rejected": -263.8990478515625,
"loss": 0.6946,
"rewards/accuracies": 0.3611111044883728,
"rewards/chosen": -0.0008582437876611948,
"rewards/margins": -0.016089631244540215,
"rewards/rejected": 0.015231387689709663,
"step": 10
},
{
"epoch": 0.02540489044140997,
"grad_norm": 1.8726933002471924,
"learning_rate": 1.2658227848101266e-07,
"logits/chosen": -0.6946450471878052,
"logits/rejected": -0.6647360920906067,
"logps/chosen": -277.7767333984375,
"logps/rejected": -374.0205078125,
"loss": 0.6983,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.004380431957542896,
"rewards/margins": 0.007971120066940784,
"rewards/rejected": -0.003590688807889819,
"step": 20
},
{
"epoch": 0.03810733566211496,
"grad_norm": 2.08988356590271,
"learning_rate": 1.89873417721519e-07,
"logits/chosen": -0.6807274222373962,
"logits/rejected": -0.7538415193557739,
"logps/chosen": -198.79428100585938,
"logps/rejected": -285.8013000488281,
"loss": 0.6965,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -0.014045005664229393,
"rewards/margins": -0.013275370001792908,
"rewards/rejected": -0.0007696342654526234,
"step": 30
},
{
"epoch": 0.05080978088281994,
"grad_norm": 1.9258034229278564,
"learning_rate": 2.5316455696202533e-07,
"logits/chosen": -0.6203823089599609,
"logits/rejected": -0.5783265829086304,
"logps/chosen": -229.74234008789062,
"logps/rejected": -239.532470703125,
"loss": 0.6964,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": -0.02148367092013359,
"rewards/margins": -0.029883552342653275,
"rewards/rejected": 0.008399883285164833,
"step": 40
},
{
"epoch": 0.06351222610352493,
"grad_norm": 2.685725688934326,
"learning_rate": 3.1645569620253163e-07,
"logits/chosen": -0.7241995334625244,
"logits/rejected": -0.6957526803016663,
"logps/chosen": -192.46080017089844,
"logps/rejected": -221.1816864013672,
"loss": 0.6949,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": -0.01110866479575634,
"rewards/margins": -0.017371322959661484,
"rewards/rejected": 0.006262653972953558,
"step": 50
},
{
"epoch": 0.07621467132422992,
"grad_norm": 2.3835973739624023,
"learning_rate": 3.79746835443038e-07,
"logits/chosen": -0.7485499382019043,
"logits/rejected": -0.7531597018241882,
"logps/chosen": -213.107177734375,
"logps/rejected": -216.20693969726562,
"loss": 0.6934,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.008523592725396156,
"rewards/margins": 0.011266695335507393,
"rewards/rejected": -0.002743100980296731,
"step": 60
},
{
"epoch": 0.0889171165449349,
"grad_norm": 2.3520050048828125,
"learning_rate": 4.4303797468354424e-07,
"logits/chosen": -0.6595640182495117,
"logits/rejected": -0.640306830406189,
"logps/chosen": -190.93426513671875,
"logps/rejected": -257.3955383300781,
"loss": 0.6871,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.030752846971154213,
"rewards/margins": 0.012878650799393654,
"rewards/rejected": 0.01787419244647026,
"step": 70
},
{
"epoch": 0.10161956176563988,
"grad_norm": 1.8163669109344482,
"learning_rate": 4.999975388247991e-07,
"logits/chosen": -0.7872985005378723,
"logits/rejected": -0.7733204364776611,
"logps/chosen": -247.6918487548828,
"logps/rejected": -275.13970947265625,
"loss": 0.6852,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.020416706800460815,
"rewards/margins": 0.015500515699386597,
"rewards/rejected": 0.004916190169751644,
"step": 80
},
{
"epoch": 0.11432200698634487,
"grad_norm": 1.9721612930297852,
"learning_rate": 4.99702256431661e-07,
"logits/chosen": -0.6585575342178345,
"logits/rejected": -0.7301813364028931,
"logps/chosen": -201.46627807617188,
"logps/rejected": -238.66836547851562,
"loss": 0.681,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.040891751646995544,
"rewards/margins": 0.011915323324501514,
"rewards/rejected": 0.028976425528526306,
"step": 90
},
{
"epoch": 0.12702445220704986,
"grad_norm": 1.911104440689087,
"learning_rate": 4.989154050948158e-07,
"logits/chosen": -0.668161928653717,
"logits/rejected": -0.6479376554489136,
"logps/chosen": -226.9923553466797,
"logps/rejected": -273.5852966308594,
"loss": 0.6782,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.053215205669403076,
"rewards/margins": 0.045479319989681244,
"rewards/rejected": 0.007735882885754108,
"step": 100
},
{
"epoch": 0.12702445220704986,
"eval_logits/chosen": -0.728423535823822,
"eval_logits/rejected": -0.7425215840339661,
"eval_logps/chosen": -285.03399658203125,
"eval_logps/rejected": -338.5301513671875,
"eval_loss": 0.6610745787620544,
"eval_rewards/accuracies": 0.800000011920929,
"eval_rewards/chosen": 0.10375739634037018,
"eval_rewards/margins": 0.07656524330377579,
"eval_rewards/rejected": 0.027192166075110435,
"eval_runtime": 189.1315,
"eval_samples_per_second": 11.394,
"eval_steps_per_second": 1.428,
"step": 100
},
{
"epoch": 0.13972689742775485,
"grad_norm": 1.7344638109207153,
"learning_rate": 4.976385338258185e-07,
"logits/chosen": -0.6940301656723022,
"logits/rejected": -0.7501449584960938,
"logps/chosen": -219.23306274414062,
"logps/rejected": -329.6980895996094,
"loss": 0.6732,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.06183234974741936,
"rewards/margins": 0.03217070922255516,
"rewards/rejected": 0.0296616367995739,
"step": 110
},
{
"epoch": 0.15242934264845984,
"grad_norm": 2.249697685241699,
"learning_rate": 4.958741562994349e-07,
"logits/chosen": -0.6255596876144409,
"logits/rejected": -0.5508826971054077,
"logps/chosen": -243.20614624023438,
"logps/rejected": -247.3019561767578,
"loss": 0.6605,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.12279131263494492,
"rewards/margins": 0.09338497370481491,
"rewards/rejected": 0.029406333342194557,
"step": 120
},
{
"epoch": 0.16513178786916483,
"grad_norm": 2.507534980773926,
"learning_rate": 4.936257459051702e-07,
"logits/chosen": -0.8180228471755981,
"logits/rejected": -0.7405130863189697,
"logps/chosen": -232.1027374267578,
"logps/rejected": -192.86990356445312,
"loss": 0.65,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.10972237586975098,
"rewards/margins": 0.06441587209701538,
"rewards/rejected": 0.045306503772735596,
"step": 130
},
{
"epoch": 0.1778342330898698,
"grad_norm": 2.6342945098876953,
"learning_rate": 4.908977289094842e-07,
"logits/chosen": -0.8401117324829102,
"logits/rejected": -0.835118293762207,
"logps/chosen": -222.7890625,
"logps/rejected": -225.2012481689453,
"loss": 0.6394,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.16260744631290436,
"rewards/margins": 0.1253637969493866,
"rewards/rejected": 0.03724365308880806,
"step": 140
},
{
"epoch": 0.19053667831057478,
"grad_norm": 1.9931992292404175,
"learning_rate": 4.876954757421523e-07,
"logits/chosen": -0.6221734285354614,
"logits/rejected": -0.6039702296257019,
"logps/chosen": -223.699462890625,
"logps/rejected": -273.8064880371094,
"loss": 0.6319,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.18279646337032318,
"rewards/margins": 0.13619089126586914,
"rewards/rejected": 0.04660557210445404,
"step": 150
},
{
"epoch": 0.20323912353127976,
"grad_norm": 2.2039902210235596,
"learning_rate": 4.840252904239291e-07,
"logits/chosen": -0.8504929542541504,
"logits/rejected": -0.9042933583259583,
"logps/chosen": -209.73348999023438,
"logps/rejected": -263.92095947265625,
"loss": 0.6225,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.28889310359954834,
"rewards/margins": 0.1839434802532196,
"rewards/rejected": 0.10494961589574814,
"step": 160
},
{
"epoch": 0.21594156875198475,
"grad_norm": 2.3716001510620117,
"learning_rate": 4.798943981563237e-07,
"logits/chosen": -0.7665102481842041,
"logits/rejected": -0.7120729088783264,
"logps/chosen": -246.37387084960938,
"logps/rejected": -248.454833984375,
"loss": 0.6031,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.2789171636104584,
"rewards/margins": 0.21589259803295135,
"rewards/rejected": 0.06302457302808762,
"step": 170
},
{
"epoch": 0.22864401397268974,
"grad_norm": 1.7225255966186523,
"learning_rate": 4.7531093109792233e-07,
"logits/chosen": -0.7288578748703003,
"logits/rejected": -0.7030431032180786,
"logps/chosen": -258.3373107910156,
"logps/rejected": -265.3787841796875,
"loss": 0.6029,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.3224382996559143,
"rewards/margins": 0.2643985450267792,
"rewards/rejected": 0.05803976580500603,
"step": 180
},
{
"epoch": 0.24134645919339473,
"grad_norm": 1.8457423448562622,
"learning_rate": 4.702839123552541e-07,
"logits/chosen": -0.6772662401199341,
"logits/rejected": -0.6806420087814331,
"logps/chosen": -215.664306640625,
"logps/rejected": -358.85662841796875,
"loss": 0.5872,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.30164834856987,
"rewards/margins": 0.17489030957221985,
"rewards/rejected": 0.12675802409648895,
"step": 190
},
{
"epoch": 0.2540489044140997,
"grad_norm": 1.892817735671997,
"learning_rate": 4.6482323821972103e-07,
"logits/chosen": -0.6628939509391785,
"logits/rejected": -0.6188849210739136,
"logps/chosen": -196.9568634033203,
"logps/rejected": -201.54971313476562,
"loss": 0.5811,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.347095251083374,
"rewards/margins": 0.3189987540245056,
"rewards/rejected": 0.028096511960029602,
"step": 200
},
{
"epoch": 0.2540489044140997,
"eval_logits/chosen": -0.6945326924324036,
"eval_logits/rejected": -0.7306325435638428,
"eval_logps/chosen": -275.95892333984375,
"eval_logps/rejected": -336.28448486328125,
"eval_loss": 0.5408667922019958,
"eval_rewards/accuracies": 0.8370370268821716,
"eval_rewards/chosen": 0.5575132966041565,
"eval_rewards/margins": 0.41803672909736633,
"eval_rewards/rejected": 0.13947655260562897,
"eval_runtime": 185.7997,
"eval_samples_per_second": 11.599,
"eval_steps_per_second": 1.453,
"step": 200
},
{
"epoch": 0.2667513496348047,
"grad_norm": 1.5741887092590332,
"learning_rate": 4.589396586855566e-07,
"logits/chosen": -0.8169937133789062,
"logits/rejected": -0.7217267155647278,
"logps/chosen": -223.4730224609375,
"logps/rejected": -215.4644012451172,
"loss": 0.5698,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.3899025022983551,
"rewards/margins": 0.24544532597064972,
"rewards/rejected": 0.1444571316242218,
"step": 210
},
{
"epoch": 0.2794537948555097,
"grad_norm": 1.5209985971450806,
"learning_rate": 4.5264475628716847e-07,
"logits/chosen": -0.6111925840377808,
"logits/rejected": -0.6522852778434753,
"logps/chosen": -181.52911376953125,
"logps/rejected": -195.41268920898438,
"loss": 0.5606,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.46004748344421387,
"rewards/margins": 0.2880765497684479,
"rewards/rejected": 0.171970933675766,
"step": 220
},
{
"epoch": 0.29215624007621466,
"grad_norm": 1.8266632556915283,
"learning_rate": 4.4595092329752583e-07,
"logits/chosen": -0.6476088762283325,
"logits/rejected": -0.6599763035774231,
"logps/chosen": -242.3888702392578,
"logps/rejected": -243.00399780273438,
"loss": 0.5502,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.5840136408805847,
"rewards/margins": 0.4849432110786438,
"rewards/rejected": 0.09907043725252151,
"step": 230
},
{
"epoch": 0.3048586852969197,
"grad_norm": 1.6798079013824463,
"learning_rate": 4.388713373324785e-07,
"logits/chosen": -0.5340258479118347,
"logits/rejected": -0.5237671732902527,
"logps/chosen": -161.2602996826172,
"logps/rejected": -239.09765625,
"loss": 0.5516,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.5116562843322754,
"rewards/margins": 0.3430051803588867,
"rewards/rejected": 0.16865113377571106,
"step": 240
},
{
"epoch": 0.31756113051762463,
"grad_norm": 1.6784318685531616,
"learning_rate": 4.3141993540903397e-07,
"logits/chosen": -0.5829135179519653,
"logits/rejected": -0.5493456721305847,
"logps/chosen": -278.18975830078125,
"logps/rejected": -333.10760498046875,
"loss": 0.5244,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.644333004951477,
"rewards/margins": 0.4387938380241394,
"rewards/rejected": 0.20553907752037048,
"step": 250
},
{
"epoch": 0.33026357573832965,
"grad_norm": 1.523425817489624,
"learning_rate": 4.2361138650866257e-07,
"logits/chosen": -0.6888426542282104,
"logits/rejected": -0.7238985300064087,
"logps/chosen": -190.53945922851562,
"logps/rejected": -283.5095520019531,
"loss": 0.5305,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.6690517663955688,
"rewards/margins": 0.5147332549095154,
"rewards/rejected": 0.1543184369802475,
"step": 260
},
{
"epoch": 0.3429660209590346,
"grad_norm": 1.3767415285110474,
"learning_rate": 4.154610626996412e-07,
"logits/chosen": -0.7415731549263,
"logits/rejected": -0.733380913734436,
"logps/chosen": -230.6995391845703,
"logps/rejected": -249.3251953125,
"loss": 0.5172,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.7108834385871887,
"rewards/margins": 0.4339624345302582,
"rewards/rejected": 0.2769209146499634,
"step": 270
},
{
"epoch": 0.3556684661797396,
"grad_norm": 1.251821756362915,
"learning_rate": 4.0698500887528797e-07,
"logits/chosen": -0.6578670740127563,
"logits/rejected": -0.6777629852294922,
"logps/chosen": -195.59007263183594,
"logps/rejected": -200.3335418701172,
"loss": 0.5238,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.7475778460502625,
"rewards/margins": 0.4930620789527893,
"rewards/rejected": 0.2545158267021179,
"step": 280
},
{
"epoch": 0.3683709114004446,
"grad_norm": 1.571493148803711,
"learning_rate": 3.981999111676577e-07,
"logits/chosen": -0.7467225790023804,
"logits/rejected": -0.6888502836227417,
"logps/chosen": -202.24697875976562,
"logps/rejected": -233.97476196289062,
"loss": 0.5336,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.6827602982521057,
"rewards/margins": 0.42996150255203247,
"rewards/rejected": 0.25279879570007324,
"step": 290
},
{
"epoch": 0.38107335662114955,
"grad_norm": 1.4679317474365234,
"learning_rate": 3.891230640988834e-07,
"logits/chosen": -0.71528559923172,
"logits/rejected": -0.7373882532119751,
"logps/chosen": -215.18008422851562,
"logps/rejected": -254.1000213623047,
"loss": 0.5484,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.7753348350524902,
"rewards/margins": 0.3309822380542755,
"rewards/rejected": 0.44435256719589233,
"step": 300
},
{
"epoch": 0.38107335662114955,
"eval_logits/chosen": -0.7031270265579224,
"eval_logits/rejected": -0.7282780408859253,
"eval_logps/chosen": -268.3231201171875,
"eval_logps/rejected": -334.5019226074219,
"eval_loss": 0.4777176082134247,
"eval_rewards/accuracies": 0.800000011920929,
"eval_rewards/chosen": 0.9393030405044556,
"eval_rewards/margins": 0.710697591304779,
"eval_rewards/rejected": 0.22860530018806458,
"eval_runtime": 185.7035,
"eval_samples_per_second": 11.605,
"eval_steps_per_second": 1.454,
"step": 300
},
{
"epoch": 0.39377580184185457,
"grad_norm": 1.2585582733154297,
"learning_rate": 3.7977233653482764e-07,
"logits/chosen": -0.7015618085861206,
"logits/rejected": -0.693391740322113,
"logps/chosen": -158.01071166992188,
"logps/rejected": -207.1593780517578,
"loss": 0.5119,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.8586467504501343,
"rewards/margins": 0.6648082137107849,
"rewards/rejected": 0.19383853673934937,
"step": 310
},
{
"epoch": 0.40647824706255953,
"grad_norm": 1.2252905368804932,
"learning_rate": 3.7016613650807005e-07,
"logits/chosen": -0.5902287364006042,
"logits/rejected": -0.6502476930618286,
"logps/chosen": -174.2211151123047,
"logps/rejected": -259.4792175292969,
"loss": 0.5175,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.8190739750862122,
"rewards/margins": 0.7484151124954224,
"rewards/rejected": 0.07065889984369278,
"step": 320
},
{
"epoch": 0.41918069228326454,
"grad_norm": 1.463016390800476,
"learning_rate": 3.603233749794792e-07,
"logits/chosen": -0.7536525726318359,
"logits/rejected": -0.755484402179718,
"logps/chosen": -212.7367401123047,
"logps/rejected": -285.80450439453125,
"loss": 0.4963,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 0.8492296934127808,
"rewards/margins": 0.691977322101593,
"rewards/rejected": 0.15725243091583252,
"step": 330
},
{
"epoch": 0.4318831375039695,
"grad_norm": 1.512898325920105,
"learning_rate": 3.5026342860971036e-07,
"logits/chosen": -0.6014536619186401,
"logits/rejected": -0.5738852620124817,
"logps/chosen": -144.85147094726562,
"logps/rejected": -185.1627655029297,
"loss": 0.4891,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.849956214427948,
"rewards/margins": 0.5345536470413208,
"rewards/rejected": 0.3154025673866272,
"step": 340
},
{
"epoch": 0.4445855827246745,
"grad_norm": 1.7750078439712524,
"learning_rate": 3.400061016139175e-07,
"logits/chosen": -0.6042419672012329,
"logits/rejected": -0.6242547631263733,
"logps/chosen": -166.7231903076172,
"logps/rejected": -218.9178009033203,
"loss": 0.5233,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.8389670252799988,
"rewards/margins": 0.4035259783267975,
"rewards/rejected": 0.4354410767555237,
"step": 350
},
{
"epoch": 0.4572880279453795,
"grad_norm": 1.3576321601867676,
"learning_rate": 3.295715867747715e-07,
"logits/chosen": -0.6411730051040649,
"logits/rejected": -0.6592927575111389,
"logps/chosen": -238.73324584960938,
"logps/rejected": -274.3454895019531,
"loss": 0.4891,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.1362016201019287,
"rewards/margins": 0.8709964752197266,
"rewards/rejected": 0.2652052044868469,
"step": 360
},
{
"epoch": 0.46999047316608444,
"grad_norm": 1.349590539932251,
"learning_rate": 3.1898042569053765e-07,
"logits/chosen": -0.9314397573471069,
"logits/rejected": -0.9426021575927734,
"logps/chosen": -160.0816192626953,
"logps/rejected": -219.6217041015625,
"loss": 0.4604,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 0.9980394244194031,
"rewards/margins": 1.0329411029815674,
"rewards/rejected": -0.03490174934267998,
"step": 370
},
{
"epoch": 0.48269291838678946,
"grad_norm": 1.3657554388046265,
"learning_rate": 3.082534683364673e-07,
"logits/chosen": -0.728752613067627,
"logits/rejected": -0.6840351819992065,
"logps/chosen": -237.2197265625,
"logps/rejected": -302.3899841308594,
"loss": 0.4712,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.0956271886825562,
"rewards/margins": 0.9040347337722778,
"rewards/rejected": 0.19159242510795593,
"step": 380
},
{
"epoch": 0.4953953636074944,
"grad_norm": 3.238619804382324,
"learning_rate": 2.974118320191124e-07,
"logits/chosen": -0.7578514814376831,
"logits/rejected": -0.8504966497421265,
"logps/chosen": -207.72116088867188,
"logps/rejected": -293.9294128417969,
"loss": 0.475,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.7986660003662109,
"rewards/margins": 0.5549193024635315,
"rewards/rejected": 0.24374675750732422,
"step": 390
},
{
"epoch": 0.5080978088281994,
"grad_norm": 1.573013186454773,
"learning_rate": 2.864768598043654e-07,
"logits/chosen": -0.618281364440918,
"logits/rejected": -0.5907317399978638,
"logps/chosen": -165.56369018554688,
"logps/rejected": -225.3529052734375,
"loss": 0.4531,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 0.9883586764335632,
"rewards/margins": 0.6656315326690674,
"rewards/rejected": 0.32272714376449585,
"step": 400
},
{
"epoch": 0.5080978088281994,
"eval_logits/chosen": -0.6878591179847717,
"eval_logits/rejected": -0.7169935703277588,
"eval_logps/chosen": -264.5438537597656,
"eval_logps/rejected": -333.8891296386719,
"eval_loss": 0.4534913897514343,
"eval_rewards/accuracies": 0.8296296000480652,
"eval_rewards/chosen": 1.1282644271850586,
"eval_rewards/margins": 0.8690196871757507,
"eval_rewards/rejected": 0.259244829416275,
"eval_runtime": 185.5493,
"eval_samples_per_second": 11.614,
"eval_steps_per_second": 1.455,
"step": 400
},
{
"epoch": 0.5208002540489044,
"grad_norm": 1.1028372049331665,
"learning_rate": 2.7547007850106624e-07,
"logits/chosen": -0.7413824200630188,
"logits/rejected": -0.7111606597900391,
"logps/chosen": -205.3790283203125,
"logps/rejected": -249.98715209960938,
"loss": 0.4727,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.1707881689071655,
"rewards/margins": 1.0088186264038086,
"rewards/rejected": 0.16196946799755096,
"step": 410
},
{
"epoch": 0.5335026992696094,
"grad_norm": 1.1876864433288574,
"learning_rate": 2.6441315628288787e-07,
"logits/chosen": -0.5842682123184204,
"logits/rejected": -0.544371485710144,
"logps/chosen": -236.72720336914062,
"logps/rejected": -303.4477844238281,
"loss": 0.45,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.21246337890625,
"rewards/margins": 0.9723795652389526,
"rewards/rejected": 0.24008378386497498,
"step": 420
},
{
"epoch": 0.5462051444903144,
"grad_norm": 1.1539926528930664,
"learning_rate": 2.5332786003192846e-07,
"logits/chosen": -0.5589911341667175,
"logits/rejected": -0.6083785891532898,
"logps/chosen": -140.20826721191406,
"logps/rejected": -199.82095336914062,
"loss": 0.4595,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.1569812297821045,
"rewards/margins": 0.9193629026412964,
"rewards/rejected": 0.23761825263500214,
"step": 430
},
{
"epoch": 0.5589075897110194,
"grad_norm": 1.1994924545288086,
"learning_rate": 2.4223601248798486e-07,
"logits/chosen": -0.849249541759491,
"logits/rejected": -0.7424459457397461,
"logps/chosen": -144.5553741455078,
"logps/rejected": -174.42587280273438,
"loss": 0.4473,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.3170301914215088,
"rewards/margins": 1.0188204050064087,
"rewards/rejected": 0.29820993542671204,
"step": 440
},
{
"epoch": 0.5716100349317244,
"grad_norm": 1.4127888679504395,
"learning_rate": 2.3115944928786259e-07,
"logits/chosen": -0.7207301259040833,
"logits/rejected": -0.7091584205627441,
"logps/chosen": -149.24386596679688,
"logps/rejected": -187.45126342773438,
"loss": 0.4731,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.0391677618026733,
"rewards/margins": 0.7007487416267395,
"rewards/rejected": 0.3384190499782562,
"step": 450
},
{
"epoch": 0.5843124801524293,
"grad_norm": 1.1525746583938599,
"learning_rate": 2.2011997597929656e-07,
"logits/chosen": -0.6746450662612915,
"logits/rejected": -0.6320729851722717,
"logps/chosen": -225.69400024414062,
"logps/rejected": -271.2292785644531,
"loss": 0.4691,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.2698278427124023,
"rewards/margins": 0.8749024271965027,
"rewards/rejected": 0.3949252665042877,
"step": 460
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.8780391216278076,
"learning_rate": 2.091393250941067e-07,
"logits/chosen": -0.6812957525253296,
"logits/rejected": -0.6979972124099731,
"logps/chosen": -267.6707763671875,
"logps/rejected": -380.0335388183594,
"loss": 0.4701,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.2073866128921509,
"rewards/margins": 0.8721296191215515,
"rewards/rejected": 0.33525699377059937,
"step": 470
},
{
"epoch": 0.6097173705938393,
"grad_norm": 1.180364727973938,
"learning_rate": 1.9823911336509303e-07,
"logits/chosen": -0.5717257261276245,
"logits/rejected": -0.5920293927192688,
"logps/chosen": -242.43545532226562,
"logps/rejected": -303.1468200683594,
"loss": 0.4618,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 0.9825584292411804,
"rewards/margins": 0.6587264537811279,
"rewards/rejected": 0.32383203506469727,
"step": 480
},
{
"epoch": 0.6224198158145443,
"grad_norm": 1.6435413360595703,
"learning_rate": 1.8744079917089568e-07,
"logits/chosen": -0.4968065619468689,
"logits/rejected": -0.5184975862503052,
"logps/chosen": -178.31259155273438,
"logps/rejected": -267.4254455566406,
"loss": 0.4588,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.2706286907196045,
"rewards/margins": 1.0893701314926147,
"rewards/rejected": 0.18125855922698975,
"step": 490
},
{
"epoch": 0.6351222610352493,
"grad_norm": 1.284885048866272,
"learning_rate": 1.7676564029259403e-07,
"logits/chosen": -0.618794322013855,
"logits/rejected": -0.5696099400520325,
"logps/chosen": -199.17227172851562,
"logps/rejected": -268.42584228515625,
"loss": 0.4577,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.1055831909179688,
"rewards/margins": 0.5972105860710144,
"rewards/rejected": 0.5083726048469543,
"step": 500
},
{
"epoch": 0.6351222610352493,
"eval_logits/chosen": -0.6864954829216003,
"eval_logits/rejected": -0.7146346569061279,
"eval_logps/chosen": -262.1005554199219,
"eval_logps/rejected": -333.3753356933594,
"eval_loss": 0.44154587388038635,
"eval_rewards/accuracies": 0.8148148059844971,
"eval_rewards/chosen": 1.2504315376281738,
"eval_rewards/margins": 0.9654996395111084,
"eval_rewards/rejected": 0.2849319875240326,
"eval_runtime": 185.3366,
"eval_samples_per_second": 11.627,
"eval_steps_per_second": 1.457,
"step": 500
},
{
"epoch": 0.6478247062559542,
"grad_norm": 1.353294014930725,
"learning_rate": 1.662346520652064e-07,
"logits/chosen": -0.6487321853637695,
"logits/rejected": -0.6713512539863586,
"logps/chosen": -205.1356964111328,
"logps/rejected": -274.49456787109375,
"loss": 0.4601,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.3229888677597046,
"rewards/margins": 1.2638088464736938,
"rewards/rejected": 0.059180211275815964,
"step": 510
},
{
"epoch": 0.6605271514766593,
"grad_norm": 1.7050055265426636,
"learning_rate": 1.5586856600647344e-07,
"logits/chosen": -0.6692296266555786,
"logits/rejected": -0.614514946937561,
"logps/chosen": -209.00289916992188,
"logps/rejected": -292.3746032714844,
"loss": 0.4076,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.253557562828064,
"rewards/margins": 1.0823756456375122,
"rewards/rejected": 0.17118188738822937,
"step": 520
},
{
"epoch": 0.6732295966973643,
"grad_norm": 1.3828163146972656,
"learning_rate": 1.4568778900437088e-07,
"logits/chosen": -0.6728982329368591,
"logits/rejected": -0.6769570708274841,
"logps/chosen": -179.9535675048828,
"logps/rejected": -236.40933227539062,
"loss": 0.4519,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.318244218826294,
"rewards/margins": 1.0222580432891846,
"rewards/rejected": 0.29598650336265564,
"step": 530
},
{
"epoch": 0.6859320419180692,
"grad_norm": 1.160503625869751,
"learning_rate": 1.3571236314369423e-07,
"logits/chosen": -0.5671137571334839,
"logits/rejected": -0.5883482694625854,
"logps/chosen": -187.75180053710938,
"logps/rejected": -223.4710693359375,
"loss": 0.4377,
"rewards/accuracies": 0.875,
"rewards/chosen": 1.2862342596054077,
"rewards/margins": 0.87205970287323,
"rewards/rejected": 0.41417449712753296,
"step": 540
},
{
"epoch": 0.6986344871387742,
"grad_norm": 1.1118255853652954,
"learning_rate": 1.2596192625080293e-07,
"logits/chosen": -0.5980581045150757,
"logits/rejected": -0.6084080934524536,
"logps/chosen": -191.54229736328125,
"logps/rejected": -219.666259765625,
"loss": 0.4281,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.3361330032348633,
"rewards/margins": 1.1481592655181885,
"rewards/rejected": 0.18797388672828674,
"step": 550
},
{
"epoch": 0.7113369323594791,
"grad_norm": 2.1222221851348877,
"learning_rate": 1.1645567323419564e-07,
"logits/chosen": -0.6676138639450073,
"logits/rejected": -0.5566924810409546,
"logps/chosen": -151.61227416992188,
"logps/rejected": -165.1892852783203,
"loss": 0.4793,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.3049802780151367,
"rewards/margins": 0.9568456411361694,
"rewards/rejected": 0.3481346666812897,
"step": 560
},
{
"epoch": 0.7240393775801842,
"grad_norm": 1.3787516355514526,
"learning_rate": 1.0721231829702293e-07,
"logits/chosen": -0.6384049654006958,
"logits/rejected": -0.6119662523269653,
"logps/chosen": -175.11257934570312,
"logps/rejected": -262.1940002441406,
"loss": 0.4495,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.2990167140960693,
"rewards/margins": 1.024531364440918,
"rewards/rejected": 0.2744852304458618,
"step": 570
},
{
"epoch": 0.7367418228008892,
"grad_norm": 1.6886653900146484,
"learning_rate": 9.825005809592563e-08,
"logits/chosen": -0.6275514364242554,
"logits/rejected": -0.6404930353164673,
"logps/chosen": -244.3218231201172,
"logps/rejected": -299.2083435058594,
"loss": 0.4549,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.245491623878479,
"rewards/margins": 0.7635373473167419,
"rewards/rejected": 0.48195403814315796,
"step": 580
},
{
"epoch": 0.7494442680215941,
"grad_norm": 1.4264272451400757,
"learning_rate": 8.958653591872676e-08,
"logits/chosen": -0.7465443015098572,
"logits/rejected": -0.6715372800827026,
"logps/chosen": -181.6468048095703,
"logps/rejected": -280.73455810546875,
"loss": 0.432,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.3341939449310303,
"rewards/margins": 1.559653401374817,
"rewards/rejected": -0.225459486246109,
"step": 590
},
{
"epoch": 0.7621467132422991,
"grad_norm": 1.1271839141845703,
"learning_rate": 8.123880695149609e-08,
"logits/chosen": -0.7006452083587646,
"logits/rejected": -0.6168887615203857,
"logps/chosen": -230.3083038330078,
"logps/rejected": -241.9764862060547,
"loss": 0.4715,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": 1.2569708824157715,
"rewards/margins": 1.1274547576904297,
"rewards/rejected": 0.129516139626503,
"step": 600
},
{
"epoch": 0.7621467132422991,
"eval_logits/chosen": -0.6912533640861511,
"eval_logits/rejected": -0.7174736261367798,
"eval_logps/chosen": -261.1841735839844,
"eval_logps/rejected": -333.3468933105469,
"eval_loss": 0.43639105558395386,
"eval_rewards/accuracies": 0.8148148059844971,
"eval_rewards/chosen": 1.296250343322754,
"eval_rewards/margins": 1.0098947286605835,
"eval_rewards/rejected": 0.28635546565055847,
"eval_runtime": 185.3004,
"eval_samples_per_second": 11.63,
"eval_steps_per_second": 1.457,
"step": 600
},
{
"epoch": 0.7748491584630042,
"grad_norm": 1.1930325031280518,
"learning_rate": 7.322330470336313e-08,
"logits/chosen": -0.8094059824943542,
"logits/rejected": -0.797622561454773,
"logps/chosen": -228.1602325439453,
"logps/rejected": -284.61572265625,
"loss": 0.4464,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.4457851648330688,
"rewards/margins": 1.0705646276474,
"rewards/rejected": 0.3752205967903137,
"step": 610
},
{
"epoch": 0.7875516036837091,
"grad_norm": 1.3116133213043213,
"learning_rate": 6.555580865517723e-08,
"logits/chosen": -0.824436366558075,
"logits/rejected": -0.7766145467758179,
"logps/chosen": -197.40274047851562,
"logps/rejected": -225.97409057617188,
"loss": 0.4325,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.3687061071395874,
"rewards/margins": 1.191906452178955,
"rewards/rejected": 0.17679981887340546,
"step": 620
},
{
"epoch": 0.8002540489044141,
"grad_norm": 1.002442717552185,
"learning_rate": 5.825141319570032e-08,
"logits/chosen": -0.6042752265930176,
"logits/rejected": -0.6582852005958557,
"logps/chosen": -191.42745971679688,
"logps/rejected": -280.71923828125,
"loss": 0.4689,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.2531533241271973,
"rewards/margins": 0.9376416206359863,
"rewards/rejected": 0.31551170349121094,
"step": 630
},
{
"epoch": 0.8129564941251191,
"grad_norm": 1.0543403625488281,
"learning_rate": 5.13244979064856e-08,
"logits/chosen": -0.5482285022735596,
"logits/rejected": -0.5604708194732666,
"logps/chosen": -215.088134765625,
"logps/rejected": -227.7083282470703,
"loss": 0.4403,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.3573553562164307,
"rewards/margins": 0.8670072555541992,
"rewards/rejected": 0.4903482496738434,
"step": 640
},
{
"epoch": 0.825658939345824,
"grad_norm": 0.9852587580680847,
"learning_rate": 4.4788699253941435e-08,
"logits/chosen": -0.5300780534744263,
"logits/rejected": -0.5268362760543823,
"logps/chosen": -256.68707275390625,
"logps/rejected": -314.91387939453125,
"loss": 0.4782,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.2139735221862793,
"rewards/margins": 1.106140375137329,
"rewards/rejected": 0.10783319175243378,
"step": 650
},
{
"epoch": 0.8383613845665291,
"grad_norm": 1.099290132522583,
"learning_rate": 3.865688374430534e-08,
"logits/chosen": -0.6461602449417114,
"logits/rejected": -0.6405094861984253,
"logps/chosen": -184.21109008789062,
"logps/rejected": -245.28939819335938,
"loss": 0.4394,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.329444169998169,
"rewards/margins": 0.9136478304862976,
"rewards/rejected": 0.4157963693141937,
"step": 660
},
{
"epoch": 0.851063829787234,
"grad_norm": 1.1408357620239258,
"learning_rate": 3.294112259437823e-08,
"logits/chosen": -0.7240138053894043,
"logits/rejected": -0.637109637260437,
"logps/chosen": -236.3959197998047,
"logps/rejected": -234.95932006835938,
"loss": 0.4492,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.321778655052185,
"rewards/margins": 0.8872841596603394,
"rewards/rejected": 0.43449434638023376,
"step": 670
},
{
"epoch": 0.863766275007939,
"grad_norm": 1.260270118713379,
"learning_rate": 2.765266796788146e-08,
"logits/chosen": -0.6490672826766968,
"logits/rejected": -0.6002823114395142,
"logps/chosen": -216.1714630126953,
"logps/rejected": -259.24688720703125,
"loss": 0.481,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.3467859029769897,
"rewards/margins": 1.0647392272949219,
"rewards/rejected": 0.2820465564727783,
"step": 680
},
{
"epoch": 0.876468720228644,
"grad_norm": 1.33726167678833,
"learning_rate": 2.280193082421819e-08,
"logits/chosen": -0.5675562620162964,
"logits/rejected": -0.6162486672401428,
"logps/chosen": -216.9974365234375,
"logps/rejected": -380.35784912109375,
"loss": 0.4422,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.1809179782867432,
"rewards/margins": 0.8867913484573364,
"rewards/rejected": 0.2941267490386963,
"step": 690
},
{
"epoch": 0.889171165449349,
"grad_norm": 1.586350679397583,
"learning_rate": 1.8398460423246857e-08,
"logits/chosen": -0.6772684454917908,
"logits/rejected": -0.5936257243156433,
"logps/chosen": -232.6663360595703,
"logps/rejected": -293.7143249511719,
"loss": 0.4508,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.3128650188446045,
"rewards/margins": 1.278008222579956,
"rewards/rejected": 0.034857120364904404,
"step": 700
},
{
"epoch": 0.889171165449349,
"eval_logits/chosen": -0.6937349438667297,
"eval_logits/rejected": -0.718503475189209,
"eval_logps/chosen": -261.1283264160156,
"eval_logps/rejected": -333.4368896484375,
"eval_loss": 0.4347935914993286,
"eval_rewards/accuracies": 0.8222222328186035,
"eval_rewards/chosen": 1.299042820930481,
"eval_rewards/margins": 1.0171869993209839,
"eval_rewards/rejected": 0.28185585141181946,
"eval_runtime": 185.3417,
"eval_samples_per_second": 11.627,
"eval_steps_per_second": 1.457,
"step": 700
},
{
"epoch": 0.901873610670054,
"grad_norm": 1.170996904373169,
"learning_rate": 1.4450925526413999e-08,
"logits/chosen": -0.5574235916137695,
"logits/rejected": -0.5444169044494629,
"logps/chosen": -176.5850067138672,
"logps/rejected": -242.562255859375,
"loss": 0.4564,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 1.3142030239105225,
"rewards/margins": 1.3917624950408936,
"rewards/rejected": -0.07755955308675766,
"step": 710
},
{
"epoch": 0.914576055890759,
"grad_norm": 1.0479140281677246,
"learning_rate": 1.0967097331253866e-08,
"logits/chosen": -0.6848015189170837,
"logits/rejected": -0.5968100428581238,
"logps/chosen": -262.5163879394531,
"logps/rejected": -332.9224853515625,
"loss": 0.4905,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.430826187133789,
"rewards/margins": 1.149071455001831,
"rewards/rejected": 0.28175467252731323,
"step": 720
},
{
"epoch": 0.9272785011114639,
"grad_norm": 1.341672420501709,
"learning_rate": 7.953834172850865e-09,
"logits/chosen": -0.5690629482269287,
"logits/rejected": -0.5203067064285278,
"logps/chosen": -216.2689208984375,
"logps/rejected": -235.74697875976562,
"loss": 0.3816,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.1782363653182983,
"rewards/margins": 1.0911990404129028,
"rewards/rejected": 0.08703745901584625,
"step": 730
},
{
"epoch": 0.9399809463321689,
"grad_norm": 1.169708251953125,
"learning_rate": 5.417068022381593e-09,
"logits/chosen": -0.7713934779167175,
"logits/rejected": -0.6765065789222717,
"logps/chosen": -225.0927734375,
"logps/rejected": -256.9820861816406,
"loss": 0.4498,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": 1.4216610193252563,
"rewards/margins": 1.1197834014892578,
"rewards/rejected": 0.30187731981277466,
"step": 740
},
{
"epoch": 0.952683391552874,
"grad_norm": 1.013831377029419,
"learning_rate": 3.361792809315728e-09,
"logits/chosen": -0.5675567388534546,
"logits/rejected": -0.6261376738548279,
"logps/chosen": -180.87399291992188,
"logps/rejected": -250.5148162841797,
"loss": 0.4227,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": 1.395775556564331,
"rewards/margins": 1.2045260667800903,
"rewards/rejected": 0.19124947488307953,
"step": 750
},
{
"epoch": 0.9653858367735789,
"grad_norm": 1.2621924877166748,
"learning_rate": 1.7920545902646023e-09,
"logits/chosen": -0.7681080102920532,
"logits/rejected": -0.7932102680206299,
"logps/chosen": -202.87564086914062,
"logps/rejected": -314.3702697753906,
"loss": 0.4557,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.1459980010986328,
"rewards/margins": 0.9246684312820435,
"rewards/rejected": 0.22132959961891174,
"step": 760
},
{
"epoch": 0.9780882819942839,
"grad_norm": 1.0491993427276611,
"learning_rate": 7.109435838320299e-10,
"logits/chosen": -0.8198385238647461,
"logits/rejected": -0.8252199292182922,
"logps/chosen": -137.2249298095703,
"logps/rejected": -203.43255615234375,
"loss": 0.4137,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 1.4417117834091187,
"rewards/margins": 1.4029321670532227,
"rewards/rejected": 0.03877962380647659,
"step": 770
},
{
"epoch": 0.9907907272149888,
"grad_norm": 1.5026969909667969,
"learning_rate": 1.2058808714698887e-10,
"logits/chosen": -0.5986198782920837,
"logits/rejected": -0.6104772686958313,
"logps/chosen": -200.24258422851562,
"logps/rejected": -333.97491455078125,
"loss": 0.4447,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.3231384754180908,
"rewards/margins": 0.8273136019706726,
"rewards/rejected": 0.4958249032497406,
"step": 780
},
{
"epoch": 0.9996824388694824,
"step": 787,
"total_flos": 0.0,
"train_loss": 0.5202696668753327,
"train_runtime": 5902.1361,
"train_samples_per_second": 4.267,
"train_steps_per_second": 0.133
}
],
"logging_steps": 10,
"max_steps": 787,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}