ap-normistral-7b-align-scan / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 385,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 22.875,
"learning_rate": 1.282051282051282e-07,
"logits/chosen": 88.18099975585938,
"logits/rejected": 88.25153350830078,
"logps/chosen": -29.073104858398438,
"logps/rejected": -26.25731658935547,
"loss": 0.5,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.03,
"grad_norm": 8.75,
"learning_rate": 1.282051282051282e-06,
"logits/chosen": 81.07839965820312,
"logits/rejected": 80.78140258789062,
"logps/chosen": -34.22772216796875,
"logps/rejected": -33.004886627197266,
"loss": 0.493,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": 0.009805028326809406,
"rewards/margins": 0.0423436164855957,
"rewards/rejected": -0.03253858909010887,
"step": 10
},
{
"epoch": 0.05,
"grad_norm": 15.5,
"learning_rate": 2.564102564102564e-06,
"logits/chosen": 80.6578369140625,
"logits/rejected": 80.54452514648438,
"logps/chosen": -33.567832946777344,
"logps/rejected": -30.769500732421875,
"loss": 0.4824,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.0854543000459671,
"rewards/margins": 0.07986308634281158,
"rewards/rejected": 0.005591208580881357,
"step": 20
},
{
"epoch": 0.08,
"grad_norm": 17.875,
"learning_rate": 3.846153846153847e-06,
"logits/chosen": 82.4737777709961,
"logits/rejected": 82.50477600097656,
"logps/chosen": -33.87335205078125,
"logps/rejected": -31.195053100585938,
"loss": 0.5111,
"rewards/accuracies": 0.4124999940395355,
"rewards/chosen": 0.09762730449438095,
"rewards/margins": -0.06251715123653412,
"rewards/rejected": 0.16014449298381805,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 15.625,
"learning_rate": 4.999896948438434e-06,
"logits/chosen": 81.05291748046875,
"logits/rejected": 81.04745483398438,
"logps/chosen": -32.597801208496094,
"logps/rejected": -33.1552734375,
"loss": 0.4525,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.40338650345802307,
"rewards/margins": 0.25411099195480347,
"rewards/rejected": 0.14927545189857483,
"step": 40
},
{
"epoch": 0.13,
"grad_norm": 14.5625,
"learning_rate": 4.987541037542187e-06,
"logits/chosen": 78.7928466796875,
"logits/rejected": 78.8023910522461,
"logps/chosen": -30.487594604492188,
"logps/rejected": -30.689586639404297,
"loss": 0.4635,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.5023558735847473,
"rewards/margins": 0.23376783728599548,
"rewards/rejected": 0.26858800649642944,
"step": 50
},
{
"epoch": 0.16,
"grad_norm": 13.375,
"learning_rate": 4.954691471941119e-06,
"logits/chosen": 83.43833923339844,
"logits/rejected": 83.5004653930664,
"logps/chosen": -30.851482391357422,
"logps/rejected": -29.280162811279297,
"loss": 0.501,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": 0.22657600045204163,
"rewards/margins": 0.009325137361884117,
"rewards/rejected": 0.21725085377693176,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 13.5,
"learning_rate": 4.901618883413549e-06,
"logits/chosen": 84.01557159423828,
"logits/rejected": 84.04491424560547,
"logps/chosen": -30.311126708984375,
"logps/rejected": -32.73175811767578,
"loss": 0.4997,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": 0.22253043949604034,
"rewards/margins": 0.008363036438822746,
"rewards/rejected": 0.21416740119457245,
"step": 70
},
{
"epoch": 0.21,
"grad_norm": 16.375,
"learning_rate": 4.828760511501322e-06,
"logits/chosen": 81.79966735839844,
"logits/rejected": 81.77557373046875,
"logps/chosen": -31.009265899658203,
"logps/rejected": -30.690744400024414,
"loss": 0.4434,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.34697332978248596,
"rewards/margins": 0.3001402020454407,
"rewards/rejected": 0.04683312773704529,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 20.75,
"learning_rate": 4.7367166013034295e-06,
"logits/chosen": 78.57239532470703,
"logits/rejected": 78.55184173583984,
"logps/chosen": -32.28063201904297,
"logps/rejected": -30.936914443969727,
"loss": 0.4612,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.2502840459346771,
"rewards/margins": 0.2400922328233719,
"rewards/rejected": 0.0101918401196599,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 15.5625,
"learning_rate": 4.626245458345211e-06,
"logits/chosen": 83.69164276123047,
"logits/rejected": 83.69895935058594,
"logps/chosen": -33.99507141113281,
"logps/rejected": -31.751794815063477,
"loss": 0.461,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.21694469451904297,
"rewards/margins": 0.21781782805919647,
"rewards/rejected": -0.0008731067064218223,
"step": 100
},
{
"epoch": 0.26,
"eval_logits/chosen": 98.70431518554688,
"eval_logits/rejected": 98.68829345703125,
"eval_logps/chosen": -32.346439361572266,
"eval_logps/rejected": -35.87228775024414,
"eval_loss": 0.5000348687171936,
"eval_rewards/accuracies": 0.5157807469367981,
"eval_rewards/chosen": 0.06771623343229294,
"eval_rewards/margins": 0.0017476709326729178,
"eval_rewards/rejected": 0.06596855819225311,
"eval_runtime": 104.2308,
"eval_samples_per_second": 3.291,
"eval_steps_per_second": 0.413,
"step": 100
},
{
"epoch": 0.29,
"grad_norm": 17.625,
"learning_rate": 4.498257201263691e-06,
"logits/chosen": 83.92507934570312,
"logits/rejected": 83.79533386230469,
"logps/chosen": -32.187767028808594,
"logps/rejected": -32.65043258666992,
"loss": 0.3988,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": 0.5845246911048889,
"rewards/margins": 0.5807043313980103,
"rewards/rejected": 0.0038203417789191008,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 19.375,
"learning_rate": 4.353806263777678e-06,
"logits/chosen": 83.96737670898438,
"logits/rejected": 84.08332061767578,
"logps/chosen": -28.09195327758789,
"logps/rejected": -35.17109680175781,
"loss": 0.4351,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": 0.5863426327705383,
"rewards/margins": 0.3847105801105499,
"rewards/rejected": 0.2016320526599884,
"step": 120
},
{
"epoch": 0.34,
"grad_norm": 13.25,
"learning_rate": 4.1940827077152755e-06,
"logits/chosen": 81.23406219482422,
"logits/rejected": 81.2734603881836,
"logps/chosen": -30.209375381469727,
"logps/rejected": -31.70619773864746,
"loss": 0.4414,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.48858681321144104,
"rewards/margins": 0.330787718296051,
"rewards/rejected": 0.1577991098165512,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 15.3125,
"learning_rate": 4.0204024186666215e-06,
"logits/chosen": 82.31855773925781,
"logits/rejected": 82.33930206298828,
"logps/chosen": -26.636510848999023,
"logps/rejected": -32.51647186279297,
"loss": 0.3966,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": 0.5675702691078186,
"rewards/margins": 0.6327372789382935,
"rewards/rejected": -0.06516699492931366,
"step": 140
},
{
"epoch": 0.39,
"grad_norm": 14.5625,
"learning_rate": 3.834196265035119e-06,
"logits/chosen": 80.7647476196289,
"logits/rejected": 80.73397064208984,
"logps/chosen": -28.527297973632812,
"logps/rejected": -32.617584228515625,
"loss": 0.3943,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.603269100189209,
"rewards/margins": 0.6390293836593628,
"rewards/rejected": -0.035760145634412766,
"step": 150
},
{
"epoch": 0.42,
"grad_norm": 16.0,
"learning_rate": 3.636998309800573e-06,
"logits/chosen": 82.51421356201172,
"logits/rejected": 82.53550720214844,
"logps/chosen": -33.11312484741211,
"logps/rejected": -30.041656494140625,
"loss": 0.3649,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 0.7165369987487793,
"rewards/margins": 0.7789196372032166,
"rewards/rejected": -0.06238250061869621,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 11.75,
"learning_rate": 3.4304331721118078e-06,
"logits/chosen": 83.22119903564453,
"logits/rejected": 83.18238067626953,
"logps/chosen": -30.43136978149414,
"logps/rejected": -32.384212493896484,
"loss": 0.3752,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 0.5795407295227051,
"rewards/margins": 0.8344497680664062,
"rewards/rejected": -0.25490903854370117,
"step": 170
},
{
"epoch": 0.47,
"grad_norm": 7.625,
"learning_rate": 3.2162026428305436e-06,
"logits/chosen": 80.77767181396484,
"logits/rejected": 80.74992370605469,
"logps/chosen": -30.34661293029785,
"logps/rejected": -31.299999237060547,
"loss": 0.3834,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.5953787565231323,
"rewards/margins": 0.6953333616256714,
"rewards/rejected": -0.09995462000370026,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 7.5,
"learning_rate": 2.996071664294641e-06,
"logits/chosen": 82.40937805175781,
"logits/rejected": 82.38029479980469,
"logps/chosen": -29.8316593170166,
"logps/rejected": -30.288339614868164,
"loss": 0.4151,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": 0.6543310284614563,
"rewards/margins": 0.5285537838935852,
"rewards/rejected": 0.1257772147655487,
"step": 190
},
{
"epoch": 0.52,
"grad_norm": 8.5625,
"learning_rate": 2.7718537898066833e-06,
"logits/chosen": 77.76971435546875,
"logits/rejected": 77.71305847167969,
"logps/chosen": -33.00738525390625,
"logps/rejected": -32.31935119628906,
"loss": 0.3172,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.2217466831207275,
"rewards/margins": 1.0999095439910889,
"rewards/rejected": 0.12183725833892822,
"step": 200
},
{
"epoch": 0.52,
"eval_logits/chosen": 98.44633483886719,
"eval_logits/rejected": 98.44091033935547,
"eval_logps/chosen": -32.319034576416016,
"eval_logps/rejected": -35.88397979736328,
"eval_loss": 0.49560824036598206,
"eval_rewards/accuracies": 0.5132890343666077,
"eval_rewards/chosen": 0.08689937740564346,
"eval_rewards/margins": 0.029115768149495125,
"eval_rewards/rejected": 0.057783618569374084,
"eval_runtime": 104.0206,
"eval_samples_per_second": 3.297,
"eval_steps_per_second": 0.413,
"step": 200
},
{
"epoch": 0.55,
"grad_norm": 18.875,
"learning_rate": 2.5453962426402006e-06,
"logits/chosen": 80.35867309570312,
"logits/rejected": 80.27238464355469,
"logps/chosen": -32.687442779541016,
"logps/rejected": -34.85881423950195,
"loss": 0.3718,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": 0.8807934522628784,
"rewards/margins": 0.7916989922523499,
"rewards/rejected": 0.08909468352794647,
"step": 210
},
{
"epoch": 0.57,
"grad_norm": 15.625,
"learning_rate": 2.3185646976551794e-06,
"logits/chosen": 82.43341064453125,
"logits/rejected": 82.50484466552734,
"logps/chosen": -30.421199798583984,
"logps/rejected": -30.724105834960938,
"loss": 0.3248,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.0694276094436646,
"rewards/margins": 1.0660343170166016,
"rewards/rejected": 0.0033930898644030094,
"step": 220
},
{
"epoch": 0.6,
"grad_norm": 11.6875,
"learning_rate": 2.0932279108998323e-06,
"logits/chosen": 79.52705383300781,
"logits/rejected": 79.57157897949219,
"logps/chosen": -31.819992065429688,
"logps/rejected": -33.83860397338867,
"loss": 0.4096,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.6900272369384766,
"rewards/margins": 0.5685457587242126,
"rewards/rejected": 0.1214815154671669,
"step": 230
},
{
"epoch": 0.62,
"grad_norm": 17.25,
"learning_rate": 1.8712423238279358e-06,
"logits/chosen": 81.8853988647461,
"logits/rejected": 82.16355895996094,
"logps/chosen": -30.063451766967773,
"logps/rejected": -31.44558334350586,
"loss": 0.3113,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": 1.137229561805725,
"rewards/margins": 1.0767086744308472,
"rewards/rejected": 0.06052086502313614,
"step": 240
},
{
"epoch": 0.65,
"grad_norm": 17.0,
"learning_rate": 1.6544367689701824e-06,
"logits/chosen": 80.61282348632812,
"logits/rejected": 80.67658996582031,
"logps/chosen": -26.506671905517578,
"logps/rejected": -29.721996307373047,
"loss": 0.4011,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.7624577283859253,
"rewards/margins": 0.6373406648635864,
"rewards/rejected": 0.12511704862117767,
"step": 250
},
{
"epoch": 0.68,
"grad_norm": 12.25,
"learning_rate": 1.4445974030621963e-06,
"logits/chosen": 77.79762268066406,
"logits/rejected": 77.97430419921875,
"logps/chosen": -29.87813949584961,
"logps/rejected": -35.95857238769531,
"loss": 0.3311,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 1.1779998540878296,
"rewards/margins": 1.0778592824935913,
"rewards/rejected": 0.10014041513204575,
"step": 260
},
{
"epoch": 0.7,
"grad_norm": 8.8125,
"learning_rate": 1.243452991757889e-06,
"logits/chosen": 77.17610168457031,
"logits/rejected": 77.21318817138672,
"logps/chosen": -30.205867767333984,
"logps/rejected": -31.4431095123291,
"loss": 0.3276,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 1.1011395454406738,
"rewards/margins": 1.0575218200683594,
"rewards/rejected": 0.04361782968044281,
"step": 270
},
{
"epoch": 0.73,
"grad_norm": 17.375,
"learning_rate": 1.0526606671603523e-06,
"logits/chosen": 79.84697723388672,
"logits/rejected": 79.62041473388672,
"logps/chosen": -30.549224853515625,
"logps/rejected": -29.2475643157959,
"loss": 0.3847,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.8824693560600281,
"rewards/margins": 0.588868260383606,
"rewards/rejected": 0.2936010956764221,
"step": 280
},
{
"epoch": 0.75,
"grad_norm": 13.125,
"learning_rate": 8.737922755071455e-07,
"logits/chosen": 80.05425262451172,
"logits/rejected": 79.96458435058594,
"logps/chosen": -32.43302536010742,
"logps/rejected": -32.11684036254883,
"loss": 0.3019,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": 1.169856309890747,
"rewards/margins": 1.2398390769958496,
"rewards/rejected": -0.06998284161090851,
"step": 290
},
{
"epoch": 0.78,
"grad_norm": 9.5625,
"learning_rate": 7.08321427484816e-07,
"logits/chosen": 75.59251403808594,
"logits/rejected": 75.66484069824219,
"logps/chosen": -31.76091957092285,
"logps/rejected": -28.981002807617188,
"loss": 0.3303,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 1.151352882385254,
"rewards/margins": 1.0666378736495972,
"rewards/rejected": 0.08471502363681793,
"step": 300
},
{
"epoch": 0.78,
"eval_logits/chosen": 98.37130737304688,
"eval_logits/rejected": 98.36297607421875,
"eval_logps/chosen": -32.2767219543457,
"eval_logps/rejected": -35.79054260253906,
"eval_loss": 0.49796876311302185,
"eval_rewards/accuracies": 0.5016611218452454,
"eval_rewards/chosen": 0.11651887744665146,
"eval_rewards/margins": -0.0066675543785095215,
"eval_rewards/rejected": 0.12318644672632217,
"eval_runtime": 104.1032,
"eval_samples_per_second": 3.295,
"eval_steps_per_second": 0.413,
"step": 300
},
{
"epoch": 0.81,
"grad_norm": 10.625,
"learning_rate": 5.576113578589035e-07,
"logits/chosen": 82.78644561767578,
"logits/rejected": 82.81319427490234,
"logps/chosen": -29.468921661376953,
"logps/rejected": -32.31016159057617,
"loss": 0.3414,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.9762304425239563,
"rewards/margins": 1.0535131692886353,
"rewards/rejected": -0.07728256285190582,
"step": 310
},
{
"epoch": 0.83,
"grad_norm": 13.0625,
"learning_rate": 4.229036944380913e-07,
"logits/chosen": 80.16923522949219,
"logits/rejected": 80.17256164550781,
"logps/chosen": -29.88885498046875,
"logps/rejected": -28.86279296875,
"loss": 0.3116,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 1.1967929601669312,
"rewards/margins": 1.157100796699524,
"rewards/rejected": 0.03969240561127663,
"step": 320
},
{
"epoch": 0.86,
"grad_norm": 11.6875,
"learning_rate": 3.053082288996112e-07,
"logits/chosen": 77.39164733886719,
"logits/rejected": 77.41889953613281,
"logps/chosen": -28.371896743774414,
"logps/rejected": -32.67755889892578,
"loss": 0.2678,
"rewards/accuracies": 0.75,
"rewards/chosen": 1.3864963054656982,
"rewards/margins": 1.4163827896118164,
"rewards/rejected": -0.02988656982779503,
"step": 330
},
{
"epoch": 0.88,
"grad_norm": 15.3125,
"learning_rate": 2.0579377374915805e-07,
"logits/chosen": 81.65691375732422,
"logits/rejected": 81.6773681640625,
"logps/chosen": -31.6644229888916,
"logps/rejected": -33.2541389465332,
"loss": 0.3491,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": 1.0834088325500488,
"rewards/margins": 1.003218412399292,
"rewards/rejected": 0.08019042015075684,
"step": 340
},
{
"epoch": 0.91,
"grad_norm": 9.5625,
"learning_rate": 1.2518018074041684e-07,
"logits/chosen": 80.73246765136719,
"logits/rejected": 80.75402069091797,
"logps/chosen": -31.80560874938965,
"logps/rejected": -32.868934631347656,
"loss": 0.3373,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.2866908311843872,
"rewards/margins": 1.1306837797164917,
"rewards/rejected": 0.15600721538066864,
"step": 350
},
{
"epoch": 0.94,
"grad_norm": 10.5,
"learning_rate": 6.41315865106129e-08,
"logits/chosen": 82.23421478271484,
"logits/rejected": 82.29096221923828,
"logps/chosen": -27.619741439819336,
"logps/rejected": -31.5566349029541,
"loss": 0.2941,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": 1.3527272939682007,
"rewards/margins": 1.2874782085418701,
"rewards/rejected": 0.06524892896413803,
"step": 360
},
{
"epoch": 0.96,
"grad_norm": 11.5,
"learning_rate": 2.3150941078050325e-08,
"logits/chosen": 81.71080017089844,
"logits/rejected": 81.74567413330078,
"logps/chosen": -31.354503631591797,
"logps/rejected": -34.771522521972656,
"loss": 0.3671,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": 1.0651617050170898,
"rewards/margins": 0.8286684155464172,
"rewards/rejected": 0.23649325966835022,
"step": 370
},
{
"epoch": 0.99,
"grad_norm": 13.8125,
"learning_rate": 2.575864278703266e-09,
"logits/chosen": 75.57669067382812,
"logits/rejected": 75.45292663574219,
"logps/chosen": -29.130199432373047,
"logps/rejected": -27.9501953125,
"loss": 0.3558,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": 1.0121484994888306,
"rewards/margins": 0.8648824691772461,
"rewards/rejected": 0.14726600050926208,
"step": 380
},
{
"epoch": 1.0,
"step": 385,
"total_flos": 0.0,
"train_loss": 0.38857739250381274,
"train_runtime": 2559.074,
"train_samples_per_second": 1.203,
"train_steps_per_second": 0.15
}
],
"logging_steps": 10,
"max_steps": 385,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}