{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 443,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.111111111111111e-08,
      "logits/chosen": -2.737483501434326,
      "logits/rejected": -2.581545829772949,
      "logps/chosen": -157.5369873046875,
      "logps/rejected": -100.35362243652344,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.111111111111111e-07,
      "logits/chosen": -2.61592960357666,
      "logits/rejected": -2.572232484817505,
      "logps/chosen": -124.513671875,
      "logps/rejected": -91.37435913085938,
      "loss": 0.6778,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.04052966088056564,
      "rewards/margins": 0.01970052346587181,
      "rewards/rejected": 0.020829135552048683,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.222222222222222e-07,
      "logits/chosen": -2.565521001815796,
      "logits/rejected": -2.4634904861450195,
      "logps/chosen": -114.85684967041016,
      "logps/rejected": -80.09593963623047,
      "loss": 0.5129,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.4715304970741272,
      "rewards/margins": 0.5839148759841919,
      "rewards/rejected": -0.11238440126180649,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.333333333333333e-07,
      "logits/chosen": -2.5056052207946777,
      "logits/rejected": -2.4653897285461426,
      "logps/chosen": -123.69754791259766,
      "logps/rejected": -103.3398208618164,
      "loss": 0.4006,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.01100767869502306,
      "rewards/margins": 1.2376883029937744,
      "rewards/rejected": -1.2266806364059448,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.444444444444444e-07,
      "logits/chosen": -2.4814658164978027,
      "logits/rejected": -2.4201178550720215,
      "logps/chosen": -136.6073760986328,
      "logps/rejected": -117.58763122558594,
      "loss": 0.3056,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.353680282831192,
      "rewards/margins": 1.6959145069122314,
      "rewards/rejected": -2.0495948791503906,
      "step": 40
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.937185929648241e-07,
      "logits/chosen": -2.458440065383911,
      "logits/rejected": -2.3304443359375,
      "logps/chosen": -124.83992004394531,
      "logps/rejected": -106.70866394042969,
      "loss": 0.2679,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.31784066557884216,
      "rewards/margins": 2.447019100189209,
      "rewards/rejected": -2.764859914779663,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.811557788944723e-07,
      "logits/chosen": -2.396515369415283,
      "logits/rejected": -2.293870687484741,
      "logps/chosen": -135.10882568359375,
      "logps/rejected": -122.99256896972656,
      "loss": 0.2447,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.10166273266077042,
      "rewards/margins": 3.037165641784668,
      "rewards/rejected": -3.1388285160064697,
      "step": 60
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.685929648241206e-07,
      "logits/chosen": -2.355457305908203,
      "logits/rejected": -2.2316598892211914,
      "logps/chosen": -122.27568054199219,
      "logps/rejected": -113.7423095703125,
      "loss": 0.2718,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.5933733582496643,
      "rewards/margins": 2.744062662124634,
      "rewards/rejected": -3.3374359607696533,
      "step": 70
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.5603015075376885e-07,
      "logits/chosen": -2.361450672149658,
      "logits/rejected": -2.286520481109619,
      "logps/chosen": -127.52728271484375,
      "logps/rejected": -126.6650161743164,
      "loss": 0.2313,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.7555137872695923,
      "rewards/margins": 3.271893262863159,
      "rewards/rejected": -4.027407169342041,
      "step": 80
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.4346733668341706e-07,
      "logits/chosen": -2.3498244285583496,
      "logits/rejected": -2.257256507873535,
      "logps/chosen": -122.7756118774414,
      "logps/rejected": -121.1026382446289,
      "loss": 0.2307,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.7029477953910828,
      "rewards/margins": 3.352973222732544,
      "rewards/rejected": -4.0559210777282715,
      "step": 90
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.309045226130653e-07,
      "logits/chosen": -2.4514331817626953,
      "logits/rejected": -2.378006935119629,
      "logps/chosen": -129.557861328125,
      "logps/rejected": -123.76164245605469,
      "loss": 0.2078,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.20816242694854736,
      "rewards/margins": 4.0717453956604,
      "rewards/rejected": -3.8635826110839844,
      "step": 100
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.1834170854271357e-07,
      "logits/chosen": -2.358485698699951,
      "logits/rejected": -2.279703378677368,
      "logps/chosen": -133.0589141845703,
      "logps/rejected": -129.48451232910156,
      "loss": 0.2371,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.7045987248420715,
      "rewards/margins": 3.171238422393799,
      "rewards/rejected": -3.8758368492126465,
      "step": 110
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.057788944723618e-07,
      "logits/chosen": -2.3714640140533447,
      "logits/rejected": -2.3009307384490967,
      "logps/chosen": -129.02052307128906,
      "logps/rejected": -130.68905639648438,
      "loss": 0.1905,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -1.2331945896148682,
      "rewards/margins": 3.5529701709747314,
      "rewards/rejected": -4.7861647605896,
      "step": 120
    },
    {
      "epoch": 0.29,
      "learning_rate": 3.9321608040201003e-07,
      "logits/chosen": -2.385488510131836,
      "logits/rejected": -2.304129123687744,
      "logps/chosen": -128.46060180664062,
      "logps/rejected": -135.4064483642578,
      "loss": 0.1812,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -0.6662181615829468,
      "rewards/margins": 4.080785274505615,
      "rewards/rejected": -4.74700403213501,
      "step": 130
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.806532663316583e-07,
      "logits/chosen": -2.331386089324951,
      "logits/rejected": -2.2633156776428223,
      "logps/chosen": -138.77073669433594,
      "logps/rejected": -150.08639526367188,
      "loss": 0.1728,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.8451410531997681,
      "rewards/margins": 4.410196781158447,
      "rewards/rejected": -5.255337715148926,
      "step": 140
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.6809045226130655e-07,
      "logits/chosen": -2.261727809906006,
      "logits/rejected": -2.253206729888916,
      "logps/chosen": -128.15460205078125,
      "logps/rejected": -137.04916381835938,
      "loss": 0.2448,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.2643935680389404,
      "rewards/margins": 4.03481912612915,
      "rewards/rejected": -5.299212455749512,
      "step": 150
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.5552763819095475e-07,
      "logits/chosen": -2.3198587894439697,
      "logits/rejected": -2.2583937644958496,
      "logps/chosen": -140.9436798095703,
      "logps/rejected": -147.12094116210938,
      "loss": 0.1615,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.0014054775238037,
      "rewards/margins": 4.486364364624023,
      "rewards/rejected": -5.487771034240723,
      "step": 160
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.42964824120603e-07,
      "logits/chosen": -2.291534185409546,
      "logits/rejected": -2.208200454711914,
      "logps/chosen": -141.3668670654297,
      "logps/rejected": -155.08602905273438,
      "loss": 0.216,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.5920617580413818,
      "rewards/margins": 4.606629371643066,
      "rewards/rejected": -6.198691368103027,
      "step": 170
    },
    {
      "epoch": 0.41,
      "learning_rate": 3.3040201005025127e-07,
      "logits/chosen": -2.387359142303467,
      "logits/rejected": -2.28928279876709,
      "logps/chosen": -130.8974609375,
      "logps/rejected": -142.79708862304688,
      "loss": 0.2023,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -2.409898519515991,
      "rewards/margins": 3.318312168121338,
      "rewards/rejected": -5.72821044921875,
      "step": 180
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.178391959798995e-07,
      "logits/chosen": -2.378145456314087,
      "logits/rejected": -2.3165206909179688,
      "logps/chosen": -131.51109313964844,
      "logps/rejected": -142.97760009765625,
      "loss": 0.1793,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.6101114749908447,
      "rewards/margins": 3.964582920074463,
      "rewards/rejected": -5.574694633483887,
      "step": 190
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.0527638190954773e-07,
      "logits/chosen": -2.330974817276001,
      "logits/rejected": -2.276876449584961,
      "logps/chosen": -137.87515258789062,
      "logps/rejected": -152.93994140625,
      "loss": 0.1638,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.3574166297912598,
      "rewards/margins": 4.367352485656738,
      "rewards/rejected": -5.724769115447998,
      "step": 200
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.92713567839196e-07,
      "logits/chosen": -2.4077656269073486,
      "logits/rejected": -2.3121707439422607,
      "logps/chosen": -144.79739379882812,
      "logps/rejected": -147.3674774169922,
      "loss": 0.1603,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.5326565504074097,
      "rewards/margins": 3.852278470993042,
      "rewards/rejected": -5.384934902191162,
      "step": 210
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8015075376884424e-07,
      "logits/chosen": -2.2944283485412598,
      "logits/rejected": -2.2064151763916016,
      "logps/chosen": -130.70140075683594,
      "logps/rejected": -147.8914794921875,
      "loss": 0.1641,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.207008957862854,
      "rewards/margins": 4.792324542999268,
      "rewards/rejected": -5.999332427978516,
      "step": 220
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.6758793969849245e-07,
      "logits/chosen": -2.348940849304199,
      "logits/rejected": -2.164310932159424,
      "logps/chosen": -128.39413452148438,
      "logps/rejected": -140.557373046875,
      "loss": 0.166,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.3905706405639648,
      "rewards/margins": 4.686975479125977,
      "rewards/rejected": -6.077546119689941,
      "step": 230
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.550251256281407e-07,
      "logits/chosen": -2.395198106765747,
      "logits/rejected": -2.2796173095703125,
      "logps/chosen": -149.087646484375,
      "logps/rejected": -157.66110229492188,
      "loss": 0.1402,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -1.3839337825775146,
      "rewards/margins": 4.870881080627441,
      "rewards/rejected": -6.254815578460693,
      "step": 240
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.424623115577889e-07,
      "logits/chosen": -2.4120841026306152,
      "logits/rejected": -2.2922260761260986,
      "logps/chosen": -133.91209411621094,
      "logps/rejected": -144.4380340576172,
      "loss": 0.1872,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -1.2101858854293823,
      "rewards/margins": 4.869531154632568,
      "rewards/rejected": -6.079718112945557,
      "step": 250
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.2989949748743717e-07,
      "logits/chosen": -2.331249237060547,
      "logits/rejected": -2.2102646827697754,
      "logps/chosen": -124.6241683959961,
      "logps/rejected": -138.6643524169922,
      "loss": 0.1476,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -0.8653093576431274,
      "rewards/margins": 4.82875394821167,
      "rewards/rejected": -5.694063186645508,
      "step": 260
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.1733668341708543e-07,
      "logits/chosen": -2.2793853282928467,
      "logits/rejected": -2.1484787464141846,
      "logps/chosen": -141.07778930664062,
      "logps/rejected": -158.54676818847656,
      "loss": 0.1925,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.8885013461112976,
      "rewards/margins": 5.88812780380249,
      "rewards/rejected": -6.776629447937012,
      "step": 270
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.0477386934673366e-07,
      "logits/chosen": -2.3435816764831543,
      "logits/rejected": -2.222494125366211,
      "logps/chosen": -132.23934936523438,
      "logps/rejected": -152.41224670410156,
      "loss": 0.1719,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.8898094892501831,
      "rewards/margins": 5.38167667388916,
      "rewards/rejected": -6.271485805511475,
      "step": 280
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.922110552763819e-07,
      "logits/chosen": -2.337050437927246,
      "logits/rejected": -2.2481775283813477,
      "logps/chosen": -143.0239715576172,
      "logps/rejected": -167.92628479003906,
      "loss": 0.1408,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.425594449043274,
      "rewards/margins": 5.652303695678711,
      "rewards/rejected": -7.0778985023498535,
      "step": 290
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.7964824120603015e-07,
      "logits/chosen": -2.3036749362945557,
      "logits/rejected": -2.1755778789520264,
      "logps/chosen": -143.76683044433594,
      "logps/rejected": -168.64463806152344,
      "loss": 0.1393,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.336169719696045,
      "rewards/margins": 5.389135837554932,
      "rewards/rejected": -6.725306034088135,
      "step": 300
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.6708542713567838e-07,
      "logits/chosen": -2.2387304306030273,
      "logits/rejected": -2.1477370262145996,
      "logps/chosen": -134.2172393798828,
      "logps/rejected": -152.63461303710938,
      "loss": 0.1579,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -1.4482789039611816,
      "rewards/margins": 4.973227500915527,
      "rewards/rejected": -6.421506404876709,
      "step": 310
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.5452261306532663e-07,
      "logits/chosen": -2.2377147674560547,
      "logits/rejected": -2.1322357654571533,
      "logps/chosen": -135.234130859375,
      "logps/rejected": -161.66934204101562,
      "loss": 0.1088,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.7265955209732056,
      "rewards/margins": 5.747531890869141,
      "rewards/rejected": -7.474127769470215,
      "step": 320
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.4195979899497487e-07,
      "logits/chosen": -2.236829996109009,
      "logits/rejected": -2.154937982559204,
      "logps/chosen": -141.66830444335938,
      "logps/rejected": -166.32823181152344,
      "loss": 0.1488,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -1.782355546951294,
      "rewards/margins": 5.470345497131348,
      "rewards/rejected": -7.2527008056640625,
      "step": 330
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.2939698492462312e-07,
      "logits/chosen": -2.3523616790771484,
      "logits/rejected": -2.24237322807312,
      "logps/chosen": -134.5939178466797,
      "logps/rejected": -169.53317260742188,
      "loss": 0.1579,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.731003999710083,
      "rewards/margins": 6.211657524108887,
      "rewards/rejected": -7.942662239074707,
      "step": 340
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.1683417085427135e-07,
      "logits/chosen": -2.2935945987701416,
      "logits/rejected": -2.2166569232940674,
      "logps/chosen": -137.88473510742188,
      "logps/rejected": -160.505126953125,
      "loss": 0.1508,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.9919565916061401,
      "rewards/margins": 5.5159502029418945,
      "rewards/rejected": -6.507906913757324,
      "step": 350
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.0427135678391958e-07,
      "logits/chosen": -2.320068120956421,
      "logits/rejected": -2.2384986877441406,
      "logps/chosen": -134.75381469726562,
      "logps/rejected": -160.6112518310547,
      "loss": 0.1696,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -2.4626049995422363,
      "rewards/margins": 5.313817024230957,
      "rewards/rejected": -7.77642297744751,
      "step": 360
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.170854271356783e-08,
      "logits/chosen": -2.2218480110168457,
      "logits/rejected": -2.1257214546203613,
      "logps/chosen": -147.00196838378906,
      "logps/rejected": -171.19464111328125,
      "loss": 0.1453,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": -1.5424976348876953,
      "rewards/margins": 5.8535027503967285,
      "rewards/rejected": -7.395999908447266,
      "step": 370
    },
    {
      "epoch": 0.86,
      "learning_rate": 7.914572864321607e-08,
      "logits/chosen": -2.27662992477417,
      "logits/rejected": -2.157721519470215,
      "logps/chosen": -131.88534545898438,
      "logps/rejected": -158.86459350585938,
      "loss": 0.1955,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -2.0121521949768066,
      "rewards/margins": 4.82122802734375,
      "rewards/rejected": -6.833380222320557,
      "step": 380
    },
    {
      "epoch": 0.88,
      "learning_rate": 6.658291457286432e-08,
      "logits/chosen": -2.4072272777557373,
      "logits/rejected": -2.285789966583252,
      "logps/chosen": -140.86204528808594,
      "logps/rejected": -169.72288513183594,
      "loss": 0.1312,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.6351500749588013,
      "rewards/margins": 6.939029693603516,
      "rewards/rejected": -7.574179649353027,
      "step": 390
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.4020100502512555e-08,
      "logits/chosen": -2.2212846279144287,
      "logits/rejected": -2.141895294189453,
      "logps/chosen": -126.89100646972656,
      "logps/rejected": -161.2249755859375,
      "loss": 0.2109,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.795007050037384,
      "rewards/margins": 6.2724409103393555,
      "rewards/rejected": -7.067447662353516,
      "step": 400
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.14572864321608e-08,
      "logits/chosen": -2.370591878890991,
      "logits/rejected": -2.2174134254455566,
      "logps/chosen": -146.6661834716797,
      "logps/rejected": -165.058837890625,
      "loss": 0.1407,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.8686304092407227,
      "rewards/margins": 6.0188398361206055,
      "rewards/rejected": -6.8874711990356445,
      "step": 410
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.8894472361809044e-08,
      "logits/chosen": -2.3981685638427734,
      "logits/rejected": -2.2463793754577637,
      "logps/chosen": -147.17486572265625,
      "logps/rejected": -157.16622924804688,
      "loss": 0.1245,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -1.544703483581543,
      "rewards/margins": 5.296902179718018,
      "rewards/rejected": -6.841605186462402,
      "step": 420
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.6331658291457288e-08,
      "logits/chosen": -2.3454318046569824,
      "logits/rejected": -2.2091023921966553,
      "logps/chosen": -153.2548370361328,
      "logps/rejected": -168.34854125976562,
      "loss": 0.1576,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -1.147238850593567,
      "rewards/margins": 5.976444721221924,
      "rewards/rejected": -7.123683929443359,
      "step": 430
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.768844221105527e-09,
      "logits/chosen": -2.2823286056518555,
      "logits/rejected": -2.1865620613098145,
      "logps/chosen": -123.73600769042969,
      "logps/rejected": -146.88194274902344,
      "loss": 0.1626,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -1.5477970838546753,
      "rewards/margins": 5.064129829406738,
      "rewards/rejected": -6.611927032470703,
      "step": 440
    },
    {
      "epoch": 1.0,
      "step": 443,
      "total_flos": 0.0,
      "train_loss": 0.2053286122078282,
      "train_runtime": 6354.2323,
      "train_samples_per_second": 2.231,
      "train_steps_per_second": 0.07
    }
  ],
  "logging_steps": 10,
  "max_steps": 443,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}