{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9959628582963262,
  "eval_steps": 500,
  "global_step": 309,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12918853451756157,
      "grad_norm": 18.75351905822754,
      "learning_rate": 2.127659574468085e-07,
      "logits/chosen": -1.451206922531128,
      "logits/rejected": -1.453438401222229,
      "logps/chosen": -1178.2191162109375,
      "logps/rejected": -1103.279541015625,
      "loss": 0.6962,
      "rewards/accuracies": 0.4242187440395355,
      "rewards/chosen": -0.0020913523621857166,
      "rewards/margins": -0.008680100552737713,
      "rewards/rejected": 0.006588748190551996,
      "step": 20
    },
    {
      "epoch": 0.25837706903512314,
      "grad_norm": 18.161069869995117,
      "learning_rate": 4.25531914893617e-07,
      "logits/chosen": -1.4470716714859009,
      "logits/rejected": -1.4471380710601807,
      "logps/chosen": -1174.496337890625,
      "logps/rejected": -1101.42333984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.012329095974564552,
      "rewards/margins": 0.002261379035189748,
      "rewards/rejected": 0.010067718103528023,
      "step": 40
    },
    {
      "epoch": 0.3875656035526847,
      "grad_norm": 18.111698150634766,
      "learning_rate": 4.987903778327268e-07,
      "logits/chosen": -1.4492727518081665,
      "logits/rejected": -1.454036831855774,
      "logps/chosen": -1144.927490234375,
      "logps/rejected": -1060.2935791015625,
      "loss": 0.6842,
      "rewards/accuracies": 0.5726562738418579,
      "rewards/chosen": 0.04902833700180054,
      "rewards/margins": 0.026971647515892982,
      "rewards/rejected": 0.022056687623262405,
      "step": 60
    },
    {
      "epoch": 0.5167541380702463,
      "grad_norm": 17.73369598388672,
      "learning_rate": 4.922396431162129e-07,
      "logits/chosen": -1.4595801830291748,
      "logits/rejected": -1.4629844427108765,
      "logps/chosen": -1171.3333740234375,
      "logps/rejected": -1109.41455078125,
      "loss": 0.6694,
      "rewards/accuracies": 0.610156238079071,
      "rewards/chosen": 0.1019735336303711,
      "rewards/margins": 0.06121420860290527,
      "rewards/rejected": 0.04075933247804642,
      "step": 80
    },
    {
      "epoch": 0.6459426725878078,
      "grad_norm": 16.904024124145508,
      "learning_rate": 4.801467490723401e-07,
      "logits/chosen": -1.4490680694580078,
      "logits/rejected": -1.4522110223770142,
      "logps/chosen": -1182.560791015625,
      "logps/rejected": -1114.6134033203125,
      "loss": 0.6552,
      "rewards/accuracies": 0.6148437261581421,
      "rewards/chosen": 0.07732206583023071,
      "rewards/margins": 0.11570864915847778,
      "rewards/rejected": -0.03838658332824707,
      "step": 100
    },
    {
      "epoch": 0.7751312071053694,
      "grad_norm": 17.63266372680664,
      "learning_rate": 4.627883669538311e-07,
      "logits/chosen": -1.4325090646743774,
      "logits/rejected": -1.4361627101898193,
      "logps/chosen": -1182.9852294921875,
      "logps/rejected": -1119.953125,
      "loss": 0.647,
      "rewards/accuracies": 0.6351562738418579,
      "rewards/chosen": 0.06345769017934799,
      "rewards/margins": 0.14128440618515015,
      "rewards/rejected": -0.07782672345638275,
      "step": 120
    },
    {
      "epoch": 0.9043197416229309,
      "grad_norm": 17.77622413635254,
      "learning_rate": 4.405616362137017e-07,
      "logits/chosen": -1.4502944946289062,
      "logits/rejected": -1.4528261423110962,
      "logps/chosen": -1160.4075927734375,
      "logps/rejected": -1093.4769287109375,
      "loss": 0.6328,
      "rewards/accuracies": 0.62109375,
      "rewards/chosen": 0.04682188481092453,
      "rewards/margins": 0.19679759442806244,
      "rewards/rejected": -0.1499757319688797,
      "step": 140
    },
    {
      "epoch": 1.0335082761404926,
      "grad_norm": 16.421310424804688,
      "learning_rate": 4.139750784196997e-07,
      "logits/chosen": -1.4416507482528687,
      "logits/rejected": -1.4441019296646118,
      "logps/chosen": -1162.17919921875,
      "logps/rejected": -1097.062744140625,
      "loss": 0.6268,
      "rewards/accuracies": 0.6539062261581421,
      "rewards/chosen": 0.017796631902456284,
      "rewards/margins": 0.2243949919939041,
      "rewards/rejected": -0.20659832656383514,
      "step": 160
    },
    {
      "epoch": 1.1626968106580542,
      "grad_norm": 16.477689743041992,
      "learning_rate": 3.836369628764067e-07,
      "logits/chosen": -1.4492335319519043,
      "logits/rejected": -1.4500490427017212,
      "logps/chosen": -1149.756103515625,
      "logps/rejected": -1079.184326171875,
      "loss": 0.5971,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.020682932808995247,
      "rewards/margins": 0.3120761513710022,
      "rewards/rejected": -0.3327590823173523,
      "step": 180
    },
    {
      "epoch": 1.2918853451756156,
      "grad_norm": 16.82990074157715,
      "learning_rate": 3.5024139013594445e-07,
      "logits/chosen": -1.4431254863739014,
      "logits/rejected": -1.4477680921554565,
      "logps/chosen": -1168.659912109375,
      "logps/rejected": -1104.318115234375,
      "loss": 0.581,
      "rewards/accuracies": 0.7085937261581421,
      "rewards/chosen": -0.008671097457408905,
      "rewards/margins": 0.3281632959842682,
      "rewards/rejected": -0.3368344008922577,
      "step": 200
    },
    {
      "epoch": 1.4210738796931772,
      "grad_norm": 16.532955169677734,
      "learning_rate": 3.1455241179026165e-07,
      "logits/chosen": -1.4374616146087646,
      "logits/rejected": -1.4397255182266235,
      "logps/chosen": -1178.955810546875,
      "logps/rejected": -1098.697021484375,
      "loss": 0.5797,
      "rewards/accuracies": 0.711718738079071,
      "rewards/chosen": -0.046892568469047546,
      "rewards/margins": 0.3680500090122223,
      "rewards/rejected": -0.41494256258010864,
      "step": 220
    },
    {
      "epoch": 1.5502624142107388,
      "grad_norm": 15.883248329162598,
      "learning_rate": 2.7738654986555523e-07,
      "logits/chosen": -1.4424382448196411,
      "logits/rejected": -1.446033239364624,
      "logps/chosen": -1131.3668212890625,
      "logps/rejected": -1066.152099609375,
      "loss": 0.5729,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.07887513935565948,
      "rewards/margins": 0.3466903865337372,
      "rewards/rejected": -0.42556554079055786,
      "step": 240
    },
    {
      "epoch": 1.6794509487283005,
      "grad_norm": 16.528196334838867,
      "learning_rate": 2.3959411575460777e-07,
      "logits/chosen": -1.437410831451416,
      "logits/rejected": -1.4404833316802979,
      "logps/chosen": -1151.679931640625,
      "logps/rejected": -1087.936279296875,
      "loss": 0.5745,
      "rewards/accuracies": 0.7210937738418579,
      "rewards/chosen": -0.05212847515940666,
      "rewards/margins": 0.4079786241054535,
      "rewards/rejected": -0.46010708808898926,
      "step": 260
    },
    {
      "epoch": 1.8086394832458619,
      "grad_norm": 16.318754196166992,
      "learning_rate": 2.02039756087992e-07,
      "logits/chosen": -1.455380916595459,
      "logits/rejected": -1.4586334228515625,
      "logps/chosen": -1133.935791015625,
      "logps/rejected": -1072.302734375,
      "loss": 0.5721,
      "rewards/accuracies": 0.73046875,
      "rewards/chosen": -0.06314127147197723,
      "rewards/margins": 0.40634602308273315,
      "rewards/rejected": -0.4694872796535492,
      "step": 280
    },
    {
      "epoch": 1.9378280177634235,
      "grad_norm": 16.61640739440918,
      "learning_rate": 1.655826706318234e-07,
      "logits/chosen": -1.4276014566421509,
      "logits/rejected": -1.4284788370132446,
      "logps/chosen": -1142.511962890625,
      "logps/rejected": -1087.729248046875,
      "loss": 0.564,
      "rewards/accuracies": 0.7367187738418579,
      "rewards/chosen": -0.07105865329504013,
      "rewards/margins": 0.41813844442367554,
      "rewards/rejected": -0.4891970753669739,
      "step": 300
    }
  ],
  "logging_steps": 20,
  "max_steps": 462,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}