{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.971563981042654,
  "eval_steps": 100,
  "global_step": 104,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.018957345971563982,
      "grad_norm": 132.15360444004384,
      "learning_rate": 4.545454545454545e-08,
      "logits/chosen": 117.53560638427734,
      "logits/rejected": 126.8960952758789,
      "logps/chosen": -335.40118408203125,
      "logps/rejected": -439.16552734375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 132.3674027987073,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": 135.01699829101562,
      "logits/rejected": 138.37664794921875,
      "logps/chosen": -396.05718994140625,
      "logps/rejected": -439.1203918457031,
      "loss": 0.7127,
      "rewards/accuracies": 0.4583333432674408,
      "rewards/chosen": -0.0030322629027068615,
      "rewards/margins": -0.013390823267400265,
      "rewards/rejected": 0.010358559899032116,
      "step": 10
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 131.21733523095625,
      "learning_rate": 4.885348141000122e-07,
      "logits/chosen": 121.60444641113281,
      "logits/rejected": 125.29842376708984,
      "logps/chosen": -370.2664489746094,
      "logps/rejected": -422.78851318359375,
      "loss": 0.6459,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.10727670043706894,
      "rewards/margins": 0.247134730219841,
      "rewards/rejected": -0.13985800743103027,
      "step": 20
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 117.90232463642135,
      "learning_rate": 4.5025027361734613e-07,
      "logits/chosen": 142.974853515625,
      "logits/rejected": 136.52386474609375,
      "logps/chosen": -424.7781677246094,
      "logps/rejected": -469.64813232421875,
      "loss": 0.5746,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.6156466007232666,
      "rewards/margins": 0.8666501045227051,
      "rewards/rejected": -2.4822967052459717,
      "step": 30
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 104.91283452119073,
      "learning_rate": 3.893311157806091e-07,
      "logits/chosen": 126.9936752319336,
      "logits/rejected": 115.53365325927734,
      "logps/chosen": -399.81353759765625,
      "logps/rejected": -426.99853515625,
      "loss": 0.5456,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -2.2809689044952393,
      "rewards/margins": 1.1751956939697266,
      "rewards/rejected": -3.456164598464966,
      "step": 40
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 123.57780236639618,
      "learning_rate": 3.126631330646801e-07,
      "logits/chosen": 142.1190643310547,
      "logits/rejected": 146.2515411376953,
      "logps/chosen": -456.97979736328125,
      "logps/rejected": -540.1392822265625,
      "loss": 0.489,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.891798734664917,
      "rewards/margins": 1.2988468408584595,
      "rewards/rejected": -3.190645456314087,
      "step": 50
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 67.1680971334559,
      "learning_rate": 2.2891223348923882e-07,
      "logits/chosen": 133.56114196777344,
      "logits/rejected": 137.20738220214844,
      "logps/chosen": -449.55303955078125,
      "logps/rejected": -534.8367919921875,
      "loss": 0.3117,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.5773684978485107,
      "rewards/margins": 2.346193790435791,
      "rewards/rejected": -4.923562049865723,
      "step": 60
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 48.54475300946312,
      "learning_rate": 1.4754491880085317e-07,
      "logits/chosen": 125.71492004394531,
      "logits/rejected": 127.68719482421875,
      "logps/chosen": -426.90228271484375,
      "logps/rejected": -528.0679321289062,
      "loss": 0.195,
      "rewards/accuracies": 0.9437500238418579,
      "rewards/chosen": -2.9879212379455566,
      "rewards/margins": 2.7397806644439697,
      "rewards/rejected": -5.727701663970947,
      "step": 70
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 55.371866892062,
      "learning_rate": 7.775827023107834e-08,
      "logits/chosen": 111.7248306274414,
      "logits/rejected": 128.3420867919922,
      "logps/chosen": -427.53106689453125,
      "logps/rejected": -546.7640991210938,
      "loss": 0.1651,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -3.693999767303467,
      "rewards/margins": 2.9823195934295654,
      "rewards/rejected": -6.676319122314453,
      "step": 80
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 40.99464664899818,
      "learning_rate": 2.7440387297912122e-08,
      "logits/chosen": 110.8941879272461,
      "logits/rejected": 123.70848083496094,
      "logps/chosen": -457.2183532714844,
      "logps/rejected": -575.8634033203125,
      "loss": 0.1557,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -3.5393664836883545,
      "rewards/margins": 3.369715929031372,
      "rewards/rejected": -6.909082889556885,
      "step": 90
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 45.385328063823785,
      "learning_rate": 2.27878296044029e-09,
      "logits/chosen": 117.1551284790039,
      "logits/rejected": 117.0487060546875,
      "logps/chosen": -446.9934997558594,
      "logps/rejected": -541.2728881835938,
      "loss": 0.1585,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -3.250919818878174,
      "rewards/margins": 2.9745240211486816,
      "rewards/rejected": -6.2254438400268555,
      "step": 100
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 96.73149871826172,
      "eval_logits/rejected": 91.1323013305664,
      "eval_logps/chosen": -446.253662109375,
      "eval_logps/rejected": -476.3663635253906,
      "eval_loss": 0.46732592582702637,
      "eval_rewards/accuracies": 0.6979166865348816,
      "eval_rewards/chosen": -4.1292724609375,
      "eval_rewards/margins": 1.5854991674423218,
      "eval_rewards/rejected": -5.714771270751953,
      "eval_runtime": 120.4793,
      "eval_samples_per_second": 6.225,
      "eval_steps_per_second": 0.199,
      "step": 100
    },
    {
      "epoch": 1.971563981042654,
      "step": 104,
      "total_flos": 0.0,
      "train_loss": 0.39153398688022906,
      "train_runtime": 2311.0387,
      "train_samples_per_second": 5.842,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 10,
  "max_steps": 104,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}