{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.985781990521327,
  "eval_steps": 100,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 1957.9884167124578,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": 124.78954315185547,
      "logits/rejected": 100.39772033691406,
      "logps/chosen": -796.0274658203125,
      "logps/rejected": -794.6148071289062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.19,
      "grad_norm": 2425.4331055868297,
      "learning_rate": 4.907293218369498e-07,
      "logits/chosen": 113.938720703125,
      "logits/rejected": 135.90719604492188,
      "logps/chosen": -780.183837890625,
      "logps/rejected": -875.3695678710938,
      "loss": 2.1195,
      "rewards/accuracies": 0.4930555522441864,
      "rewards/chosen": 4.653940200805664,
      "rewards/margins": 1.20315420627594,
      "rewards/rejected": 3.4507861137390137,
      "step": 10
    },
    {
      "epoch": 0.38,
      "grad_norm": 1819.3721154286172,
      "learning_rate": 3.941700805287168e-07,
      "logits/chosen": 126.57746887207031,
      "logits/rejected": 137.83840942382812,
      "logps/chosen": -774.68359375,
      "logps/rejected": -861.1171875,
      "loss": 2.0878,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.7321250438690186,
      "rewards/margins": 4.814647197723389,
      "rewards/rejected": -6.5467729568481445,
      "step": 20
    },
    {
      "epoch": 0.57,
      "grad_norm": 1588.4595576906577,
      "learning_rate": 2.3293939665883228e-07,
      "logits/chosen": 124.16752624511719,
      "logits/rejected": 127.4310531616211,
      "logps/chosen": -818.1854248046875,
      "logps/rejected": -877.2722778320312,
      "loss": 2.3526,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -3.666670560836792,
      "rewards/margins": 6.680838584899902,
      "rewards/rejected": -10.347509384155273,
      "step": 30
    },
    {
      "epoch": 0.76,
      "grad_norm": 1396.6549591742623,
      "learning_rate": 7.936171419533652e-08,
      "logits/chosen": 123.1187973022461,
      "logits/rejected": 118.75350189208984,
      "logps/chosen": -812.8169555664062,
      "logps/rejected": -826.1077270507812,
      "loss": 1.9731,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 3.7158901691436768,
      "rewards/margins": 5.847080707550049,
      "rewards/rejected": -2.131190538406372,
      "step": 40
    },
    {
      "epoch": 0.95,
      "grad_norm": 1524.5875251697705,
      "learning_rate": 2.328513490917311e-09,
      "logits/chosen": 133.9492645263672,
      "logits/rejected": 133.85220336914062,
      "logps/chosen": -822.7086791992188,
      "logps/rejected": -851.4788208007812,
      "loss": 1.681,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 4.888961315155029,
      "rewards/margins": 5.86912727355957,
      "rewards/rejected": -0.9801660776138306,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 1.981816053390503,
      "train_runtime": 574.1146,
      "train_samples_per_second": 11.757,
      "train_steps_per_second": 0.091
    }
  ],
  "logging_steps": 10,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}