VPO-Zephyr-7B-SecondTry-iter-1 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 5.777860228892019,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.832916021347046,
      "logits/rejected": -2.8954272270202637,
      "logps/chosen": -112.55461120605469,
      "logps/pi_response": -112.97522735595703,
      "logps/ref_response": -112.97522735595703,
      "logps/rejected": -126.53972625732422,
      "loss": 0.6918,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 5.866000234105043,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.791806697845459,
      "logits/rejected": -2.7901265621185303,
      "logps/chosen": -150.01193237304688,
      "logps/pi_response": -152.32586669921875,
      "logps/ref_response": -152.45172119140625,
      "logps/rejected": -154.6370849609375,
      "loss": 0.6917,
      "rewards/accuracies": 0.4305555522441864,
      "rewards/chosen": 0.0006289934390224516,
      "rewards/margins": -0.0005195397534407675,
      "rewards/rejected": 0.0011485331924632192,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.174800184771954,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.8215346336364746,
      "logits/rejected": -2.806915760040283,
      "logps/chosen": -148.59841918945312,
      "logps/pi_response": -149.57443237304688,
      "logps/ref_response": -147.01174926757812,
      "logps/rejected": -151.08834838867188,
      "loss": 0.692,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.02874668501317501,
      "rewards/margins": -0.003781392704695463,
      "rewards/rejected": -0.02496529184281826,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 6.166347180577358,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.8225722312927246,
      "logits/rejected": -2.8092432022094727,
      "logps/chosen": -168.7109832763672,
      "logps/pi_response": -166.7081298828125,
      "logps/ref_response": -149.63009643554688,
      "logps/rejected": -168.52523803710938,
      "loss": 0.6913,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.17003677785396576,
      "rewards/margins": -0.0003072673571296036,
      "rewards/rejected": -0.16972950100898743,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 6.333082712906178,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.7256391048431396,
      "logits/rejected": -2.719358205795288,
      "logps/chosen": -158.1101837158203,
      "logps/pi_response": -156.38528442382812,
      "logps/ref_response": -143.953857421875,
      "logps/rejected": -155.69100952148438,
      "loss": 0.69,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.11886920034885406,
      "rewards/margins": 0.013960828073322773,
      "rewards/rejected": -0.13283005356788635,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 6.029091338880282,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7439560890197754,
      "logits/rejected": -2.743741750717163,
      "logps/chosen": -149.5755615234375,
      "logps/pi_response": -147.55282592773438,
      "logps/ref_response": -140.59339904785156,
      "logps/rejected": -145.41294860839844,
      "loss": 0.69,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.06889279931783676,
      "rewards/margins": 0.005167593248188496,
      "rewards/rejected": -0.07406039535999298,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6904175948288481,
      "train_runtime": 1301.3878,
      "train_samples_per_second": 11.744,
      "train_steps_per_second": 0.045
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
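
A minimal sketch of how this file can be inspected once downloaded (assuming it is saved locally as trainer_state.json; the path and the printed fields are illustrative, not part of the checkpoint). It loads the JSON with the Python standard library and prints loss and reward margin per logged step, skipping the final summary entry that has no per-step loss:

    # sketch: read the trainer state and print the per-step training metrics
    import json

    with open("trainer_state.json") as f:  # assumed local path
        state = json.load(f)

    for entry in state["log_history"]:
        if "loss" not in entry:
            continue  # last entry holds run-level stats (train_loss, train_runtime, ...)
        print(
            f"step {entry['step']:>3}  "
            f"loss {entry['loss']:.4f}  "
            f"reward margin {entry.get('rewards/margins', float('nan')):+.4f}"
        )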