Llama-3.1-8B-gen-dpo-2k / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.992,
"eval_steps": 200,
"global_step": 62,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016,
"grad_norm": 406.82268355522706,
"learning_rate": 7.142857142857142e-08,
"logits/generated": -1.7205866575241089,
"logits/real": -1.5578112602233887,
"logps/generated": -286.5030822753906,
"logps/real": -215.1640625,
"loss": 0.9129,
"rewards/accuracies": 0.0,
"rewards/generated": 0.0,
"rewards/margins": 0.0,
"rewards/real": 0.0,
"step": 1
},
{
"epoch": 0.16,
"grad_norm": 255.30926008233882,
"learning_rate": 4.727272727272727e-07,
"logits/generated": -1.643042802810669,
"logits/real": -1.5913658142089844,
"logps/generated": -247.2132110595703,
"logps/real": -229.58377075195312,
"loss": 0.9075,
"rewards/accuracies": 0.5138888955116272,
"rewards/generated": 0.033381447196006775,
"rewards/margins": 0.06060503050684929,
"rewards/real": 0.09398648142814636,
"step": 10
},
{
"epoch": 0.32,
"grad_norm": 126.87007618777977,
"learning_rate": 3.818181818181818e-07,
"logits/generated": -1.6706269979476929,
"logits/real": -1.6231067180633545,
"logps/generated": -255.0316619873047,
"logps/real": -233.18115234375,
"loss": 0.8444,
"rewards/accuracies": 0.7250000238418579,
"rewards/generated": -0.507367730140686,
"rewards/margins": 0.4785459637641907,
"rewards/real": -0.028821701183915138,
"step": 20
},
{
"epoch": 0.48,
"grad_norm": 58.59915104165316,
"learning_rate": 2.909090909090909e-07,
"logits/generated": -1.6554796695709229,
"logits/real": -1.5990644693374634,
"logps/generated": -259.2175598144531,
"logps/real": -229.8797607421875,
"loss": 0.7261,
"rewards/accuracies": 0.800000011920929,
"rewards/generated": -0.31560593843460083,
"rewards/margins": 0.8427440524101257,
"rewards/real": 0.5271381139755249,
"step": 30
},
{
"epoch": 0.64,
"grad_norm": 69.00503449133376,
"learning_rate": 2e-07,
"logits/generated": -1.6960480213165283,
"logits/real": -1.6210428476333618,
"logps/generated": -269.42431640625,
"logps/real": -236.22348022460938,
"loss": 0.6977,
"rewards/accuracies": 0.824999988079071,
"rewards/generated": -0.6807600259780884,
"rewards/margins": 1.2224102020263672,
"rewards/real": 0.5416500568389893,
"step": 40
},
{
"epoch": 0.8,
"grad_norm": 57.46050240577938,
"learning_rate": 1.0909090909090908e-07,
"logits/generated": -1.7112897634506226,
"logits/real": -1.6359144449234009,
"logps/generated": -271.5468444824219,
"logps/real": -238.4114532470703,
"loss": 0.6996,
"rewards/accuracies": 0.7749999761581421,
"rewards/generated": -1.030948519706726,
"rewards/margins": 1.2689697742462158,
"rewards/real": 0.2380211055278778,
"step": 50
},
{
"epoch": 0.96,
"grad_norm": 104.69079883597416,
"learning_rate": 1.818181818181818e-08,
"logits/generated": -1.7024548053741455,
"logits/real": -1.6506109237670898,
"logps/generated": -265.20843505859375,
"logps/real": -237.0784454345703,
"loss": 0.7147,
"rewards/accuracies": 0.824999988079071,
"rewards/generated": -0.9356173276901245,
"rewards/margins": 1.1806285381317139,
"rewards/real": 0.24501121044158936,
"step": 60
},
{
"epoch": 0.992,
"step": 62,
"total_flos": 0.0,
"train_loss": 0.7627417502864715,
"train_runtime": 798.2471,
"train_samples_per_second": 2.505,
"train_steps_per_second": 0.078
}
],
"logging_steps": 10,
"max_steps": 62,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
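
The log_history entries above can be inspected programmatically. Below is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json (the path and the STATE_PATH name are illustrative, not part of the original checkpoint): it loads the JSON and prints the per-log-step DPO metrics (loss, reward margin, accuracy) followed by the final training summary.

import json

# Assumed local path to this checkpoint's trainer_state.json; adjust as needed.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history holds one dict per logging step (every `logging_steps` = 10 steps),
# plus a final summary entry with train_loss / train_runtime instead of step metrics.
for entry in state["log_history"]:
    if "loss" in entry:  # per-step DPO metrics
        print(
            f"step {entry['step']:>3} | loss {entry['loss']:.4f} | "
            f"reward margin {entry['rewards/margins']:.4f} | "
            f"accuracy {entry['rewards/accuracies']:.3f}"
        )
    elif "train_loss" in entry:  # final aggregate summary
        print(
            f"done @ step {entry['step']} | mean train loss {entry['train_loss']:.4f} | "
            f"runtime {entry['train_runtime']:.1f}s"
        )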