{
"best_metric": 0.07805962860584259,
"best_model_checkpoint": "saves/Llama-3.1-8B-Instruct/lora/sft-500/checkpoint-250",
"epoch": 9.955555555555556,
"eval_steps": 50,
"global_step": 280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.35555555555555557,
"grad_norm": 7.369814872741699,
"learning_rate": 1.7857142857142859e-06,
"loss": 1.7064,
"step": 10
},
{
"epoch": 0.7111111111111111,
"grad_norm": 5.399565696716309,
"learning_rate": 3.5714285714285718e-06,
"loss": 1.5556,
"step": 20
},
{
"epoch": 1.0666666666666667,
"grad_norm": 4.430342674255371,
"learning_rate": 4.999805731202437e-06,
"loss": 1.4196,
"step": 30
},
{
"epoch": 1.4222222222222223,
"grad_norm": 4.033640384674072,
"learning_rate": 4.976529986032632e-06,
"loss": 1.1164,
"step": 40
},
{
"epoch": 1.7777777777777777,
"grad_norm": 3.2736451625823975,
"learning_rate": 4.914814565722671e-06,
"loss": 0.8493,
"step": 50
},
{
"epoch": 1.7777777777777777,
"eval_loss": 0.818483293056488,
"eval_runtime": 1.2942,
"eval_samples_per_second": 38.633,
"eval_steps_per_second": 19.316,
"step": 50
},
{
"epoch": 2.1333333333333333,
"grad_norm": 2.509134292602539,
"learning_rate": 4.815617391525772e-06,
"loss": 0.6287,
"step": 60
},
{
"epoch": 2.488888888888889,
"grad_norm": 2.4819371700286865,
"learning_rate": 4.680478160991514e-06,
"loss": 0.4584,
"step": 70
},
{
"epoch": 2.8444444444444446,
"grad_norm": 2.9780495166778564,
"learning_rate": 4.511494449416671e-06,
"loss": 0.3315,
"step": 80
},
{
"epoch": 3.2,
"grad_norm": 1.2511732578277588,
"learning_rate": 4.311289152148182e-06,
"loss": 0.1892,
"step": 90
},
{
"epoch": 3.5555555555555554,
"grad_norm": 2.088059663772583,
"learning_rate": 4.0829697730853505e-06,
"loss": 0.1595,
"step": 100
},
{
"epoch": 3.5555555555555554,
"eval_loss": 0.11228977888822556,
"eval_runtime": 1.2903,
"eval_samples_per_second": 38.752,
"eval_steps_per_second": 19.376,
"step": 100
},
{
"epoch": 3.911111111111111,
"grad_norm": 1.010196328163147,
"learning_rate": 3.830080191288342e-06,
"loss": 0.1311,
"step": 110
},
{
"epoch": 4.266666666666667,
"grad_norm": 0.5358583927154541,
"learning_rate": 3.556545654351749e-06,
"loss": 0.1064,
"step": 120
},
{
"epoch": 4.622222222222222,
"grad_norm": 0.7223713994026184,
"learning_rate": 3.2666118523333363e-06,
"loss": 0.0928,
"step": 130
},
{
"epoch": 4.977777777777778,
"grad_norm": 0.6626089811325073,
"learning_rate": 2.964779017907287e-06,
"loss": 0.106,
"step": 140
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.5725705027580261,
"learning_rate": 2.6557320756121306e-06,
"loss": 0.0797,
"step": 150
},
{
"epoch": 5.333333333333333,
"eval_loss": 0.0811280757188797,
"eval_runtime": 1.2941,
"eval_samples_per_second": 38.636,
"eval_steps_per_second": 19.318,
"step": 150
},
{
"epoch": 5.688888888888889,
"grad_norm": 1.287990927696228,
"learning_rate": 2.3442679243878698e-06,
"loss": 0.0918,
"step": 160
},
{
"epoch": 6.044444444444444,
"grad_norm": 1.3758983612060547,
"learning_rate": 2.035220982092714e-06,
"loss": 0.1028,
"step": 170
},
{
"epoch": 6.4,
"grad_norm": 2.8700389862060547,
"learning_rate": 1.7333881476666648e-06,
"loss": 0.0882,
"step": 180
},
{
"epoch": 6.7555555555555555,
"grad_norm": 0.7997310161590576,
"learning_rate": 1.443454345648252e-06,
"loss": 0.0792,
"step": 190
},
{
"epoch": 7.111111111111111,
"grad_norm": 1.6571403741836548,
"learning_rate": 1.169919808711659e-06,
"loss": 0.0997,
"step": 200
},
{
"epoch": 7.111111111111111,
"eval_loss": 0.07891152799129486,
"eval_runtime": 1.2907,
"eval_samples_per_second": 38.739,
"eval_steps_per_second": 19.369,
"step": 200
},
{
"epoch": 7.466666666666667,
"grad_norm": 0.7907149195671082,
"learning_rate": 9.170302269146509e-07,
"loss": 0.0651,
"step": 210
},
{
"epoch": 7.822222222222222,
"grad_norm": 0.6304967403411865,
"learning_rate": 6.887108478518184e-07,
"loss": 0.0977,
"step": 220
},
{
"epoch": 8.177777777777777,
"grad_norm": 1.6207903623580933,
"learning_rate": 4.885055505833291e-07,
"loss": 0.0592,
"step": 230
},
{
"epoch": 8.533333333333333,
"grad_norm": 0.4881339371204376,
"learning_rate": 3.1952183900848673e-07,
"loss": 0.0816,
"step": 240
},
{
"epoch": 8.88888888888889,
"grad_norm": 1.008658766746521,
"learning_rate": 1.843826084742284e-07,
"loss": 0.0896,
"step": 250
},
{
"epoch": 8.88888888888889,
"eval_loss": 0.07805962860584259,
"eval_runtime": 1.2925,
"eval_samples_per_second": 38.685,
"eval_steps_per_second": 19.342,
"step": 250
},
{
"epoch": 9.244444444444444,
"grad_norm": 1.0305126905441284,
"learning_rate": 8.518543427732951e-08,
"loss": 0.0924,
"step": 260
},
{
"epoch": 9.6,
"grad_norm": 1.7199366092681885,
"learning_rate": 2.347001396736798e-08,
"loss": 0.0848,
"step": 270
},
{
"epoch": 9.955555555555556,
"grad_norm": 0.5508742332458496,
"learning_rate": 1.9426879756284656e-10,
"loss": 0.0668,
"step": 280
},
{
"epoch": 9.955555555555556,
"step": 280,
"total_flos": 2.516129863512883e+16,
"train_loss": 0.3581957229546138,
"train_runtime": 398.237,
"train_samples_per_second": 11.3,
"train_steps_per_second": 0.703
}
],
"logging_steps": 10,
"max_steps": 280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.516129863512883e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}