llama3-8b-fft-coding-11-v1 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9925925925925926,
  "eval_steps": 500,
  "global_step": 67,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014814814814814815,
      "grad_norm": 19.4039760393098,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 1.5916,
      "step": 1
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 2.024285471994628,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 1.4287,
      "step": 5
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 1.1211804620853234,
      "learning_rate": 1.9876883405951378e-05,
      "loss": 1.1475,
      "step": 10
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 1.14441501230299,
      "learning_rate": 1.913545457642601e-05,
      "loss": 1.0186,
      "step": 15
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 1.0887457606115902,
      "learning_rate": 1.777145961456971e-05,
      "loss": 0.9336,
      "step": 20
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 1.1339781450076967,
      "learning_rate": 1.5877852522924733e-05,
      "loss": 0.8617,
      "step": 25
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 1.1324585725260847,
      "learning_rate": 1.3583679495453e-05,
      "loss": 0.7875,
      "step": 30
    },
    {
      "epoch": 0.5185185185185185,
      "grad_norm": 1.1020040138984686,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.7257,
      "step": 35
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 1.105923599687312,
      "learning_rate": 8.43565534959769e-06,
      "loss": 0.6779,
      "step": 40
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 1.0653749318761645,
      "learning_rate": 5.932633569242e-06,
      "loss": 0.636,
      "step": 45
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 1.0549355896653918,
      "learning_rate": 3.7067960895016277e-06,
      "loss": 0.6134,
      "step": 50
    },
    {
      "epoch": 0.8148148148148148,
      "grad_norm": 0.9960657574926672,
      "learning_rate": 1.9098300562505266e-06,
      "loss": 0.5906,
      "step": 55
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.9945434588891916,
      "learning_rate": 6.641957350279838e-07,
      "loss": 0.5828,
      "step": 60
    },
    {
      "epoch": 0.9629629629629629,
      "grad_norm": 1.0147750878178303,
      "learning_rate": 5.4781046317267103e-08,
      "loss": 0.5824,
      "step": 65
    },
    {
      "epoch": 0.9925925925925926,
      "eval_loss": 1.4180262088775635,
      "eval_runtime": 0.7571,
      "eval_samples_per_second": 11.888,
      "eval_steps_per_second": 1.321,
      "step": 67
    },
    {
      "epoch": 0.9925925925925926,
      "step": 67,
      "total_flos": 27952184033280.0,
      "train_loss": 0.8094243202636491,
      "train_runtime": 773.5157,
      "train_samples_per_second": 44.608,
      "train_steps_per_second": 0.087
    }
  ],
  "logging_steps": 5,
  "max_steps": 67,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 27952184033280.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
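
A minimal sketch of how a trainer_state.json like the one above can be inspected with the Python standard library. The local file path "trainer_state.json" and the variable names are assumptions for illustration; only the keys ("log_history", "loss", "eval_loss", "train_loss", "step") come from the file itself.

# Sketch: print the logged training curve and the final metrics from trainer_state.json.
import json

with open("trainer_state.json") as f:          # assumed local copy of the file above
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:                         # periodic training logs (every logging_steps=5)
        print(f"step {entry['step']:>3}  train loss {entry['loss']:.4f}")
    elif "eval_loss" in entry:                  # end-of-training evaluation record
        print(f"step {entry['step']:>3}  eval loss  {entry['eval_loss']:.4f}")

# The last entry is a summary record with averaged training loss and throughput.
print("average train loss:", state["log_history"][-1]["train_loss"])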