{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 60.0,
"eval_steps": 8874,
"global_step": 88740,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.0,
"grad_norm": 2.8650519847869873,
"learning_rate": 9.001014198782962e-06,
"loss": 2.4122,
"step": 8874
},
{
"epoch": 6.0,
"eval_loss": 3.9393680095672607,
"eval_runtime": 31.1663,
"eval_samples_per_second": 311.715,
"eval_steps_per_second": 7.091,
"step": 8874
},
{
"epoch": 12.0,
"grad_norm": 11.994268417358398,
"learning_rate": 8.001239576290288e-06,
"loss": 2.2167,
"step": 17748
},
{
"epoch": 12.0,
"eval_loss": 4.1646857261657715,
"eval_runtime": 31.5415,
"eval_samples_per_second": 308.007,
"eval_steps_per_second": 7.007,
"step": 17748
},
{
"epoch": 18.0,
"grad_norm": 20.35555076599121,
"learning_rate": 7.001690331304937e-06,
"loss": 2.0965,
"step": 26622
},
{
"epoch": 18.0,
"eval_loss": 4.430016994476318,
"eval_runtime": 31.5349,
"eval_samples_per_second": 308.071,
"eval_steps_per_second": 7.008,
"step": 26622
},
{
"epoch": 24.0,
"grad_norm": 3.9490978717803955,
"learning_rate": 6.002141086319586e-06,
"loss": 2.0238,
"step": 35496
},
{
"epoch": 24.0,
"eval_loss": 4.574044227600098,
"eval_runtime": 31.6749,
"eval_samples_per_second": 306.709,
"eval_steps_per_second": 6.977,
"step": 35496
},
{
"epoch": 30.0,
"grad_norm": 9.400227546691895,
"learning_rate": 5.0025918413342355e-06,
"loss": 1.9938,
"step": 44370
},
{
"epoch": 30.0,
"eval_loss": 4.62649393081665,
"eval_runtime": 31.7226,
"eval_samples_per_second": 306.249,
"eval_steps_per_second": 6.967,
"step": 44370
},
{
"epoch": 36.0,
"grad_norm": 5.468742370605469,
"learning_rate": 4.003042596348885e-06,
"loss": 1.973,
"step": 53244
},
{
"epoch": 36.0,
"eval_loss": 4.671382427215576,
"eval_runtime": 31.6041,
"eval_samples_per_second": 307.396,
"eval_steps_per_second": 6.993,
"step": 53244
},
{
"epoch": 42.0,
"grad_norm": 0.6607534289360046,
"learning_rate": 3.003606040117197e-06,
"loss": 1.9583,
"step": 62118
},
{
"epoch": 42.0,
"eval_loss": 4.793060302734375,
"eval_runtime": 31.7099,
"eval_samples_per_second": 306.372,
"eval_steps_per_second": 6.969,
"step": 62118
},
{
"epoch": 48.0,
"grad_norm": 1.4975688457489014,
"learning_rate": 2.004056795131846e-06,
"loss": 1.9466,
"step": 70992
},
{
"epoch": 48.0,
"eval_loss": 4.7913103103637695,
"eval_runtime": 33.0493,
"eval_samples_per_second": 293.955,
"eval_steps_per_second": 6.687,
"step": 70992
},
{
"epoch": 54.0,
"grad_norm": 2.9576919078826904,
"learning_rate": 1.0045075501464953e-06,
"loss": 1.9415,
"step": 79866
},
{
"epoch": 54.0,
"eval_loss": 4.844839572906494,
"eval_runtime": 31.7934,
"eval_samples_per_second": 305.566,
"eval_steps_per_second": 6.951,
"step": 79866
},
{
"epoch": 60.0,
"grad_norm": 0.5801959037780762,
"learning_rate": 5.070993914807302e-09,
"loss": 1.9369,
"step": 88740
},
{
"epoch": 60.0,
"eval_loss": 4.886190891265869,
"eval_runtime": 31.6305,
"eval_samples_per_second": 307.141,
"eval_steps_per_second": 6.987,
"step": 88740
},
{
"epoch": 60.0,
"step": 88740,
"total_flos": 1.4015777943683174e+18,
"train_loss": 2.04994026615675,
"train_runtime": 30910.218,
"train_samples_per_second": 114.776,
"train_steps_per_second": 2.871
}
],
"logging_steps": 8874,
"max_steps": 88740,
"num_input_tokens_seen": 0,
"num_train_epochs": 60,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4015777943683174e+18,
"train_batch_size": 40,
"trial_name": null,
"trial_params": null
}