{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 3498,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9968553459119497,
      "grad_norm": 0.429341197013855,
      "learning_rate": 0.00013794880138427262,
      "loss": 0.3323,
      "step": 317
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8664516129032258,
      "eval_loss": 0.07744947820901871,
      "eval_runtime": 2.7326,
      "eval_samples_per_second": 1134.466,
      "eval_steps_per_second": 23.787,
      "step": 318
    },
    {
      "epoch": 1.9937106918238994,
      "grad_norm": 0.2267828732728958,
      "learning_rate": 0.00014653290645786366,
      "loss": 0.0632,
      "step": 634
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9290322580645162,
      "eval_loss": 0.034380555152893066,
      "eval_runtime": 2.6579,
      "eval_samples_per_second": 1166.326,
      "eval_steps_per_second": 24.455,
      "step": 636
    },
    {
      "epoch": 2.990566037735849,
      "grad_norm": 0.17225514352321625,
      "learning_rate": 0.00013031400584782778,
      "loss": 0.0332,
      "step": 951
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.937741935483871,
      "eval_loss": 0.025218434631824493,
      "eval_runtime": 2.6227,
      "eval_samples_per_second": 1181.985,
      "eval_steps_per_second": 24.784,
      "step": 954
    },
    {
      "epoch": 3.9874213836477987,
      "grad_norm": 0.10644444078207016,
      "learning_rate": 0.0001140951052377919,
      "loss": 0.0254,
      "step": 1268
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9448387096774193,
      "eval_loss": 0.02074083872139454,
      "eval_runtime": 2.6543,
      "eval_samples_per_second": 1167.909,
      "eval_steps_per_second": 24.488,
      "step": 1272
    },
    {
      "epoch": 4.984276729559748,
      "grad_norm": 0.08869536221027374,
      "learning_rate": 9.787620462775599e-05,
      "loss": 0.0223,
      "step": 1585
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9412903225806452,
      "eval_loss": 0.020231781527400017,
      "eval_runtime": 2.6557,
      "eval_samples_per_second": 1167.32,
      "eval_steps_per_second": 24.476,
      "step": 1590
    },
    {
      "epoch": 5.981132075471698,
      "grad_norm": 0.08641496300697327,
      "learning_rate": 8.16573040177201e-05,
      "loss": 0.0201,
      "step": 1902
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9441935483870968,
      "eval_loss": 0.01878916658461094,
      "eval_runtime": 2.635,
      "eval_samples_per_second": 1176.481,
      "eval_steps_per_second": 24.668,
      "step": 1908
    },
    {
      "epoch": 6.977987421383648,
      "grad_norm": 0.07254982739686966,
      "learning_rate": 6.543840340768423e-05,
      "loss": 0.0183,
      "step": 2219
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9467741935483871,
      "eval_loss": 0.017883770167827606,
      "eval_runtime": 2.651,
      "eval_samples_per_second": 1169.38,
      "eval_steps_per_second": 24.519,
      "step": 2226
    },
    {
      "epoch": 7.9748427672955975,
      "grad_norm": 0.0713103711605072,
      "learning_rate": 4.9219502797648335e-05,
      "loss": 0.0171,
      "step": 2536
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9493548387096774,
      "eval_loss": 0.017295770347118378,
      "eval_runtime": 2.6543,
      "eval_samples_per_second": 1167.916,
      "eval_steps_per_second": 24.489,
      "step": 2544
    },
    {
      "epoch": 8.971698113207546,
      "grad_norm": 0.0737316906452179,
      "learning_rate": 3.3000602187612454e-05,
      "loss": 0.0162,
      "step": 2853
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9480645161290323,
      "eval_loss": 0.01655430532991886,
      "eval_runtime": 2.6551,
      "eval_samples_per_second": 1167.55,
      "eval_steps_per_second": 24.481,
      "step": 2862
    },
    {
      "epoch": 9.968553459119496,
      "grad_norm": 0.06824547797441483,
      "learning_rate": 1.6781701577576563e-05,
      "loss": 0.0155,
      "step": 3170
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.947741935483871,
      "eval_loss": 0.016286808997392654,
      "eval_runtime": 2.6473,
      "eval_samples_per_second": 1170.992,
      "eval_steps_per_second": 24.553,
      "step": 3180
    },
    {
      "epoch": 10.965408805031446,
      "grad_norm": 0.058784905821084976,
      "learning_rate": 5.628009675406775e-07,
      "loss": 0.0149,
      "step": 3487
    }
  ],
  "logging_steps": 317,
  "max_steps": 3498,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 11,
  "save_steps": 1000000000.0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 907183422801936.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.5336013283534605,
    "fp16": true,
    "learning_rate": 0.00016014245712748368,
    "lr_scheduler": "cosine",
    "num_train_epochs": 11,
    "temperature": 9,
    "warmup_steps": 368,
    "weight_decay": 0.06096677657018104
  }
}
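
Note: "is_hyper_param_search": true and the "trial_params" block indicate this state file was written during a hyperparameter-search trial. The sketch below is a minimal, hypothetical illustration of how such a trial could be launched with transformers' Trainer.hyperparameter_search on the Optuna backend; the search-space ranges, the model/dataset setup, and the custom "alpha"/"temperature" parameters (which look like knowledge-distillation settings handled by a custom Trainer or TrainingArguments subclass) are assumptions, not taken from this repository.

# Minimal sketch, assuming an Optuna-backed search; only the key names mirror
# the "trial_params" recorded above, everything else is illustrative.
from transformers import Trainer, TrainingArguments

def hp_space(trial):
    # Each key must map to an attribute of the (possibly subclassed) TrainingArguments.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 5, 15),
        "warmup_steps": trial.suggest_int("warmup_steps", 0, 500),
        "weight_decay": trial.suggest_float("weight_decay", 0.0, 0.1),
        "alpha": trial.suggest_float("alpha", 0.1, 0.9),         # hypothetical distillation loss weight
        "temperature": trial.suggest_int("temperature", 2, 10),  # hypothetical distillation temperature
    }

# trainer = Trainer(model_init=model_init, args=TrainingArguments(...), ...)
# best_trial = trainer.hyperparameter_search(
#     hp_space=hp_space,
#     backend="optuna",
#     n_trials=20,
#     direction="maximize",
# )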