{
  "best_metric": 0.6091980338096619,
  "best_model_checkpoint": "bert_uncased_L-4_H-128_A-2_cola/checkpoint-204",
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 374,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 0.7114630937576294,
      "learning_rate": 4.9e-05,
      "loss": 0.6362,
      "step": 34
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6191275119781494,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.368,
      "eval_samples_per_second": 2834.557,
      "eval_steps_per_second": 13.588,
      "step": 34
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.6178502440452576,
      "learning_rate": 4.8e-05,
      "loss": 0.608,
      "step": 68
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6190881729125977,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.359,
      "eval_samples_per_second": 2905.511,
      "eval_steps_per_second": 13.929,
      "step": 68
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.42143625020980835,
      "learning_rate": 4.7e-05,
      "loss": 0.607,
      "step": 102
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.616786777973175,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3564,
      "eval_samples_per_second": 2926.415,
      "eval_steps_per_second": 14.029,
      "step": 102
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.5581986904144287,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.6055,
      "step": 136
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6145115494728088,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3604,
      "eval_samples_per_second": 2894.371,
      "eval_steps_per_second": 13.875,
      "step": 136
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.6049013137817383,
      "learning_rate": 4.5e-05,
      "loss": 0.6009,
      "step": 170
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6107176542282104,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3593,
      "eval_samples_per_second": 2902.792,
      "eval_steps_per_second": 13.916,
      "step": 170
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.7587459087371826,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.5939,
      "step": 204
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6091980338096619,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 0.3601,
      "eval_samples_per_second": 2896.749,
      "eval_steps_per_second": 13.887,
      "step": 204
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.9767988920211792,
      "learning_rate": 4.3e-05,
      "loss": 0.5799,
      "step": 238
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.6951102614402771,
      "eval_loss": 0.616756796836853,
      "eval_matthews_correlation": 0.0854532539112171,
      "eval_runtime": 0.3674,
      "eval_samples_per_second": 2838.795,
      "eval_steps_per_second": 13.609,
      "step": 238
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.1349046230316162,
      "learning_rate": 4.2e-05,
      "loss": 0.5679,
      "step": 272
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.6912751793861389,
      "eval_loss": 0.6161894202232361,
      "eval_matthews_correlation": 0.08481153787433816,
      "eval_runtime": 0.364,
      "eval_samples_per_second": 2865.523,
      "eval_steps_per_second": 13.737,
      "step": 272
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.366595983505249,
      "learning_rate": 4.1e-05,
      "loss": 0.5553,
      "step": 306
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.6855225563049316,
      "eval_loss": 0.623590886592865,
      "eval_matthews_correlation": 0.06376490253157786,
      "eval_runtime": 0.3742,
      "eval_samples_per_second": 2787.105,
      "eval_steps_per_second": 13.361,
      "step": 306
    },
    {
      "epoch": 10.0,
      "grad_norm": 1.5425918102264404,
      "learning_rate": 4e-05,
      "loss": 0.5361,
      "step": 340
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.6586769223213196,
      "eval_loss": 0.6316266655921936,
      "eval_matthews_correlation": 0.08369053019265868,
      "eval_runtime": 0.3592,
      "eval_samples_per_second": 2903.742,
      "eval_steps_per_second": 13.92,
      "step": 340
    },
    {
      "epoch": 11.0,
      "grad_norm": 2.3495326042175293,
      "learning_rate": 3.9000000000000006e-05,
      "loss": 0.5249,
      "step": 374
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.6548417806625366,
      "eval_loss": 0.638280987739563,
      "eval_matthews_correlation": 0.10313429660779676,
      "eval_runtime": 0.3611,
      "eval_samples_per_second": 2888.509,
      "eval_steps_per_second": 13.847,
      "step": 374
    },
    {
      "epoch": 11.0,
      "step": 374,
      "total_flos": 117043404217344.0,
      "train_loss": 0.5832455527973684,
      "train_runtime": 45.3448,
      "train_samples_per_second": 9428.866,
      "train_steps_per_second": 37.491
    }
  ],
  "logging_steps": 1,
  "max_steps": 1700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 5
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 117043404217344.0,
  "train_batch_size": 256,
  "trial_name": null,
  "trial_params": null
}