{
  "best_metric": 0.5132274753696389,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola\\run-4\\checkpoint-1605",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1605,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.93,
      "grad_norm": 6.663467884063721,
      "learning_rate": 1.1599690135657977e-05,
      "loss": 0.5243,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.46048709750175476,
      "eval_matthews_correlation": 0.4791261224300467,
      "eval_runtime": 0.9073,
      "eval_samples_per_second": 1149.535,
      "eval_steps_per_second": 72.741,
      "step": 535
    },
    {
      "epoch": 1.87,
      "grad_norm": 4.0639166831970215,
      "learning_rate": 8.063199240640302e-06,
      "loss": 0.3712,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.47315478324890137,
      "eval_matthews_correlation": 0.5038487938814906,
      "eval_runtime": 1.4506,
      "eval_samples_per_second": 719.021,
      "eval_steps_per_second": 45.499,
      "step": 1070
    },
    {
      "epoch": 2.8,
      "grad_norm": 19.560606002807617,
      "learning_rate": 4.5267083456226254e-06,
      "loss": 0.2652,
      "step": 1500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.5683983564376831,
      "eval_matthews_correlation": 0.5132274753696389,
      "eval_runtime": 1.4702,
      "eval_samples_per_second": 709.424,
      "eval_steps_per_second": 44.892,
      "step": 1605
    }
  ],
  "logging_steps": 500,
  "max_steps": 2140,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 128490789442440.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.5136181030675654e-05,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 16,
    "seed": 7
  }
}