{
  "best_metric": 0.3459293076636948,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola\\run-0\\checkpoint-3207",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 3207,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.47,
      "grad_norm": 3.136430025100708,
      "learning_rate": 1.057670968337005e-06,
      "loss": 0.6141,
      "step": 500
    },
    {
      "epoch": 0.94,
      "grad_norm": 3.4976818561553955,
      "learning_rate": 9.485201976107919e-07,
      "loss": 0.5835,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.5739973187446594,
      "eval_matthews_correlation": 0.0,
      "eval_runtime": 2.1959,
      "eval_samples_per_second": 474.985,
      "eval_steps_per_second": 30.057,
      "step": 1069
    },
    {
      "epoch": 1.4,
      "grad_norm": 5.231539249420166,
      "learning_rate": 8.393694268845788e-07,
      "loss": 0.5608,
      "step": 1500
    },
    {
      "epoch": 1.87,
      "grad_norm": 7.4941792488098145,
      "learning_rate": 7.302186561583657e-07,
      "loss": 0.5232,
      "step": 2000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5443820953369141,
      "eval_matthews_correlation": 0.2045620264226157,
      "eval_runtime": 0.9649,
      "eval_samples_per_second": 1080.926,
      "eval_steps_per_second": 68.4,
      "step": 2138
    },
    {
      "epoch": 2.34,
      "grad_norm": 7.5233941078186035,
      "learning_rate": 6.210678854321524e-07,
      "loss": 0.5208,
      "step": 2500
    },
    {
      "epoch": 2.81,
      "grad_norm": 6.596949100494385,
      "learning_rate": 5.119171147059394e-07,
      "loss": 0.4937,
      "step": 3000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.5387976169586182,
      "eval_matthews_correlation": 0.3459293076636948,
      "eval_runtime": 0.8998,
      "eval_samples_per_second": 1159.164,
      "eval_steps_per_second": 73.351,
      "step": 3207
    }
  ],
  "logging_steps": 500,
  "max_steps": 5345,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 113155359519516.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.166821739063218e-06,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 8,
    "seed": 23
  }
}