{
"best_metric": 0.8907678244972578,
"best_model_checkpoint": "distilbert-hate_speech18\\run-4\\checkpoint-480",
"epoch": 6.0,
"eval_steps": 500,
"global_step": 1440,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"learning_rate": 5.8838345155265075e-06,
"loss": 0.549,
"step": 240
},
{
"epoch": 1.0,
"eval_accuracy": 0.8820840950639853,
"eval_loss": 0.4940944314002991,
"eval_runtime": 8.0075,
"eval_samples_per_second": 273.244,
"eval_steps_per_second": 17.109,
"step": 240
},
{
"epoch": 2.0,
"learning_rate": 4.903195429605423e-06,
"loss": 0.4705,
"step": 480
},
{
"epoch": 2.0,
"eval_accuracy": 0.8907678244972578,
"eval_loss": 0.4879714548587799,
"eval_runtime": 7.9604,
"eval_samples_per_second": 274.862,
"eval_steps_per_second": 17.21,
"step": 480
},
{
"epoch": 3.0,
"learning_rate": 3.922556343684338e-06,
"loss": 0.4471,
"step": 720
},
{
"epoch": 3.0,
"eval_accuracy": 0.8893967093235832,
"eval_loss": 0.4937082529067993,
"eval_runtime": 8.0067,
"eval_samples_per_second": 273.273,
"eval_steps_per_second": 17.111,
"step": 720
},
{
"epoch": 4.0,
"learning_rate": 2.9419172577632537e-06,
"loss": 0.4297,
"step": 960
},
{
"epoch": 4.0,
"eval_accuracy": 0.8903107861060329,
"eval_loss": 0.4932664930820465,
"eval_runtime": 8.0508,
"eval_samples_per_second": 271.775,
"eval_steps_per_second": 17.017,
"step": 960
},
{
"epoch": 5.0,
"learning_rate": 1.961278171842169e-06,
"loss": 0.4187,
"step": 1200
},
{
"epoch": 5.0,
"eval_accuracy": 0.8880255941499086,
"eval_loss": 0.49868300557136536,
"eval_runtime": 8.123,
"eval_samples_per_second": 269.359,
"eval_steps_per_second": 16.866,
"step": 1200
},
{
"epoch": 6.0,
"learning_rate": 9.806390859210846e-07,
"loss": 0.4112,
"step": 1440
},
{
"epoch": 6.0,
"eval_accuracy": 0.8875685557586838,
"eval_loss": 0.5080177187919617,
"eval_runtime": 7.9906,
"eval_samples_per_second": 273.822,
"eval_steps_per_second": 17.145,
"step": 1440
}
],
"logging_steps": 500,
"max_steps": 1680,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"total_flos": 977555265925056.0,
"trial_name": null,
"trial_params": {
"alpha": 0.7641900951856909,
"learning_rate": 6.864473601447592e-06,
"num_train_epochs": 7,
"per_device_eval_batch_size": 16,
"per_device_train_batch_size": 32,
"temperature": 23
}
}