{
  "best_metric": 0.90164,
  "best_model_checkpoint": "../../checkpoint/imdb/roberta-base/checkpoint-14858",
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 15640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.88836,
      "eval_loss": 0.2657841444015503,
      "eval_runtime": 31.5408,
      "eval_samples_per_second": 792.625,
      "eval_steps_per_second": 3.107,
      "step": 782
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.680306905370844e-05,
      "loss": 0.3125,
      "step": 1000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.89232,
      "eval_loss": 0.25732555985450745,
      "eval_runtime": 31.3388,
      "eval_samples_per_second": 797.734,
      "eval_steps_per_second": 3.127,
      "step": 1564
    },
    {
      "epoch": 2.56,
      "learning_rate": 4.360613810741688e-05,
      "loss": 0.189,
      "step": 2000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8944,
      "eval_loss": 0.339549720287323,
      "eval_runtime": 31.3008,
      "eval_samples_per_second": 798.703,
      "eval_steps_per_second": 3.131,
      "step": 2346
    },
    {
      "epoch": 3.84,
      "learning_rate": 4.040920716112532e-05,
      "loss": 0.1284,
      "step": 3000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.89492,
      "eval_loss": 0.4028278887271881,
      "eval_runtime": 31.3783,
      "eval_samples_per_second": 796.729,
      "eval_steps_per_second": 3.123,
      "step": 3128
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.88992,
      "eval_loss": 0.4815801978111267,
      "eval_runtime": 31.3726,
      "eval_samples_per_second": 796.873,
      "eval_steps_per_second": 3.124,
      "step": 3910
    },
    {
      "epoch": 5.12,
      "learning_rate": 3.721227621483376e-05,
      "loss": 0.09,
      "step": 4000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.89704,
      "eval_loss": 0.40239471197128296,
      "eval_runtime": 31.3511,
      "eval_samples_per_second": 797.42,
      "eval_steps_per_second": 3.126,
      "step": 4692
    },
    {
      "epoch": 6.39,
      "learning_rate": 3.40153452685422e-05,
      "loss": 0.0632,
      "step": 5000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.89572,
      "eval_loss": 0.4902121424674988,
      "eval_runtime": 31.3539,
      "eval_samples_per_second": 797.35,
      "eval_steps_per_second": 3.126,
      "step": 5474
    },
    {
      "epoch": 7.67,
      "learning_rate": 3.081841432225064e-05,
      "loss": 0.049,
      "step": 6000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.89104,
      "eval_loss": 0.5230616927146912,
      "eval_runtime": 31.2816,
      "eval_samples_per_second": 799.192,
      "eval_steps_per_second": 3.133,
      "step": 6256
    },
    {
      "epoch": 8.95,
      "learning_rate": 2.7621483375959077e-05,
      "loss": 0.0426,
      "step": 7000
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.89764,
      "eval_loss": 0.5609263777732849,
      "eval_runtime": 31.2704,
      "eval_samples_per_second": 799.477,
      "eval_steps_per_second": 3.134,
      "step": 7038
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.89744,
      "eval_loss": 0.590167760848999,
      "eval_runtime": 31.3811,
      "eval_samples_per_second": 796.658,
      "eval_steps_per_second": 3.123,
      "step": 7820
    },
    {
      "epoch": 10.23,
      "learning_rate": 2.442455242966752e-05,
      "loss": 0.0343,
      "step": 8000
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.89904,
      "eval_loss": 0.5456650853157043,
      "eval_runtime": 31.3005,
      "eval_samples_per_second": 798.71,
      "eval_steps_per_second": 3.131,
      "step": 8602
    },
    {
      "epoch": 11.51,
      "learning_rate": 2.122762148337596e-05,
      "loss": 0.0282,
      "step": 9000
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8972,
      "eval_loss": 0.5991714596748352,
      "eval_runtime": 31.3289,
      "eval_samples_per_second": 797.985,
      "eval_steps_per_second": 3.128,
      "step": 9384
    },
    {
      "epoch": 12.79,
      "learning_rate": 1.80306905370844e-05,
      "loss": 0.0237,
      "step": 10000
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8974,
      "eval_loss": 0.5605877041816711,
      "eval_runtime": 31.2841,
      "eval_samples_per_second": 799.128,
      "eval_steps_per_second": 3.133,
      "step": 10166
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.89972,
      "eval_loss": 0.5708814263343811,
      "eval_runtime": 31.4954,
      "eval_samples_per_second": 793.767,
      "eval_steps_per_second": 3.112,
      "step": 10948
    },
    {
      "epoch": 14.07,
      "learning_rate": 1.483375959079284e-05,
      "loss": 0.021,
      "step": 11000
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.8992,
      "eval_loss": 0.616736888885498,
      "eval_runtime": 31.4351,
      "eval_samples_per_second": 795.29,
      "eval_steps_per_second": 3.118,
      "step": 11730
    },
    {
      "epoch": 15.35,
      "learning_rate": 1.163682864450128e-05,
      "loss": 0.019,
      "step": 12000
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9014,
      "eval_loss": 0.5849418640136719,
      "eval_runtime": 31.1725,
      "eval_samples_per_second": 801.99,
      "eval_steps_per_second": 3.144,
      "step": 12512
    },
    {
      "epoch": 16.62,
      "learning_rate": 8.439897698209718e-06,
      "loss": 0.0147,
      "step": 13000
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.90072,
      "eval_loss": 0.60484778881073,
      "eval_runtime": 31.2733,
      "eval_samples_per_second": 799.404,
      "eval_steps_per_second": 3.134,
      "step": 13294
    },
    {
      "epoch": 17.9,
      "learning_rate": 5.242966751918159e-06,
      "loss": 0.014,
      "step": 14000
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.89868,
      "eval_loss": 0.6006963849067688,
      "eval_runtime": 31.2309,
      "eval_samples_per_second": 800.488,
      "eval_steps_per_second": 3.138,
      "step": 14076
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.90164,
      "eval_loss": 0.5982226729393005,
      "eval_runtime": 31.229,
      "eval_samples_per_second": 800.537,
      "eval_steps_per_second": 3.138,
      "step": 14858
    },
    {
      "epoch": 19.18,
      "learning_rate": 2.0460358056265987e-06,
      "loss": 0.0129,
      "step": 15000
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9014,
      "eval_loss": 0.599052906036377,
      "eval_runtime": 31.2461,
      "eval_samples_per_second": 800.099,
      "eval_steps_per_second": 3.136,
      "step": 15640
    },
    {
      "epoch": 20.0,
      "step": 15640,
      "total_flos": 3.288888192e+16,
      "train_loss": 0.0671754312637212,
      "train_runtime": 2479.9785,
      "train_samples_per_second": 201.615,
      "train_steps_per_second": 6.307
    }
  ],
  "logging_steps": 1000,
  "max_steps": 15640,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 3.288888192e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}