{
  "best_metric": 0.8919400441736002,
  "best_model_checkpoint": "cls_comment-phobert-base-v2-v2.4/checkpoint-1500",
  "epoch": 19.138755980861244,
  "eval_steps": 100,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.96,
      "grad_norm": 1.4111480712890625,
      "learning_rate": 2.5e-06,
      "loss": 1.7008,
      "step": 100
    },
    {
      "epoch": 0.96,
      "eval_accuracy": 0.46234623462346236,
      "eval_f1_score": 0.10914902653284753,
      "eval_loss": 1.5259455442428589,
      "eval_precision": 0.22956187895212285,
      "eval_recall": 0.16870144284128744,
      "eval_runtime": 5.8062,
      "eval_samples_per_second": 574.041,
      "eval_steps_per_second": 9.128,
      "step": 100
    },
    {
      "epoch": 1.91,
      "grad_norm": 3.172757148742676,
      "learning_rate": 5e-06,
      "loss": 1.4089,
      "step": 200
    },
    {
      "epoch": 1.91,
      "eval_accuracy": 0.6567656765676567,
      "eval_f1_score": 0.24987557712552566,
      "eval_loss": 1.1875011920928955,
      "eval_precision": 0.216976056542887,
      "eval_recall": 0.2948041271015744,
      "eval_runtime": 6.0549,
      "eval_samples_per_second": 550.46,
      "eval_steps_per_second": 8.753,
      "step": 200
    },
    {
      "epoch": 2.87,
      "grad_norm": 4.23789644241333,
      "learning_rate": 7.500000000000001e-06,
      "loss": 1.0776,
      "step": 300
    },
    {
      "epoch": 2.87,
      "eval_accuracy": 0.8037803780378038,
      "eval_f1_score": 0.5304195101025111,
      "eval_loss": 0.9009036421775818,
      "eval_precision": 0.5333440517242575,
      "eval_recall": 0.5308669916547205,
      "eval_runtime": 6.0211,
      "eval_samples_per_second": 553.553,
      "eval_steps_per_second": 8.802,
      "step": 300
    },
    {
      "epoch": 3.83,
      "grad_norm": 2.9036450386047363,
      "learning_rate": 1e-05,
      "loss": 0.8625,
      "step": 400
    },
    {
      "epoch": 3.83,
      "eval_accuracy": 0.8574857485748575,
      "eval_f1_score": 0.6321452494594356,
      "eval_loss": 0.7617138624191284,
      "eval_precision": 0.7107045568063487,
      "eval_recall": 0.6372368264036259,
      "eval_runtime": 6.0254,
      "eval_samples_per_second": 553.154,
      "eval_steps_per_second": 8.796,
      "step": 400
    },
    {
      "epoch": 4.78,
      "grad_norm": 4.648315906524658,
      "learning_rate": 9.722222222222223e-06,
      "loss": 0.7245,
      "step": 500
    },
    {
      "epoch": 4.78,
      "eval_accuracy": 0.8817881788178817,
      "eval_f1_score": 0.7282115691268598,
      "eval_loss": 0.689365804195404,
      "eval_precision": 0.8802547295767839,
      "eval_recall": 0.7213609117839314,
      "eval_runtime": 6.0362,
      "eval_samples_per_second": 552.166,
      "eval_steps_per_second": 8.78,
      "step": 500
    },
    {
      "epoch": 5.74,
      "grad_norm": 5.482232093811035,
      "learning_rate": 9.444444444444445e-06,
      "loss": 0.6573,
      "step": 600
    },
    {
      "epoch": 5.74,
      "eval_accuracy": 0.8967896789678967,
      "eval_f1_score": 0.8406229534596467,
      "eval_loss": 0.6651212573051453,
      "eval_precision": 0.8770203982052687,
      "eval_recall": 0.8212623271516084,
      "eval_runtime": 6.089,
      "eval_samples_per_second": 547.376,
      "eval_steps_per_second": 8.704,
      "step": 600
    },
    {
      "epoch": 6.7,
      "grad_norm": 4.107127666473389,
      "learning_rate": 9.166666666666666e-06,
      "loss": 0.6082,
      "step": 700
    },
    {
      "epoch": 6.7,
      "eval_accuracy": 0.9078907890789079,
      "eval_f1_score": 0.8629840378859187,
      "eval_loss": 0.6335443258285522,
      "eval_precision": 0.8594850910270425,
      "eval_recall": 0.866740275628739,
      "eval_runtime": 6.012,
      "eval_samples_per_second": 554.389,
      "eval_steps_per_second": 8.816,
      "step": 700
    },
    {
      "epoch": 7.66,
      "grad_norm": 4.067190647125244,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.5674,
      "step": 800
    },
    {
      "epoch": 7.66,
      "eval_accuracy": 0.9105910591059105,
      "eval_f1_score": 0.8692050710637084,
      "eval_loss": 0.636346697807312,
      "eval_precision": 0.8621114850690574,
      "eval_recall": 0.8795216165515357,
      "eval_runtime": 6.0275,
      "eval_samples_per_second": 552.965,
      "eval_steps_per_second": 8.793,
      "step": 800
    },
    {
      "epoch": 8.61,
      "grad_norm": 3.8194425106048584,
      "learning_rate": 8.611111111111112e-06,
      "loss": 0.5477,
      "step": 900
    },
    {
      "epoch": 8.61,
      "eval_accuracy": 0.9150915091509151,
      "eval_f1_score": 0.8776204392865118,
      "eval_loss": 0.6268996596336365,
      "eval_precision": 0.8876632968569648,
      "eval_recall": 0.8693279668307071,
      "eval_runtime": 5.982,
      "eval_samples_per_second": 557.174,
      "eval_steps_per_second": 8.86,
      "step": 900
    },
    {
      "epoch": 9.57,
      "grad_norm": 4.406583309173584,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.5256,
      "step": 1000
    },
    {
      "epoch": 9.57,
      "eval_accuracy": 0.9204920492049204,
      "eval_f1_score": 0.8834661339438045,
      "eval_loss": 0.6178370714187622,
      "eval_precision": 0.8826304228202296,
      "eval_recall": 0.8848781731721993,
      "eval_runtime": 6.0125,
      "eval_samples_per_second": 554.349,
      "eval_steps_per_second": 8.815,
      "step": 1000
    },
    {
      "epoch": 10.53,
      "grad_norm": 3.69624400138855,
      "learning_rate": 8.055555555555557e-06,
      "loss": 0.5148,
      "step": 1100
    },
    {
      "epoch": 10.53,
      "eval_accuracy": 0.9198919891989199,
      "eval_f1_score": 0.879559155366957,
      "eval_loss": 0.6214434504508972,
      "eval_precision": 0.8761821574730276,
      "eval_recall": 0.8838583582269935,
      "eval_runtime": 6.0361,
      "eval_samples_per_second": 552.174,
      "eval_steps_per_second": 8.78,
      "step": 1100
    },
    {
      "epoch": 11.48,
      "grad_norm": 4.3708343505859375,
      "learning_rate": 7.77777777777778e-06,
      "loss": 0.4999,
      "step": 1200
    },
    {
      "epoch": 11.48,
      "eval_accuracy": 0.9228922892289229,
      "eval_f1_score": 0.8856181382103513,
      "eval_loss": 0.6158125400543213,
      "eval_precision": 0.8862286567723959,
      "eval_recall": 0.8853146392819923,
      "eval_runtime": 5.9971,
      "eval_samples_per_second": 555.768,
      "eval_steps_per_second": 8.838,
      "step": 1200
    },
    {
      "epoch": 12.44,
      "grad_norm": 5.073043346405029,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.4916,
      "step": 1300
    },
    {
      "epoch": 12.44,
      "eval_accuracy": 0.9231923192319232,
      "eval_f1_score": 0.8839178584078228,
      "eval_loss": 0.6185785531997681,
      "eval_precision": 0.8887592838927109,
      "eval_recall": 0.8794527947878894,
      "eval_runtime": 6.0868,
      "eval_samples_per_second": 547.575,
      "eval_steps_per_second": 8.707,
      "step": 1300
    },
    {
      "epoch": 13.4,
      "grad_norm": 3.3584494590759277,
      "learning_rate": 7.222222222222223e-06,
      "loss": 0.479,
      "step": 1400
    },
    {
      "epoch": 13.4,
      "eval_accuracy": 0.9201920192019202,
      "eval_f1_score": 0.8847465693125924,
      "eval_loss": 0.6285073161125183,
      "eval_precision": 0.8863862882902076,
      "eval_recall": 0.8833493957517122,
      "eval_runtime": 6.0466,
      "eval_samples_per_second": 551.218,
      "eval_steps_per_second": 8.765,
      "step": 1400
    },
    {
      "epoch": 14.35,
      "grad_norm": 4.605256080627441,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.4812,
      "step": 1500
    },
    {
      "epoch": 14.35,
      "eval_accuracy": 0.9240924092409241,
      "eval_f1_score": 0.8919400441736002,
      "eval_loss": 0.6176608204841614,
      "eval_precision": 0.8911083019322857,
      "eval_recall": 0.8930303467825517,
      "eval_runtime": 6.0144,
      "eval_samples_per_second": 554.172,
      "eval_steps_per_second": 8.812,
      "step": 1500
    },
    {
      "epoch": 15.31,
      "grad_norm": 5.669193744659424,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.4667,
      "step": 1600
    },
    {
      "epoch": 15.31,
      "eval_accuracy": 0.9255925592559255,
      "eval_f1_score": 0.8847926962066609,
      "eval_loss": 0.6205594539642334,
      "eval_precision": 0.8843481979823253,
      "eval_recall": 0.8852982243831954,
      "eval_runtime": 6.1133,
      "eval_samples_per_second": 545.203,
      "eval_steps_per_second": 8.67,
      "step": 1600
    },
    {
      "epoch": 16.27,
      "grad_norm": 3.3217363357543945,
      "learning_rate": 6.3888888888888885e-06,
      "loss": 0.4668,
      "step": 1700
    },
    {
      "epoch": 16.27,
      "eval_accuracy": 0.9264926492649265,
      "eval_f1_score": 0.8854482169609333,
      "eval_loss": 0.6200627684593201,
      "eval_precision": 0.8837445827045557,
      "eval_recall": 0.8875863206208373,
      "eval_runtime": 6.0633,
      "eval_samples_per_second": 549.698,
      "eval_steps_per_second": 8.741,
      "step": 1700
    },
    {
      "epoch": 17.22,
      "grad_norm": 3.535698652267456,
      "learning_rate": 6.111111111111112e-06,
      "loss": 0.4635,
      "step": 1800
    },
    {
      "epoch": 17.22,
      "eval_accuracy": 0.9252925292529253,
      "eval_f1_score": 0.8901467035784716,
      "eval_loss": 0.6252107620239258,
      "eval_precision": 0.8926952965988483,
      "eval_recall": 0.8877172530901484,
      "eval_runtime": 6.0706,
      "eval_samples_per_second": 549.043,
      "eval_steps_per_second": 8.731,
      "step": 1800
    },
    {
      "epoch": 18.18,
      "grad_norm": 3.0793726444244385,
      "learning_rate": 5.833333333333334e-06,
      "loss": 0.4593,
      "step": 1900
    },
    {
      "epoch": 18.18,
      "eval_accuracy": 0.9273927392739274,
      "eval_f1_score": 0.88911901951679,
      "eval_loss": 0.626419723033905,
      "eval_precision": 0.8887473645082232,
      "eval_recall": 0.8899116044774821,
      "eval_runtime": 6.0336,
      "eval_samples_per_second": 552.41,
      "eval_steps_per_second": 8.784,
      "step": 1900
    },
    {
      "epoch": 19.14,
      "grad_norm": 0.5877706408500671,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.4538,
      "step": 2000
    },
    {
      "epoch": 19.14,
      "eval_accuracy": 0.9264926492649265,
      "eval_f1_score": 0.8890948519955958,
      "eval_loss": 0.6227503418922424,
      "eval_precision": 0.8870090750160463,
      "eval_recall": 0.8913327030956307,
      "eval_runtime": 6.0553,
      "eval_samples_per_second": 550.427,
      "eval_steps_per_second": 8.753,
      "step": 2000
    },
    {
      "epoch": 19.14,
      "step": 2000,
      "total_flos": 4678479916291584.0,
      "train_loss": 0.6728471851348877,
      "train_runtime": 1885.5017,
      "train_samples_per_second": 271.546,
      "train_steps_per_second": 2.121
    }
  ],
  "logging_steps": 100,
  "max_steps": 4000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 39,
  "save_steps": 100,
  "total_flos": 4678479916291584.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}