{
"best_metric": 0.8181818181818182,
"best_model_checkpoint": "outputs/xlm-roberta-large-twitter-indonesia-sarcastic/checkpoint-708",
"epoch": 15.0,
"eval_steps": 500,
"global_step": 885,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"learning_rate": 9.997697221833061e-06,
"loss": 0.5862,
"step": 59
},
{
"epoch": 1.0,
"eval_accuracy": 0.75,
"eval_f1": 0.0,
"eval_loss": 0.5304240584373474,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 3.3745,
"eval_samples_per_second": 79.42,
"eval_steps_per_second": 1.482,
"step": 59
},
{
"epoch": 2.0,
"learning_rate": 9.990951812568578e-06,
"loss": 0.5168,
"step": 118
},
{
"epoch": 2.0,
"eval_accuracy": 0.75,
"eval_f1": 0.0,
"eval_loss": 0.4897027611732483,
"eval_precision": 0.0,
"eval_recall": 0.0,
"eval_runtime": 3.3714,
"eval_samples_per_second": 79.491,
"eval_steps_per_second": 1.483,
"step": 118
},
{
"epoch": 3.0,
"learning_rate": 9.979044933876465e-06,
"loss": 0.4771,
"step": 177
},
{
"epoch": 3.0,
"eval_accuracy": 0.7947761194029851,
"eval_f1": 0.3373493975903615,
"eval_loss": 0.45346158742904663,
"eval_precision": 0.875,
"eval_recall": 0.208955223880597,
"eval_runtime": 3.3741,
"eval_samples_per_second": 79.428,
"eval_steps_per_second": 1.482,
"step": 177
},
{
"epoch": 4.0,
"learning_rate": 9.962224338961997e-06,
"loss": 0.4101,
"step": 236
},
{
"epoch": 4.0,
"eval_accuracy": 0.7910447761194029,
"eval_f1": 0.6585365853658537,
"eval_loss": 0.4235016405582428,
"eval_precision": 0.5567010309278351,
"eval_recall": 0.8059701492537313,
"eval_runtime": 3.3736,
"eval_samples_per_second": 79.44,
"eval_steps_per_second": 1.482,
"step": 236
},
{
"epoch": 5.0,
"learning_rate": 9.940506627721576e-06,
"loss": 0.3225,
"step": 295
},
{
"epoch": 5.0,
"eval_accuracy": 0.8507462686567164,
"eval_f1": 0.5918367346938774,
"eval_loss": 0.47333377599716187,
"eval_precision": 0.9354838709677419,
"eval_recall": 0.43283582089552236,
"eval_runtime": 3.3745,
"eval_samples_per_second": 79.419,
"eval_steps_per_second": 1.482,
"step": 295
},
{
"epoch": 6.0,
"learning_rate": 9.913913232914188e-06,
"loss": 0.2246,
"step": 354
},
{
"epoch": 6.0,
"eval_accuracy": 0.8694029850746269,
"eval_f1": 0.7008547008547008,
"eval_loss": 0.3362283706665039,
"eval_precision": 0.82,
"eval_recall": 0.6119402985074627,
"eval_runtime": 3.3741,
"eval_samples_per_second": 79.428,
"eval_steps_per_second": 1.482,
"step": 354
},
{
"epoch": 7.0,
"learning_rate": 9.882470399009847e-06,
"loss": 0.166,
"step": 413
},
{
"epoch": 7.0,
"eval_accuracy": 0.8768656716417911,
"eval_f1": 0.722689075630252,
"eval_loss": 0.36720752716064453,
"eval_precision": 0.8269230769230769,
"eval_recall": 0.6417910447761194,
"eval_runtime": 3.3726,
"eval_samples_per_second": 79.464,
"eval_steps_per_second": 1.483,
"step": 413
},
{
"epoch": 8.0,
"learning_rate": 9.846863705058136e-06,
"loss": 0.0989,
"step": 472
},
{
"epoch": 8.0,
"eval_accuracy": 0.8768656716417911,
"eval_f1": 0.7625899280575541,
"eval_loss": 0.38347017765045166,
"eval_precision": 0.7361111111111112,
"eval_recall": 0.7910447761194029,
"eval_runtime": 3.3739,
"eval_samples_per_second": 79.433,
"eval_steps_per_second": 1.482,
"step": 472
},
{
"epoch": 9.0,
"learning_rate": 9.805900576231358e-06,
"loss": 0.0797,
"step": 531
},
{
"epoch": 9.0,
"eval_accuracy": 0.8992537313432836,
"eval_f1": 0.7938931297709924,
"eval_loss": 0.43790221214294434,
"eval_precision": 0.8125,
"eval_recall": 0.7761194029850746,
"eval_runtime": 3.3735,
"eval_samples_per_second": 79.442,
"eval_steps_per_second": 1.482,
"step": 531
},
{
"epoch": 10.0,
"learning_rate": 9.760194603759247e-06,
"loss": 0.08,
"step": 590
},
{
"epoch": 10.0,
"eval_accuracy": 0.8544776119402985,
"eval_f1": 0.7450980392156863,
"eval_loss": 0.7676528096199036,
"eval_precision": 0.6627906976744186,
"eval_recall": 0.8507462686567164,
"eval_runtime": 3.3728,
"eval_samples_per_second": 79.46,
"eval_steps_per_second": 1.482,
"step": 590
},
{
"epoch": 11.0,
"learning_rate": 9.709790893918487e-06,
"loss": 0.0505,
"step": 649
},
{
"epoch": 11.0,
"eval_accuracy": 0.8805970149253731,
"eval_f1": 0.7288135593220338,
"eval_loss": 0.7316186428070068,
"eval_precision": 0.8431372549019608,
"eval_recall": 0.6417910447761194,
"eval_runtime": 3.3708,
"eval_samples_per_second": 79.507,
"eval_steps_per_second": 1.483,
"step": 649
},
{
"epoch": 12.0,
"learning_rate": 9.654739189085373e-06,
"loss": 0.073,
"step": 708
},
{
"epoch": 12.0,
"eval_accuracy": 0.9104477611940298,
"eval_f1": 0.8181818181818182,
"eval_loss": 0.47964057326316833,
"eval_precision": 0.8307692307692308,
"eval_recall": 0.8059701492537313,
"eval_runtime": 3.372,
"eval_samples_per_second": 79.477,
"eval_steps_per_second": 1.483,
"step": 708
},
{
"epoch": 13.0,
"learning_rate": 9.595093818646103e-06,
"loss": 0.05,
"step": 767
},
{
"epoch": 13.0,
"eval_accuracy": 0.8694029850746269,
"eval_f1": 0.7058823529411765,
"eval_loss": 0.8468834757804871,
"eval_precision": 0.8076923076923077,
"eval_recall": 0.6268656716417911,
"eval_runtime": 3.3726,
"eval_samples_per_second": 79.464,
"eval_steps_per_second": 1.483,
"step": 767
},
{
"epoch": 14.0,
"learning_rate": 9.530913645380233e-06,
"loss": 0.0583,
"step": 826
},
{
"epoch": 14.0,
"eval_accuracy": 0.8917910447761194,
"eval_f1": 0.7563025210084034,
"eval_loss": 0.7265912294387817,
"eval_precision": 0.8653846153846154,
"eval_recall": 0.6716417910447762,
"eval_runtime": 3.3727,
"eval_samples_per_second": 79.462,
"eval_steps_per_second": 1.483,
"step": 826
},
{
"epoch": 15.0,
"learning_rate": 9.462262007370205e-06,
"loss": 0.0275,
"step": 885
},
{
"epoch": 15.0,
"eval_accuracy": 0.8917910447761194,
"eval_f1": 0.7387387387387387,
"eval_loss": 0.897386908531189,
"eval_precision": 0.9318181818181818,
"eval_recall": 0.6119402985074627,
"eval_runtime": 3.3738,
"eval_samples_per_second": 79.437,
"eval_steps_per_second": 1.482,
"step": 885
},
{
"epoch": 15.0,
"step": 885,
"total_flos": 6563126626237440.0,
"train_loss": 0.21473971275286485,
"train_runtime": 1465.663,
"train_samples_per_second": 128.133,
"train_steps_per_second": 4.025
}
],
"logging_steps": 500,
"max_steps": 5900,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 500,
"total_flos": 6563126626237440.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
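For reference, a minimal sketch (not part of the original file) of how this trainer_state.json can be inspected offline, assuming it has been downloaded locally as "trainer_state.json" (the path is an assumption): it filters log_history for the evaluation entries and recovers the checkpoint with the highest eval_f1, which should agree with the best_metric and best_model_checkpoint fields recorded above (0.8182 at checkpoint-708, epoch 12).

import json

# Assumed local path; adjust to wherever the file was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-epoch evaluation entries (those carrying eval_* metrics).
eval_logs = [entry for entry in state["log_history"] if "eval_f1" in entry]

# The best entry by eval_f1 should match best_metric / best_model_checkpoint.
best = max(eval_logs, key=lambda e: e["eval_f1"])
print(f"best step: {best['step']}, epoch: {best['epoch']}, eval_f1: {best['eval_f1']:.4f}")
print(f"recorded best_metric: {state['best_metric']}")
print(f"recorded best_model_checkpoint: {state['best_model_checkpoint']}")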