{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 81,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_B-Claim": {
        "f1-score": 0.18764302059496568,
        "precision": 0.3416666666666667,
        "recall": 0.12933753943217666,
        "support": 317.0
      },
      "eval_B-MajorClaim": {
        "f1-score": 0.19540229885057472,
        "precision": 0.8947368421052632,
        "recall": 0.10967741935483871,
        "support": 155.0
      },
      "eval_B-Premise": {
        "f1-score": 0.8476339053562143,
        "precision": 0.740909090909091,
        "recall": 0.9902794653705954,
        "support": 823.0
      },
      "eval_I-Claim": {
        "f1-score": 0.4315998169894769,
        "precision": 0.6394035246272028,
        "recall": 0.3257366482504604,
        "support": 4344.0
      },
      "eval_I-MajorClaim": {
        "f1-score": 0.7779799818016379,
        "precision": 0.7493426818580193,
        "recall": 0.8088930936613056,
        "support": 2114.0
      },
      "eval_I-Premise": {
        "f1-score": 0.8948683715038855,
        "precision": 0.8376057421174058,
        "recall": 0.9605350187403542,
        "support": 13607.0
      },
      "eval_O": {
        "f1-score": 0.9980147987727848,
        "precision": 0.9970251509961237,
        "recall": 0.9990064131514769,
        "support": 11071.0
      },
      "eval_accuracy": 0.8673183065585397,
      "eval_loss": 0.3193369209766388,
      "eval_macro avg": {
        "f1-score": 0.6190203134099342,
        "precision": 0.7429556713256817,
        "recall": 0.6176379425658869,
        "support": 32431.0
      },
      "eval_runtime": 4.9083,
      "eval_samples_per_second": 16.299,
      "eval_steps_per_second": 2.037,
      "eval_weighted avg": {
        "f1-score": 0.8489527906249091,
        "precision": 0.8526967051825685,
        "recall": 0.8673183065585397,
        "support": 32431.0
      },
      "step": 81
    }
  ],
  "logging_steps": 500,
  "max_steps": 4050,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 143790812718000.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}