{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 162,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_B-Claim": {
"f1-score": 0.18764302059496568,
"precision": 0.3416666666666667,
"recall": 0.12933753943217666,
"support": 317.0
},
"eval_B-MajorClaim": {
"f1-score": 0.19540229885057472,
"precision": 0.8947368421052632,
"recall": 0.10967741935483871,
"support": 155.0
},
"eval_B-Premise": {
"f1-score": 0.8476339053562143,
"precision": 0.740909090909091,
"recall": 0.9902794653705954,
"support": 823.0
},
"eval_I-Claim": {
"f1-score": 0.4315998169894769,
"precision": 0.6394035246272028,
"recall": 0.3257366482504604,
"support": 4344.0
},
"eval_I-MajorClaim": {
"f1-score": 0.7779799818016379,
"precision": 0.7493426818580193,
"recall": 0.8088930936613056,
"support": 2114.0
},
"eval_I-Premise": {
"f1-score": 0.8948683715038855,
"precision": 0.8376057421174058,
"recall": 0.9605350187403542,
"support": 13607.0
},
"eval_O": {
"f1-score": 0.9980147987727848,
"precision": 0.9970251509961237,
"recall": 0.9990064131514769,
"support": 11071.0
},
"eval_accuracy": 0.8673183065585397,
"eval_loss": 0.3193369209766388,
"eval_macro avg": {
"f1-score": 0.6190203134099342,
"precision": 0.7429556713256817,
"recall": 0.6176379425658869,
"support": 32431.0
},
"eval_runtime": 4.9083,
"eval_samples_per_second": 16.299,
"eval_steps_per_second": 2.037,
"eval_weighted avg": {
"f1-score": 0.8489527906249091,
"precision": 0.8526967051825685,
"recall": 0.8673183065585397,
"support": 32431.0
},
"step": 81
},
{
"epoch": 2.0,
"eval_B-Claim": {
"f1-score": 0.583941605839416,
"precision": 0.6926406926406926,
"recall": 0.5047318611987381,
"support": 317.0
},
"eval_B-MajorClaim": {
"f1-score": 0.8117647058823529,
"precision": 0.745945945945946,
"recall": 0.8903225806451613,
"support": 155.0
},
"eval_B-Premise": {
"f1-score": 0.8943661971830986,
"precision": 0.8649262202043133,
"recall": 0.9258809234507898,
"support": 823.0
},
"eval_I-Claim": {
"f1-score": 0.6197740770932805,
"precision": 0.6271506009898656,
"recall": 0.6125690607734806,
"support": 4344.0
},
"eval_I-MajorClaim": {
"f1-score": 0.8298737427776589,
"precision": 0.7577178585384916,
"recall": 0.9172185430463576,
"support": 2114.0
},
"eval_I-Premise": {
"f1-score": 0.9013697613125952,
"precision": 0.9106660666066607,
"recall": 0.8922613360770192,
"support": 13607.0
},
"eval_O": {
"f1-score": 0.9961487925331883,
"precision": 0.9993636363636363,
"recall": 0.9929545659831993,
"support": 11071.0
},
"eval_accuracy": 0.887854213561099,
"eval_loss": 0.26407390832901,
"eval_macro avg": {
"f1-score": 0.8053198403745129,
"precision": 0.7997730030413723,
"recall": 0.8194198387392494,
"support": 32431.0
},
"eval_runtime": 4.9329,
"eval_samples_per_second": 16.218,
"eval_steps_per_second": 2.027,
"eval_weighted avg": {
"f1-score": 0.8876368199002541,
"precision": 0.8889201304482091,
"recall": 0.887854213561099,
"support": 32431.0
},
"step": 162
}
],
"logging_steps": 500,
"max_steps": 4050,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"total_flos": 287581625436000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}