{
"best_metric": 0.9176467061042786,
"best_model_checkpoint": "FinBERT_market_based/checkpoint-1432",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1432,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 1.1162790697674418e-05,
"loss": 1.4577,
"step": 48
},
{
"epoch": 0.13,
"learning_rate": 2.2325581395348837e-05,
"loss": 1.0761,
"step": 96
},
{
"epoch": 0.2,
"learning_rate": 3.348837209302326e-05,
"loss": 1.0211,
"step": 144
},
{
"epoch": 0.27,
"learning_rate": 4.465116279069767e-05,
"loss": 1.0051,
"step": 192
},
{
"epoch": 0.34,
"learning_rate": 4.935333678220383e-05,
"loss": 0.9814,
"step": 240
},
{
"epoch": 0.4,
"learning_rate": 4.811174340403518e-05,
"loss": 0.9718,
"step": 288
},
{
"epoch": 0.47,
"learning_rate": 4.687015002586653e-05,
"loss": 0.9401,
"step": 336
},
{
"epoch": 0.54,
"learning_rate": 4.562855664769788e-05,
"loss": 0.9486,
"step": 384
},
{
"epoch": 0.6,
"learning_rate": 4.438696326952923e-05,
"loss": 0.9477,
"step": 432
},
{
"epoch": 0.67,
"learning_rate": 4.3145369891360584e-05,
"loss": 0.9236,
"step": 480
},
{
"epoch": 0.74,
"learning_rate": 4.190377651319193e-05,
"loss": 0.9295,
"step": 528
},
{
"epoch": 0.8,
"learning_rate": 4.066218313502328e-05,
"loss": 0.9388,
"step": 576
},
{
"epoch": 0.87,
"learning_rate": 3.942058975685463e-05,
"loss": 0.9446,
"step": 624
},
{
"epoch": 0.94,
"learning_rate": 3.8178996378685985e-05,
"loss": 0.9324,
"step": 672
},
{
"epoch": 1.0,
"eval_accuracy": 0.5537056232184504,
"eval_f1_macro": 0.4530869728302888,
"eval_f1_micro": 0.5537056232184504,
"eval_f1_weighted": 0.49220984693785635,
"eval_loss": 0.9287100434303284,
"eval_precision_macro": 0.5454170327986705,
"eval_precision_micro": 0.5537056232184504,
"eval_precision_weighted": 0.5470355528203532,
"eval_recall_macro": 0.49494460688671565,
"eval_recall_micro": 0.5537056232184504,
"eval_recall_weighted": 0.5537056232184504,
"eval_runtime": 2411.8627,
"eval_samples_per_second": 6.4,
"eval_steps_per_second": 0.05,
"step": 716
},
{
"epoch": 1.01,
"learning_rate": 3.693740300051733e-05,
"loss": 0.9273,
"step": 720
},
{
"epoch": 1.07,
"learning_rate": 3.569580962234868e-05,
"loss": 0.901,
"step": 768
},
{
"epoch": 1.14,
"learning_rate": 3.445421624418003e-05,
"loss": 0.8876,
"step": 816
},
{
"epoch": 1.21,
"learning_rate": 3.3212622866011386e-05,
"loss": 0.8826,
"step": 864
},
{
"epoch": 1.27,
"learning_rate": 3.197102948784273e-05,
"loss": 0.8802,
"step": 912
},
{
"epoch": 1.34,
"learning_rate": 3.072943610967408e-05,
"loss": 0.8896,
"step": 960
},
{
"epoch": 1.41,
"learning_rate": 2.9487842731505433e-05,
"loss": 0.885,
"step": 1008
},
{
"epoch": 1.47,
"learning_rate": 2.8246249353336783e-05,
"loss": 0.8706,
"step": 1056
},
{
"epoch": 1.54,
"learning_rate": 2.7004655975168137e-05,
"loss": 0.8925,
"step": 1104
},
{
"epoch": 1.61,
"learning_rate": 2.5763062596999484e-05,
"loss": 0.8952,
"step": 1152
},
{
"epoch": 1.68,
"learning_rate": 2.4521469218830834e-05,
"loss": 0.8921,
"step": 1200
},
{
"epoch": 1.74,
"learning_rate": 2.3279875840662184e-05,
"loss": 0.8934,
"step": 1248
},
{
"epoch": 1.81,
"learning_rate": 2.2038282462493534e-05,
"loss": 0.8923,
"step": 1296
},
{
"epoch": 1.88,
"learning_rate": 2.0796689084324885e-05,
"loss": 0.8778,
"step": 1344
},
{
"epoch": 1.94,
"learning_rate": 1.9555095706156235e-05,
"loss": 0.8738,
"step": 1392
},
{
"epoch": 2.0,
"eval_accuracy": 0.5627105467737756,
"eval_f1_macro": 0.49749240436690023,
"eval_f1_micro": 0.5627105467737756,
"eval_f1_weighted": 0.5279720746084178,
"eval_loss": 0.9176467061042786,
"eval_precision_macro": 0.5386355574899088,
"eval_precision_micro": 0.5627105467737756,
"eval_precision_weighted": 0.5462149036191247,
"eval_recall_macro": 0.517542664344306,
"eval_recall_micro": 0.5627105467737756,
"eval_recall_weighted": 0.5627105467737756,
"eval_runtime": 2502.0236,
"eval_samples_per_second": 6.169,
"eval_steps_per_second": 0.048,
"step": 1432
}
],
"logging_steps": 48,
"max_steps": 2148,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 6027139059895296.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}