fine-tuning-llama/checkpoint-110/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 22,
"global_step": 110,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 1.6666666666666667e-05,
"loss": 2.3955,
"step": 1
},
{
"epoch": 0.04,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.3998,
"step": 2
},
{
"epoch": 0.05,
"learning_rate": 5e-05,
"loss": 2.516,
"step": 3
},
{
"epoch": 0.07,
"learning_rate": 6.666666666666667e-05,
"loss": 2.3958,
"step": 4
},
{
"epoch": 0.09,
"learning_rate": 8.333333333333334e-05,
"loss": 2.3902,
"step": 5
},
{
"epoch": 0.11,
"learning_rate": 0.0001,
"loss": 2.299,
"step": 6
},
{
"epoch": 0.13,
"learning_rate": 9.997718922447667e-05,
"loss": 2.1911,
"step": 7
},
{
"epoch": 0.15,
"learning_rate": 9.990877771116589e-05,
"loss": 2.2146,
"step": 8
},
{
"epoch": 0.16,
"learning_rate": 9.979482788085454e-05,
"loss": 2.0744,
"step": 9
},
{
"epoch": 0.18,
"learning_rate": 9.96354437049027e-05,
"loss": 2.0069,
"step": 10
},
{
"epoch": 0.2,
"learning_rate": 9.943077061037671e-05,
"loss": 2.1008,
"step": 11
},
{
"epoch": 0.22,
"learning_rate": 9.918099534735718e-05,
"loss": 1.8843,
"step": 12
},
{
"epoch": 0.24,
"learning_rate": 9.888634581854234e-05,
"loss": 1.8236,
"step": 13
},
{
"epoch": 0.25,
"learning_rate": 9.85470908713026e-05,
"loss": 2.1407,
"step": 14
},
{
"epoch": 0.27,
"learning_rate": 9.816354005237583e-05,
"loss": 2.1441,
"step": 15
},
{
"epoch": 0.29,
"learning_rate": 9.773604332542729e-05,
"loss": 2.0154,
"step": 16
},
{
"epoch": 0.31,
"learning_rate": 9.726499075173201e-05,
"loss": 1.8462,
"step": 17
},
{
"epoch": 0.33,
"learning_rate": 9.675081213427076e-05,
"loss": 2.0586,
"step": 18
},
{
"epoch": 0.35,
"learning_rate": 9.619397662556435e-05,
"loss": 2.0088,
"step": 19
},
{
"epoch": 0.36,
"learning_rate": 9.559499229960451e-05,
"loss": 1.9887,
"step": 20
},
{
"epoch": 0.38,
"learning_rate": 9.495440568827129e-05,
"loss": 2.0002,
"step": 21
},
{
"epoch": 0.4,
"learning_rate": 9.42728012826605e-05,
"loss": 1.9048,
"step": 22
},
{
"epoch": 0.4,
"eval_loss": 1.921988606452942,
"eval_runtime": 62.7536,
"eval_samples_per_second": 1.753,
"eval_steps_per_second": 0.223,
"step": 22
},
{
"epoch": 0.42,
"learning_rate": 9.355080099977578e-05,
"loss": 1.8774,
"step": 23
},
{
"epoch": 0.44,
"learning_rate": 9.278906361507238e-05,
"loss": 1.9554,
"step": 24
},
{
"epoch": 0.45,
"learning_rate": 9.19882841613699e-05,
"loss": 1.8387,
"step": 25
},
{
"epoch": 0.47,
"learning_rate": 9.114919329468282e-05,
"loss": 1.7612,
"step": 26
},
{
"epoch": 0.49,
"learning_rate": 9.02725566275473e-05,
"loss": 2.0803,
"step": 27
},
{
"epoch": 0.51,
"learning_rate": 8.935917403045251e-05,
"loss": 1.9799,
"step": 28
},
{
"epoch": 0.53,
"learning_rate": 8.840987890201403e-05,
"loss": 2.0421,
"step": 29
},
{
"epoch": 0.55,
"learning_rate": 8.742553740855506e-05,
"loss": 1.9403,
"step": 30
},
{
"epoch": 0.56,
"learning_rate": 8.640704769378942e-05,
"loss": 1.8666,
"step": 31
},
{
"epoch": 0.58,
"learning_rate": 8.535533905932738e-05,
"loss": 2.0529,
"step": 32
},
{
"epoch": 0.6,
"learning_rate": 8.427137111675199e-05,
"loss": 1.8445,
"step": 33
},
{
"epoch": 0.62,
"learning_rate": 8.315613291203976e-05,
"loss": 1.8299,
"step": 34
},
{
"epoch": 0.64,
"learning_rate": 8.201064202312441e-05,
"loss": 1.8462,
"step": 35
},
{
"epoch": 0.65,
"learning_rate": 8.083594363142717e-05,
"loss": 1.8504,
"step": 36
},
{
"epoch": 0.67,
"learning_rate": 7.963310956820085e-05,
"loss": 1.7768,
"step": 37
},
{
"epoch": 0.69,
"learning_rate": 7.840323733655778e-05,
"loss": 1.7158,
"step": 38
},
{
"epoch": 0.71,
"learning_rate": 7.714744911007394e-05,
"loss": 1.6559,
"step": 39
},
{
"epoch": 0.73,
"learning_rate": 7.586689070888284e-05,
"loss": 2.0479,
"step": 40
},
{
"epoch": 0.75,
"learning_rate": 7.456273055419388e-05,
"loss": 2.0672,
"step": 41
},
{
"epoch": 0.76,
"learning_rate": 7.323615860218843e-05,
"loss": 2.0359,
"step": 42
},
{
"epoch": 0.78,
"learning_rate": 7.188838525826702e-05,
"loss": 2.0495,
"step": 43
},
{
"epoch": 0.8,
"learning_rate": 7.052064027263786e-05,
"loss": 1.824,
"step": 44
},
{
"epoch": 0.8,
"eval_loss": 1.8808680772781372,
"eval_runtime": 62.6947,
"eval_samples_per_second": 1.755,
"eval_steps_per_second": 0.223,
"step": 44
},
{
"epoch": 0.82,
"learning_rate": 6.91341716182545e-05,
"loss": 1.8313,
"step": 45
},
{
"epoch": 0.84,
"learning_rate": 6.773024435212678e-05,
"loss": 1.9359,
"step": 46
},
{
"epoch": 0.85,
"learning_rate": 6.631013946104347e-05,
"loss": 1.9485,
"step": 47
},
{
"epoch": 0.87,
"learning_rate": 6.487515269276016e-05,
"loss": 1.8043,
"step": 48
},
{
"epoch": 0.89,
"learning_rate": 6.342659337371885e-05,
"loss": 1.9166,
"step": 49
},
{
"epoch": 0.91,
"learning_rate": 6.19657832143779e-05,
"loss": 1.8612,
"step": 50
},
{
"epoch": 0.93,
"learning_rate": 6.049405510324238e-05,
"loss": 1.8936,
"step": 51
},
{
"epoch": 0.95,
"learning_rate": 5.90127518906953e-05,
"loss": 1.6797,
"step": 52
},
{
"epoch": 0.96,
"learning_rate": 5.752322516373916e-05,
"loss": 1.951,
"step": 53
},
{
"epoch": 0.98,
"learning_rate": 5.602683401276615e-05,
"loss": 1.9156,
"step": 54
},
{
"epoch": 1.0,
"learning_rate": 5.45249437914819e-05,
"loss": 1.804,
"step": 55
},
{
"epoch": 1.02,
"learning_rate": 5.3018924871114305e-05,
"loss": 1.9628,
"step": 56
},
{
"epoch": 1.04,
"learning_rate": 5.151015139004445e-05,
"loss": 1.8577,
"step": 57
},
{
"epoch": 1.05,
"learning_rate": 5e-05,
"loss": 2.0029,
"step": 58
},
{
"epoch": 1.07,
"learning_rate": 4.848984860995557e-05,
"loss": 1.765,
"step": 59
},
{
"epoch": 1.09,
"learning_rate": 4.6981075128885693e-05,
"loss": 1.7553,
"step": 60
},
{
"epoch": 1.11,
"learning_rate": 4.547505620851811e-05,
"loss": 1.8855,
"step": 61
},
{
"epoch": 1.13,
"learning_rate": 4.397316598723385e-05,
"loss": 1.8231,
"step": 62
},
{
"epoch": 1.15,
"learning_rate": 4.2476774836260845e-05,
"loss": 1.8003,
"step": 63
},
{
"epoch": 1.16,
"learning_rate": 4.0987248109304714e-05,
"loss": 1.7616,
"step": 64
},
{
"epoch": 1.18,
"learning_rate": 3.950594489675763e-05,
"loss": 1.8023,
"step": 65
},
{
"epoch": 1.2,
"learning_rate": 3.803421678562213e-05,
"loss": 1.6784,
"step": 66
},
{
"epoch": 1.2,
"eval_loss": 1.861940860748291,
"eval_runtime": 62.7157,
"eval_samples_per_second": 1.754,
"eval_steps_per_second": 0.223,
"step": 66
},
{
"epoch": 1.22,
"learning_rate": 3.657340662628116e-05,
"loss": 1.6669,
"step": 67
},
{
"epoch": 1.24,
"learning_rate": 3.512484730723986e-05,
"loss": 1.6333,
"step": 68
},
{
"epoch": 1.25,
"learning_rate": 3.368986053895655e-05,
"loss": 1.9278,
"step": 69
},
{
"epoch": 1.27,
"learning_rate": 3.226975564787322e-05,
"loss": 1.9157,
"step": 70
},
{
"epoch": 1.29,
"learning_rate": 3.086582838174551e-05,
"loss": 1.8783,
"step": 71
},
{
"epoch": 1.31,
"learning_rate": 2.9479359727362173e-05,
"loss": 1.7518,
"step": 72
},
{
"epoch": 1.33,
"learning_rate": 2.811161474173297e-05,
"loss": 1.8523,
"step": 73
},
{
"epoch": 1.35,
"learning_rate": 2.6763841397811573e-05,
"loss": 1.7893,
"step": 74
},
{
"epoch": 1.36,
"learning_rate": 2.5437269445806145e-05,
"loss": 1.7636,
"step": 75
},
{
"epoch": 1.38,
"learning_rate": 2.4133109291117156e-05,
"loss": 1.8228,
"step": 76
},
{
"epoch": 1.4,
"learning_rate": 2.2852550889926067e-05,
"loss": 1.7656,
"step": 77
},
{
"epoch": 1.42,
"learning_rate": 2.1596762663442218e-05,
"loss": 1.746,
"step": 78
},
{
"epoch": 1.44,
"learning_rate": 2.0366890431799167e-05,
"loss": 1.7256,
"step": 79
},
{
"epoch": 1.45,
"learning_rate": 1.9164056368572846e-05,
"loss": 1.6128,
"step": 80
},
{
"epoch": 1.47,
"learning_rate": 1.7989357976875603e-05,
"loss": 1.6727,
"step": 81
},
{
"epoch": 1.49,
"learning_rate": 1.684386708796025e-05,
"loss": 2.0976,
"step": 82
},
{
"epoch": 1.51,
"learning_rate": 1.5728628883248007e-05,
"loss": 1.8464,
"step": 83
},
{
"epoch": 1.53,
"learning_rate": 1.4644660940672627e-05,
"loss": 2.0062,
"step": 84
},
{
"epoch": 1.55,
"learning_rate": 1.3592952306210588e-05,
"loss": 1.837,
"step": 85
},
{
"epoch": 1.56,
"learning_rate": 1.257446259144494e-05,
"loss": 1.8183,
"step": 86
},
{
"epoch": 1.58,
"learning_rate": 1.159012109798598e-05,
"loss": 1.8,
"step": 87
},
{
"epoch": 1.6,
"learning_rate": 1.0640825969547496e-05,
"loss": 1.77,
"step": 88
},
{
"epoch": 1.6,
"eval_loss": 1.8537334203720093,
"eval_runtime": 62.6147,
"eval_samples_per_second": 1.757,
"eval_steps_per_second": 0.224,
"step": 88
},
{
"epoch": 1.62,
"learning_rate": 9.7274433724527e-06,
"loss": 1.914,
"step": 89
},
{
"epoch": 1.64,
"learning_rate": 8.850806705317183e-06,
"loss": 1.8016,
"step": 90
},
{
"epoch": 1.65,
"learning_rate": 8.011715838630107e-06,
"loss": 1.7734,
"step": 91
},
{
"epoch": 1.67,
"learning_rate": 7.21093638492763e-06,
"loss": 1.7373,
"step": 92
},
{
"epoch": 1.69,
"learning_rate": 6.449199000224221e-06,
"loss": 1.7791,
"step": 93
},
{
"epoch": 1.71,
"learning_rate": 5.727198717339511e-06,
"loss": 1.5139,
"step": 94
},
{
"epoch": 1.73,
"learning_rate": 5.045594311728707e-06,
"loss": 1.9854,
"step": 95
},
{
"epoch": 1.75,
"learning_rate": 4.405007700395497e-06,
"loss": 1.9952,
"step": 96
},
{
"epoch": 1.76,
"learning_rate": 3.8060233744356633e-06,
"loss": 1.9239,
"step": 97
},
{
"epoch": 1.78,
"learning_rate": 3.249187865729264e-06,
"loss": 1.8075,
"step": 98
},
{
"epoch": 1.8,
"learning_rate": 2.7350092482679836e-06,
"loss": 1.7052,
"step": 99
},
{
"epoch": 1.82,
"learning_rate": 2.2639566745727205e-06,
"loss": 1.9388,
"step": 100
},
{
"epoch": 1.84,
"learning_rate": 1.8364599476241862e-06,
"loss": 1.8453,
"step": 101
},
{
"epoch": 1.85,
"learning_rate": 1.4529091286973995e-06,
"loss": 1.6221,
"step": 102
},
{
"epoch": 1.87,
"learning_rate": 1.1136541814576573e-06,
"loss": 1.7256,
"step": 103
},
{
"epoch": 1.89,
"learning_rate": 8.190046526428242e-07,
"loss": 1.7185,
"step": 104
},
{
"epoch": 1.91,
"learning_rate": 5.692293896232936e-07,
"loss": 1.7981,
"step": 105
},
{
"epoch": 1.93,
"learning_rate": 3.6455629509730136e-07,
"loss": 1.7508,
"step": 106
},
{
"epoch": 1.95,
"learning_rate": 2.0517211914545254e-07,
"loss": 1.5822,
"step": 107
},
{
"epoch": 1.96,
"learning_rate": 9.12222888341252e-08,
"loss": 1.9108,
"step": 108
},
{
"epoch": 1.98,
"learning_rate": 2.2810775523329773e-08,
"loss": 1.758,
"step": 109
},
{
"epoch": 2.0,
"learning_rate": 0.0,
"loss": 1.6501,
"step": 110
},
{
"epoch": 2.0,
"eval_loss": 1.8522212505340576,
"eval_runtime": 62.6224,
"eval_samples_per_second": 1.757,
"eval_steps_per_second": 0.224,
"step": 110
}
],
"logging_steps": 1,
"max_steps": 110,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 1.2865354251706368e+16,
"trial_name": null,
"trial_params": null
}