llama2-PT/checkpoint-200/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11058888581697539,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9999895164082156e-05,
"loss": 1.6349,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 4.999958065720787e-05,
"loss": 1.6199,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.999905648201487e-05,
"loss": 1.4834,
"step": 15
},
{
"epoch": 0.01,
"learning_rate": 4.999832264289934e-05,
"loss": 1.3882,
"step": 20
},
{
"epoch": 0.01,
"learning_rate": 4.999737914601591e-05,
"loss": 1.3679,
"step": 25
},
{
"epoch": 0.02,
"learning_rate": 4.999622599927756e-05,
"loss": 1.2396,
"step": 30
},
{
"epoch": 0.02,
"learning_rate": 4.999486321235559e-05,
"loss": 1.321,
"step": 35
},
{
"epoch": 0.02,
"learning_rate": 4.9993290796679516e-05,
"loss": 1.2874,
"step": 40
},
{
"epoch": 0.02,
"learning_rate": 4.999150876543699e-05,
"loss": 1.2607,
"step": 45
},
{
"epoch": 0.03,
"learning_rate": 4.9989517133573694e-05,
"loss": 1.2454,
"step": 50
},
{
"epoch": 0.03,
"learning_rate": 4.9987315917793174e-05,
"loss": 1.2799,
"step": 55
},
{
"epoch": 0.03,
"learning_rate": 4.998490513655676e-05,
"loss": 1.2575,
"step": 60
},
{
"epoch": 0.04,
"learning_rate": 4.998228481008337e-05,
"loss": 1.2404,
"step": 65
},
{
"epoch": 0.04,
"learning_rate": 4.997945496034934e-05,
"loss": 1.2219,
"step": 70
},
{
"epoch": 0.04,
"learning_rate": 4.9976415611088267e-05,
"loss": 1.2241,
"step": 75
},
{
"epoch": 0.04,
"learning_rate": 4.997316678779079e-05,
"loss": 1.1716,
"step": 80
},
{
"epoch": 0.05,
"learning_rate": 4.996970851770438e-05,
"loss": 1.1883,
"step": 85
},
{
"epoch": 0.05,
"learning_rate": 4.9966040829833115e-05,
"loss": 1.205,
"step": 90
},
{
"epoch": 0.05,
"learning_rate": 4.9962163754937426e-05,
"loss": 1.1246,
"step": 95
},
{
"epoch": 0.06,
"learning_rate": 4.995807732553384e-05,
"loss": 1.1636,
"step": 100
},
{
"epoch": 0.06,
"learning_rate": 4.9953781575894723e-05,
"loss": 1.158,
"step": 105
},
{
"epoch": 0.06,
"learning_rate": 4.9949276542048e-05,
"loss": 1.1477,
"step": 110
},
{
"epoch": 0.06,
"learning_rate": 4.9944562261776805e-05,
"loss": 1.1678,
"step": 115
},
{
"epoch": 0.07,
"learning_rate": 4.9939638774619216e-05,
"loss": 1.1501,
"step": 120
},
{
"epoch": 0.07,
"learning_rate": 4.99345061218679e-05,
"loss": 1.1955,
"step": 125
},
{
"epoch": 0.07,
"learning_rate": 4.9929164346569756e-05,
"loss": 1.1724,
"step": 130
},
{
"epoch": 0.07,
"learning_rate": 4.9923613493525576e-05,
"loss": 1.177,
"step": 135
},
{
"epoch": 0.08,
"learning_rate": 4.991785360928968e-05,
"loss": 1.1418,
"step": 140
},
{
"epoch": 0.08,
"learning_rate": 4.991188474216947e-05,
"loss": 1.1898,
"step": 145
},
{
"epoch": 0.08,
"learning_rate": 4.9905706942225094e-05,
"loss": 1.1479,
"step": 150
},
{
"epoch": 0.09,
"learning_rate": 4.9899320261268966e-05,
"loss": 1.1356,
"step": 155
},
{
"epoch": 0.09,
"learning_rate": 4.989272475286537e-05,
"loss": 1.1397,
"step": 160
},
{
"epoch": 0.09,
"learning_rate": 4.9885920472330004e-05,
"loss": 1.1215,
"step": 165
},
{
"epoch": 0.09,
"learning_rate": 4.9878907476729516e-05,
"loss": 1.167,
"step": 170
},
{
"epoch": 0.1,
"learning_rate": 4.9871685824881e-05,
"loss": 1.1219,
"step": 175
},
{
"epoch": 0.1,
"learning_rate": 4.9864255577351534e-05,
"loss": 1.0835,
"step": 180
},
{
"epoch": 0.1,
"learning_rate": 4.985661679645769e-05,
"loss": 1.0721,
"step": 185
},
{
"epoch": 0.11,
"learning_rate": 4.9848769546264915e-05,
"loss": 1.0692,
"step": 190
},
{
"epoch": 0.11,
"learning_rate": 4.9840713892587146e-05,
"loss": 1.0488,
"step": 195
},
{
"epoch": 0.11,
"learning_rate": 4.983244990298609e-05,
"loss": 1.1285,
"step": 200
}
],
"max_steps": 5424,
"num_train_epochs": 3,
"total_flos": 6.4994062368768e+16,
"trial_name": null,
"trial_params": null
}
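
The log above can be read back with a few lines of Python. The sketch below is an illustration only (not shipped with this checkpoint) and assumes the file sits at the path shown in the header; each log_history entry carries the epoch, learning_rate, loss, and step fields recorded above.

    import json

    # Load the saved trainer state (path is an assumption for this example).
    with open("llama2-PT/checkpoint-200/trainer_state.json") as f:
        state = json.load(f)

    # Print the logged training loss and learning rate at each recorded step.
    for entry in state["log_history"]:
        print(f'step {entry["step"]:>4}  epoch {entry["epoch"]:.2f}  '
              f'lr {entry["learning_rate"]:.3e}  loss {entry["loss"]:.4f}')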