{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995835068721366,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 3.0000000000000004e-07,
"loss": 11.1022,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 8.000000000000001e-07,
"loss": 10.5874,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 1.3e-06,
"loss": 9.359,
"step": 30
},
{
"epoch": 0.13,
"learning_rate": 1.8e-06,
"loss": 7.4948,
"step": 40
},
{
"epoch": 0.17,
"learning_rate": 2.3e-06,
"loss": 5.7244,
"step": 50
},
{
"epoch": 0.2,
"learning_rate": 2.8000000000000003e-06,
"loss": 4.1675,
"step": 60
},
{
"epoch": 0.23,
"learning_rate": 3.3e-06,
"loss": 3.2211,
"step": 70
},
{
"epoch": 0.27,
"learning_rate": 3.8e-06,
"loss": 2.4147,
"step": 80
},
{
"epoch": 0.3,
"learning_rate": 4.2999999999999995e-06,
"loss": 1.7997,
"step": 90
},
{
"epoch": 0.33,
"learning_rate": 4.800000000000001e-06,
"loss": 1.4401,
"step": 100
},
{
"epoch": 0.37,
"learning_rate": 5.3e-06,
"loss": 1.2247,
"step": 110
},
{
"epoch": 0.4,
"learning_rate": 5.8e-06,
"loss": 1.0594,
"step": 120
},
{
"epoch": 0.43,
"learning_rate": 6.300000000000001e-06,
"loss": 0.9899,
"step": 130
},
{
"epoch": 0.47,
"learning_rate": 6.800000000000001e-06,
"loss": 0.8842,
"step": 140
},
{
"epoch": 0.5,
"learning_rate": 7.2999999999999996e-06,
"loss": 0.8798,
"step": 150
},
{
"epoch": 0.53,
"learning_rate": 7.8e-06,
"loss": 0.8872,
"step": 160
},
{
"epoch": 0.57,
"learning_rate": 8.3e-06,
"loss": 0.8889,
"step": 170
},
{
"epoch": 0.6,
"learning_rate": 8.8e-06,
"loss": 0.9344,
"step": 180
},
{
"epoch": 0.63,
"learning_rate": 9.3e-06,
"loss": 0.9867,
"step": 190
},
{
"epoch": 0.67,
"learning_rate": 9.800000000000001e-06,
"loss": 0.8925,
"step": 200
},
{
"epoch": 0.7,
"learning_rate": 1.03e-05,
"loss": 0.7869,
"step": 210
},
{
"epoch": 0.73,
"learning_rate": 1.08e-05,
"loss": 0.8847,
"step": 220
},
{
"epoch": 0.77,
"learning_rate": 1.13e-05,
"loss": 0.8221,
"step": 230
},
{
"epoch": 0.8,
"learning_rate": 1.18e-05,
"loss": 0.8611,
"step": 240
},
{
"epoch": 0.83,
"learning_rate": 1.23e-05,
"loss": 0.8544,
"step": 250
},
{
"epoch": 0.87,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.8061,
"step": 260
},
{
"epoch": 0.9,
"learning_rate": 1.3300000000000001e-05,
"loss": 0.7984,
"step": 270
},
{
"epoch": 0.93,
"learning_rate": 1.3800000000000002e-05,
"loss": 0.7396,
"step": 280
},
{
"epoch": 0.97,
"learning_rate": 1.43e-05,
"loss": 0.8653,
"step": 290
},
{
"epoch": 1.0,
"learning_rate": 1.48e-05,
"loss": 0.8675,
"step": 300
},
{
"epoch": 1.0,
"eval_loss": 0.8561133146286011,
"eval_runtime": 33.8169,
"eval_samples_per_second": 47.343,
"eval_steps_per_second": 7.895,
"step": 300
}
],
"logging_steps": 10,
"max_steps": 1800,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"total_flos": 3763911720960000.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}