bart-base-cantonese-v8 / trainer_state.json
End of training (commit 9367b3c)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"global_step": 13400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.37,
"learning_rate": 3.7313432835820895e-07,
"loss": 2.3966,
"step": 500
},
{
"epoch": 0.75,
"learning_rate": 7.462686567164179e-07,
"loss": 1.5891,
"step": 1000
},
{
"epoch": 1.12,
"learning_rate": 9.867330016583748e-07,
"loss": 1.401,
"step": 1500
},
{
"epoch": 1.49,
"learning_rate": 9.45273631840796e-07,
"loss": 1.325,
"step": 2000
},
{
"epoch": 1.87,
"learning_rate": 9.038142620232172e-07,
"loss": 1.2832,
"step": 2500
},
{
"epoch": 2.24,
"learning_rate": 8.623548922056384e-07,
"loss": 1.256,
"step": 3000
},
{
"epoch": 2.61,
"learning_rate": 8.208955223880597e-07,
"loss": 1.2343,
"step": 3500
},
{
"epoch": 2.99,
"learning_rate": 7.794361525704809e-07,
"loss": 1.223,
"step": 4000
},
{
"epoch": 3.36,
"learning_rate": 7.379767827529021e-07,
"loss": 1.2113,
"step": 4500
},
{
"epoch": 3.73,
"learning_rate": 6.965174129353234e-07,
"loss": 1.2001,
"step": 5000
},
{
"epoch": 4.1,
"learning_rate": 6.550580431177446e-07,
"loss": 1.1904,
"step": 5500
},
{
"epoch": 4.48,
"learning_rate": 6.135986733001658e-07,
"loss": 1.1879,
"step": 6000
},
{
"epoch": 4.85,
"learning_rate": 5.72139303482587e-07,
"loss": 1.1812,
"step": 6500
},
{
"epoch": 5.22,
"learning_rate": 5.306799336650083e-07,
"loss": 1.1746,
"step": 7000
},
{
"epoch": 5.6,
"learning_rate": 4.892205638474295e-07,
"loss": 1.1716,
"step": 7500
},
{
"epoch": 5.97,
"learning_rate": 4.4776119402985074e-07,
"loss": 1.1661,
"step": 8000
},
{
"epoch": 6.34,
"learning_rate": 4.0630182421227194e-07,
"loss": 1.1627,
"step": 8500
},
{
"epoch": 6.72,
"learning_rate": 3.648424543946932e-07,
"loss": 1.1596,
"step": 9000
},
{
"epoch": 7.09,
"learning_rate": 3.2338308457711446e-07,
"loss": 1.1563,
"step": 9500
},
{
"epoch": 7.46,
"learning_rate": 2.8192371475953566e-07,
"loss": 1.1552,
"step": 10000
},
{
"epoch": 7.84,
"learning_rate": 2.4046434494195686e-07,
"loss": 1.1539,
"step": 10500
},
{
"epoch": 8.21,
"learning_rate": 1.9900497512437812e-07,
"loss": 1.1505,
"step": 11000
},
{
"epoch": 8.58,
"learning_rate": 1.5754560530679933e-07,
"loss": 1.1494,
"step": 11500
},
{
"epoch": 8.96,
"learning_rate": 1.1608623548922056e-07,
"loss": 1.1501,
"step": 12000
},
{
"epoch": 9.33,
"learning_rate": 7.462686567164178e-08,
"loss": 1.1478,
"step": 12500
},
{
"epoch": 9.7,
"learning_rate": 3.316749585406302e-08,
"loss": 1.147,
"step": 13000
},
{
"epoch": 10.0,
"step": 13400,
"total_flos": 1.306695716831232e+17,
"train_loss": 1.2553224705938082,
"train_runtime": 11299.6989,
"train_samples_per_second": 37.931,
"train_steps_per_second": 1.186
}
],
"max_steps": 13400,
"num_train_epochs": 10,
"total_flos": 1.306695716831232e+17,
"trial_name": null,
"trial_params": null
}
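
For reference, a minimal, hypothetical Python sketch of how this trainer_state.json can be inspected; the field names match the JSON above, but the script itself is illustrative and not part of the checkpoint. It assumes the file sits in the current working directory.

```python
import json

# Load the trainer state dumped by the Hugging Face Trainer (the JSON above).
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Every log_history entry except the final summary carries
# "epoch", "learning_rate", "loss", and "step".
for record in state["log_history"]:
    if "loss" in record:
        print(f"step {record['step']:>6}  epoch {record['epoch']:>5}  "
              f"lr {record['learning_rate']:.3e}  loss {record['loss']:.4f}")

# The last entry is the end-of-training summary.
summary = state["log_history"][-1]
print("train_loss:", summary["train_loss"])
print("train_runtime (s):", summary["train_runtime"])
```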