bart-base-cantonese-library-v3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"global_step": 13400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.37,
"learning_rate": 3.7313432835820895e-07,
"loss": 2.3932,
"step": 500
},
{
"epoch": 0.75,
"learning_rate": 7.462686567164179e-07,
"loss": 1.5891,
"step": 1000
},
{
"epoch": 1.12,
"learning_rate": 9.867330016583748e-07,
"loss": 1.3999,
"step": 1500
},
{
"epoch": 1.49,
"learning_rate": 9.45273631840796e-07,
"loss": 1.3236,
"step": 2000
},
{
"epoch": 1.87,
"learning_rate": 9.038142620232172e-07,
"loss": 1.2815,
"step": 2500
},
{
"epoch": 2.24,
"learning_rate": 8.623548922056384e-07,
"loss": 1.2556,
"step": 3000
},
{
"epoch": 2.61,
"learning_rate": 8.208955223880597e-07,
"loss": 1.2359,
"step": 3500
},
{
"epoch": 2.99,
"learning_rate": 7.794361525704809e-07,
"loss": 1.2238,
"step": 4000
},
{
"epoch": 3.36,
"learning_rate": 7.379767827529021e-07,
"loss": 1.2116,
"step": 4500
},
{
"epoch": 3.73,
"learning_rate": 6.965174129353234e-07,
"loss": 1.1993,
"step": 5000
},
{
"epoch": 4.1,
"learning_rate": 6.550580431177446e-07,
"loss": 1.1918,
"step": 5500
},
{
"epoch": 4.48,
"learning_rate": 6.135986733001658e-07,
"loss": 1.1856,
"step": 6000
},
{
"epoch": 4.85,
"learning_rate": 5.72139303482587e-07,
"loss": 1.1776,
"step": 6500
},
{
"epoch": 5.22,
"learning_rate": 5.306799336650083e-07,
"loss": 1.1755,
"step": 7000
},
{
"epoch": 5.6,
"learning_rate": 4.892205638474295e-07,
"loss": 1.1685,
"step": 7500
},
{
"epoch": 5.97,
"learning_rate": 4.4776119402985074e-07,
"loss": 1.1642,
"step": 8000
},
{
"epoch": 6.34,
"learning_rate": 4.0630182421227194e-07,
"loss": 1.1622,
"step": 8500
},
{
"epoch": 6.72,
"learning_rate": 3.648424543946932e-07,
"loss": 1.1584,
"step": 9000
},
{
"epoch": 7.09,
"learning_rate": 3.2338308457711446e-07,
"loss": 1.1558,
"step": 9500
},
{
"epoch": 7.46,
"learning_rate": 2.8192371475953566e-07,
"loss": 1.1551,
"step": 10000
},
{
"epoch": 7.84,
"learning_rate": 2.4046434494195686e-07,
"loss": 1.1541,
"step": 10500
},
{
"epoch": 8.21,
"learning_rate": 1.9900497512437812e-07,
"loss": 1.1495,
"step": 11000
},
{
"epoch": 8.58,
"learning_rate": 1.5754560530679933e-07,
"loss": 1.1479,
"step": 11500
},
{
"epoch": 8.96,
"learning_rate": 1.1608623548922056e-07,
"loss": 1.1503,
"step": 12000
},
{
"epoch": 9.33,
"learning_rate": 7.462686567164178e-08,
"loss": 1.1471,
"step": 12500
},
{
"epoch": 9.7,
"learning_rate": 3.316749585406302e-08,
"loss": 1.1464,
"step": 13000
},
{
"epoch": 10.0,
"step": 13400,
"total_flos": 1.306695716831232e+17,
"train_loss": 1.2545250599419893,
"train_runtime": 11484.6731,
"train_samples_per_second": 37.32,
"train_steps_per_second": 1.167
}
],
"max_steps": 13400,
"num_train_epochs": 10,
"total_flos": 1.306695716831232e+17,
"trial_name": null,
"trial_params": null
}
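
A minimal sketch of inspecting this state file, assuming the JSON above is saved locally as trainer_state.json and that matplotlib is installed; it filters out the final summary entry (which reports "train_loss" rather than "loss") and plots the logged training loss against the global step:

import json
import matplotlib.pyplot as plt

# Load the state exported by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the closing summary entry
# has no "loss"/"learning_rate" pair and is skipped.
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses, marker="o")
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("bart-base-cantonese-library-v3 training loss")
plt.savefig("train_loss.png")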