{
"best_metric": 0.8322489857673645,
"best_model_checkpoint": "./Zeroshot/01-12-23-mistralai-Mistral-7B-v0.1_multilang-dataset-3.0.3-portuguese-2_epochs-10_batch_3/checkpoints/checkpoint-13546",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 13546,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"learning_rate": 5.835794447725931e-05,
"loss": 1.4468,
"step": 500
},
{
"epoch": 0.3,
"learning_rate": 0.00011742468989958655,
"loss": 0.9754,
"step": 1000
},
{
"epoch": 0.44,
"learning_rate": 0.00017649143532191377,
"loss": 0.9429,
"step": 1500
},
{
"epoch": 0.59,
"learning_rate": 0.000235558180744241,
"loss": 0.9147,
"step": 2000
},
{
"epoch": 0.74,
"learning_rate": 0.00029462492616656825,
"loss": 0.9067,
"step": 2500
},
{
"epoch": 0.89,
"learning_rate": 0.00035369167158889544,
"loss": 0.8978,
"step": 3000
},
{
"epoch": 1.0,
"eval_loss": 0.8805813789367676,
"eval_runtime": 88.2945,
"eval_samples_per_second": 17.057,
"eval_steps_per_second": 2.141,
"step": 3386
},
{
"epoch": 1.03,
"learning_rate": 0.00039998760393503537,
"loss": 0.895,
"step": 3500
},
{
"epoch": 1.18,
"learning_rate": 0.0003996072594095129,
"loss": 0.8687,
"step": 4000
},
{
"epoch": 1.33,
"learning_rate": 0.00039869668890858337,
"loss": 0.8884,
"step": 4500
},
{
"epoch": 1.48,
"learning_rate": 0.00039725831122269285,
"loss": 0.8715,
"step": 5000
},
{
"epoch": 1.62,
"learning_rate": 0.00039529594718087214,
"loss": 0.8645,
"step": 5500
},
{
"epoch": 1.77,
"learning_rate": 0.0003928148095012922,
"loss": 0.8666,
"step": 6000
},
{
"epoch": 1.92,
"learning_rate": 0.0003898214889444803,
"loss": 0.8719,
"step": 6500
},
{
"epoch": 2.0,
"eval_loss": 0.8552550673484802,
"eval_runtime": 88.3232,
"eval_samples_per_second": 17.051,
"eval_steps_per_second": 2.14,
"step": 6773
},
{
"epoch": 2.07,
"learning_rate": 0.00038632393680597854,
"loss": 0.8438,
"step": 7000
},
{
"epoch": 2.21,
"learning_rate": 0.0003823314437949511,
"loss": 0.8308,
"step": 7500
},
{
"epoch": 2.36,
"learning_rate": 0.00037785461535484375,
"loss": 0.8259,
"step": 8000
},
{
"epoch": 2.51,
"learning_rate": 0.0003729053434916558,
"loss": 0.8324,
"step": 8500
},
{
"epoch": 2.66,
"learning_rate": 0.0003674967751846552,
"loss": 0.8413,
"step": 9000
},
{
"epoch": 2.81,
"learning_rate": 0.0003616554183563445,
"loss": 0.8322,
"step": 9500
},
{
"epoch": 2.95,
"learning_rate": 0.00035537338261496887,
"loss": 0.8368,
"step": 10000
},
{
"epoch": 3.0,
"eval_loss": 0.8443310260772705,
"eval_runtime": 88.3102,
"eval_samples_per_second": 17.054,
"eval_steps_per_second": 2.14,
"step": 10159
},
{
"epoch": 3.1,
"learning_rate": 0.0003486786213865893,
"loss": 0.8088,
"step": 10500
},
{
"epoch": 3.25,
"learning_rate": 0.0003415889182744321,
"loss": 0.8003,
"step": 11000
},
{
"epoch": 3.4,
"learning_rate": 0.0003341231059840768,
"loss": 0.805,
"step": 11500
},
{
"epoch": 3.54,
"learning_rate": 0.0003263010162972709,
"loss": 0.8061,
"step": 12000
},
{
"epoch": 3.69,
"learning_rate": 0.00031814342739185336,
"loss": 0.8008,
"step": 12500
},
{
"epoch": 3.84,
"learning_rate": 0.000309672008647721,
"loss": 0.8029,
"step": 13000
},
{
"epoch": 3.99,
"learning_rate": 0.00030090926308545536,
"loss": 0.8056,
"step": 13500
},
{
"epoch": 4.0,
"eval_loss": 0.8322489857673645,
"eval_runtime": 88.3294,
"eval_samples_per_second": 17.05,
"eval_steps_per_second": 2.14,
"step": 13546
}
],
"logging_steps": 500,
"max_steps": 33860,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 6.802174422960538e+17,
"trial_name": null,
"trial_params": null
}
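
The JSON above is the `trainer_state.json` that the Hugging Face `Trainer` writes into each `checkpoint-*` directory. Entries in `log_history` with a `loss` key are the step-500 training logs; entries with `eval_loss` are the end-of-epoch evaluations (eval loss falls from 0.8806 at epoch 1 to 0.8322 at epoch 4, which is also the recorded `best_metric`). Below is a minimal sketch of how the file can be inspected; the file path and the optional plotting step are assumptions, only the keys (`log_history`, `best_metric`, `best_model_checkpoint`, `loss`, `eval_loss`) come from the JSON itself.

```python
# Minimal sketch: load the trainer state (assumed saved as trainer_state.json
# inside the checkpoint directory) and split the log into training-loss
# entries and per-epoch evaluation entries.
import json

# Hypothetical path; adjust to wherever the checkpoint actually lives.
STATE_PATH = "checkpoints/checkpoint-13546/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training logs carry "loss"; evaluation logs carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']:.4f} "
      f"(checkpoint: {state['best_model_checkpoint']})")

for e in eval_log:
    print(f"epoch {e['epoch']:.0f}  step {e['step']:>6}  "
          f"eval_loss {e['eval_loss']:.4f}")

# Optional: plot the training-loss curve if matplotlib is available.
try:
    import matplotlib.pyplot as plt

    plt.plot([e["step"] for e in train_log],
             [e["loss"] for e in train_log])
    plt.xlabel("global step")
    plt.ylabel("training loss")
    plt.title("Mistral-7B-v0.1 Portuguese fine-tune, epochs 1-4")
    plt.savefig("loss_curve.png")
except ImportError:
    pass
```

The same state file is what the `Trainer` reads back when training resumes from this checkpoint, which is why it also records scheduler context such as `max_steps` (33860), `num_train_epochs` (10), and `logging_steps`/`save_steps` (500).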