bloomz-1b1-vn-chat / last-checkpoint / trainer_state.json
Femboyuwu2000's picture
Training in progress, step 360, checkpoint
0c0ebc5 verified
raw
history blame
3.07 kB
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0072,
"eval_steps": 500,
"global_step": 360,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 79.12922668457031,
"learning_rate": 1e-06,
"loss": 4.6771,
"step": 20
},
{
"epoch": 0.0,
"grad_norm": 239.7991943359375,
"learning_rate": 2e-06,
"loss": 4.5023,
"step": 40
},
{
"epoch": 0.0,
"grad_norm": 77.5164794921875,
"learning_rate": 3e-06,
"loss": 4.8171,
"step": 60
},
{
"epoch": 0.0,
"grad_norm": 207.87796020507812,
"learning_rate": 4e-06,
"loss": 4.6765,
"step": 80
},
{
"epoch": 0.0,
"grad_norm": 147.12461853027344,
"learning_rate": 4.9999999999999996e-06,
"loss": 4.7237,
"step": 100
},
{
"epoch": 0.0,
"grad_norm": 115.61023712158203,
"learning_rate": 6e-06,
"loss": 4.4426,
"step": 120
},
{
"epoch": 0.0,
"grad_norm": 43.68627166748047,
"learning_rate": 7e-06,
"loss": 5.0142,
"step": 140
},
{
"epoch": 0.0,
"grad_norm": 37.58155059814453,
"learning_rate": 8e-06,
"loss": 4.8572,
"step": 160
},
{
"epoch": 0.0,
"grad_norm": 86.82076263427734,
"learning_rate": 9e-06,
"loss": 4.6848,
"step": 180
},
{
"epoch": 0.0,
"grad_norm": 289.0603332519531,
"learning_rate": 9.999999999999999e-06,
"loss": 4.6474,
"step": 200
},
{
"epoch": 0.0,
"grad_norm": 69.93185424804688,
"learning_rate": 1.1e-05,
"loss": 4.9724,
"step": 220
},
{
"epoch": 0.0,
"grad_norm": 98.215087890625,
"learning_rate": 1.2e-05,
"loss": 4.4447,
"step": 240
},
{
"epoch": 0.01,
"grad_norm": 92.3516845703125,
"learning_rate": 1.3000000000000001e-05,
"loss": 4.9076,
"step": 260
},
{
"epoch": 0.01,
"grad_norm": 150.6816864013672,
"learning_rate": 1.4e-05,
"loss": 4.8402,
"step": 280
},
{
"epoch": 0.01,
"grad_norm": 162.9401397705078,
"learning_rate": 1.5e-05,
"loss": 4.9279,
"step": 300
},
{
"epoch": 0.01,
"grad_norm": 38.77900695800781,
"learning_rate": 1.6e-05,
"loss": 4.5021,
"step": 320
},
{
"epoch": 0.01,
"grad_norm": 91.0234146118164,
"learning_rate": 1.7e-05,
"loss": 4.5546,
"step": 340
},
{
"epoch": 0.01,
"grad_norm": 78.24981689453125,
"learning_rate": 1.8e-05,
"loss": 4.3795,
"step": 360
}
],
"logging_steps": 20,
"max_steps": 20000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 20,
"total_flos": 127576999796736.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}