{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.020415662896574253,
"eval_steps": 63,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.166265158629701e-05,
"eval_loss": 10.840862274169922,
"eval_runtime": 7.8991,
"eval_samples_per_second": 326.366,
"eval_steps_per_second": 163.183,
"step": 1
},
{
"epoch": 0.0008166265158629701,
"grad_norm": 0.3761455714702606,
"learning_rate": 0.00019979453927503364,
"loss": 10.8359,
"step": 10
},
{
"epoch": 0.0016332530317259401,
"grad_norm": 0.394729346036911,
"learning_rate": 0.00019815591569910654,
"loss": 10.815,
"step": 20
},
{
"epoch": 0.0024498795475889103,
"grad_norm": 0.30855050683021545,
"learning_rate": 0.00019490557470106686,
"loss": 10.7786,
"step": 30
},
{
"epoch": 0.0032665060634518802,
"grad_norm": 0.42155948281288147,
"learning_rate": 0.0001900968867902419,
"loss": 10.7552,
"step": 40
},
{
"epoch": 0.00408313257931485,
"grad_norm": 0.2492990493774414,
"learning_rate": 0.00018380881048918405,
"loss": 10.7528,
"step": 50
},
{
"epoch": 0.0048997590951778205,
"grad_norm": 0.2550627887248993,
"learning_rate": 0.00017614459583691346,
"loss": 10.7484,
"step": 60
},
{
"epoch": 0.005144747049936712,
"eval_loss": 10.749500274658203,
"eval_runtime": 7.3007,
"eval_samples_per_second": 353.116,
"eval_steps_per_second": 176.558,
"step": 63
},
{
"epoch": 0.005716385611040791,
"grad_norm": 0.2560204565525055,
"learning_rate": 0.0001672300890261317,
"loss": 10.7501,
"step": 70
},
{
"epoch": 0.0065330121269037604,
"grad_norm": 0.189836785197258,
"learning_rate": 0.00015721166601221698,
"loss": 10.7455,
"step": 80
},
{
"epoch": 0.007349638642766731,
"grad_norm": 0.23742568492889404,
"learning_rate": 0.00014625382902408356,
"loss": 10.7458,
"step": 90
},
{
"epoch": 0.0081662651586297,
"grad_norm": 0.2744472026824951,
"learning_rate": 0.00013453650544213076,
"loss": 10.7482,
"step": 100
},
{
"epoch": 0.008982891674492672,
"grad_norm": 0.3114999532699585,
"learning_rate": 0.00012225209339563145,
"loss": 10.7417,
"step": 110
},
{
"epoch": 0.009799518190355641,
"grad_norm": 0.24193626642227173,
"learning_rate": 0.00010960230259076818,
"loss": 10.7392,
"step": 120
},
{
"epoch": 0.010289494099873423,
"eval_loss": 10.737412452697754,
"eval_runtime": 7.1792,
"eval_samples_per_second": 359.091,
"eval_steps_per_second": 179.545,
"step": 126
},
{
"epoch": 0.01061614470621861,
"grad_norm": 0.310713529586792,
"learning_rate": 9.679484224283449e-05,
"loss": 10.7422,
"step": 130
},
{
"epoch": 0.011432771222081582,
"grad_norm": 0.2092239260673523,
"learning_rate": 8.404001049666211e-05,
"loss": 10.7337,
"step": 140
},
{
"epoch": 0.012249397737944551,
"grad_norm": 0.19805459678173065,
"learning_rate": 7.154724133689677e-05,
"loss": 10.7418,
"step": 150
},
{
"epoch": 0.013066024253807521,
"grad_norm": 0.179554283618927,
"learning_rate": 5.952166568776062e-05,
"loss": 10.7348,
"step": 160
},
{
"epoch": 0.01388265076967049,
"grad_norm": 0.22178767621517181,
"learning_rate": 4.8160743168947496e-05,
"loss": 10.7329,
"step": 170
},
{
"epoch": 0.014699277285533462,
"grad_norm": 0.23290759325027466,
"learning_rate": 3.7651019814126654e-05,
"loss": 10.7324,
"step": 180
},
{
"epoch": 0.015434241149810134,
"eval_loss": 10.731443405151367,
"eval_runtime": 7.1741,
"eval_samples_per_second": 359.346,
"eval_steps_per_second": 179.673,
"step": 189
},
{
"epoch": 0.015515903801396431,
"grad_norm": 0.2047327607870102,
"learning_rate": 2.8165064990227252e-05,
"loss": 10.7352,
"step": 190
},
{
"epoch": 0.0163325303172594,
"grad_norm": 0.20196911692619324,
"learning_rate": 1.985863781320435e-05,
"loss": 10.7285,
"step": 200
},
{
"epoch": 0.017149156833122372,
"grad_norm": 0.21181835234165192,
"learning_rate": 1.286812958766106e-05,
"loss": 10.7256,
"step": 210
},
{
"epoch": 0.017965783348985343,
"grad_norm": 0.23975443840026855,
"learning_rate": 7.308324265397836e-06,
"loss": 10.7394,
"step": 220
},
{
"epoch": 0.01878240986484831,
"grad_norm": 0.21740056574344635,
"learning_rate": 3.270513696097055e-06,
"loss": 10.7338,
"step": 230
},
{
"epoch": 0.019599036380711282,
"grad_norm": 0.2698234021663666,
"learning_rate": 8.209986176753948e-07,
"loss": 10.7245,
"step": 240
},
{
"epoch": 0.020415662896574253,
"grad_norm": 0.2873691916465759,
"learning_rate": 0.0,
"loss": 10.729,
"step": 250
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 63,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 16031529566208.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
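
For reference, a minimal sketch (in Python) of how a trainer_state.json like this one can be loaded and its log_history summarized. The "checkpoint-250/" path is an assumption about where the Trainer wrote this checkpoint; adjust it to the actual directory.

import json

# Load this checkpoint's trainer state (path is an assumption; see note above).
with open("checkpoint-250/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.6f}")

# The Trainer interleaves training logs (which carry "loss") and evaluation
# logs (which carry "eval_loss") in the same list, so filter on key presence.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

for entry in eval_logs:
    print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}")

for entry in train_logs:
    print(f"step {entry['step']:>3}: loss={entry['loss']:.4f}  "
          f"lr={entry['learning_rate']:.2e}")

Filtering on key presence cleanly separates the two log types, since evaluation entries carry only eval_* fields while training entries carry loss, grad_norm, and learning_rate.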