{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.144082332761578,
  "eval_steps": 500,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "learning_rate": 4.951270856073601e-05,
      "loss": 1.4381,
      "step": 500
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.902541712147201e-05,
      "loss": 1.3041,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.853812568220802e-05,
      "loss": 1.2816,
      "step": 1500
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.805083424294402e-05,
      "loss": 1.2974,
      "step": 2000
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.756354280368003e-05,
      "loss": 1.2876,
      "step": 2500
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.707625136441603e-05,
      "loss": 1.3029,
      "step": 3000
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.6588959925152037e-05,
      "loss": 1.301,
      "step": 3500
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.610166848588804e-05,
      "loss": 1.3153,
      "step": 4000
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.5614377046624046e-05,
      "loss": 1.2989,
      "step": 4500
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.512708560736005e-05,
      "loss": 1.1589,
      "step": 5000
    },
    {
      "epoch": 1.18,
      "learning_rate": 4.4639794168096056e-05,
      "loss": 1.1053,
      "step": 5500
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.415250272883206e-05,
      "loss": 1.1314,
      "step": 6000
    },
    {
      "epoch": 1.39,
      "learning_rate": 4.3665211289568066e-05,
      "loss": 1.1446,
      "step": 6500
    },
    {
      "epoch": 1.5,
      "learning_rate": 4.317791985030407e-05,
      "loss": 1.1701,
      "step": 7000
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.269062841104008e-05,
      "loss": 1.1702,
      "step": 7500
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.220333697177608e-05,
      "loss": 1.1764,
      "step": 8000
    },
    {
      "epoch": 1.82,
      "learning_rate": 4.1716045532512085e-05,
      "loss": 1.1933,
      "step": 8500
    },
    {
      "epoch": 1.93,
      "learning_rate": 4.122875409324809e-05,
      "loss": 1.1964,
      "step": 9000
    },
    {
      "epoch": 2.04,
      "learning_rate": 4.0741462653984095e-05,
      "loss": 1.1274,
      "step": 9500
    },
    {
      "epoch": 2.14,
      "learning_rate": 4.025417121472011e-05,
      "loss": 1.0048,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 51304,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 11,
  "save_steps": 10000,
  "total_flos": 2.0901272223744e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}