{
  "best_metric": 0.3348920941352844,
  "best_model_checkpoint": "./vit5_qqp/checkpoint-863",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 2589,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5793742757821553,
      "grad_norm": 2.493626356124878,
      "learning_rate": 4.034376207029741e-05,
      "loss": 0.4544,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.3348920941352844,
      "eval_runtime": 116.9015,
      "eval_samples_per_second": 184.446,
      "eval_steps_per_second": 4.619,
      "step": 863
    },
    {
      "epoch": 1.1587485515643106,
      "grad_norm": 0.8472898006439209,
      "learning_rate": 3.0687524140594824e-05,
      "loss": 0.2056,
      "step": 1000
    },
    {
      "epoch": 1.7381228273464657,
      "grad_norm": 0.994403600692749,
      "learning_rate": 2.103128621089224e-05,
      "loss": 0.155,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3606082797050476,
      "eval_runtime": 116.9754,
      "eval_samples_per_second": 184.329,
      "eval_steps_per_second": 4.616,
      "step": 1726
    },
    {
      "epoch": 2.317497103128621,
      "grad_norm": 0.6376566886901855,
      "learning_rate": 1.1375048281189648e-05,
      "loss": 0.125,
      "step": 2000
    },
    {
      "epoch": 2.8968713789107765,
      "grad_norm": 0.6937958598136902,
      "learning_rate": 1.7188103514870608e-06,
      "loss": 0.1083,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 2589,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8763161692078080.0,
  "train_batch_size": 40,
  "trial_name": null,
  "trial_params": null
}