{
  "best_metric": 0.8084862385321101,
  "best_model_checkpoint": "tiny-bert-sst2/run-2/checkpoint-1000",
  "epoch": 1.8975332068311195,
  "eval_steps": 100,
  "global_step": 1000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18975332068311196,
      "grad_norm": 1.0882924795150757,
      "learning_rate": 4.232847990599429e-05,
      "loss": 1.5987,
      "step": 100
    },
    {
      "epoch": 0.18975332068311196,
      "eval_accuracy": 0.6811926605504587,
      "eval_loss": 1.443600058555603,
      "eval_runtime": 2.5162,
      "eval_samples_per_second": 346.555,
      "eval_steps_per_second": 2.782,
      "step": 100
    },
    {
      "epoch": 0.3795066413662239,
      "grad_norm": 2.405395984649658,
      "learning_rate": 4.145818368618804e-05,
      "loss": 1.3874,
      "step": 200
    },
    {
      "epoch": 0.3795066413662239,
      "eval_accuracy": 0.7545871559633027,
      "eval_loss": 1.168924331665039,
      "eval_runtime": 2.4154,
      "eval_samples_per_second": 361.016,
      "eval_steps_per_second": 2.898,
      "step": 200
    },
    {
      "epoch": 0.5692599620493358,
      "grad_norm": 3.982295274734497,
      "learning_rate": 3.9736321558974306e-05,
      "loss": 1.1384,
      "step": 300
    },
    {
      "epoch": 0.5692599620493358,
      "eval_accuracy": 0.7855504587155964,
      "eval_loss": 1.0294885635375977,
      "eval_runtime": 2.4987,
      "eval_samples_per_second": 348.984,
      "eval_steps_per_second": 2.801,
      "step": 300
    },
    {
      "epoch": 0.7590132827324478,
      "grad_norm": 6.110784530639648,
      "learning_rate": 3.723532957026996e-05,
      "loss": 1.0003,
      "step": 400
    },
    {
      "epoch": 0.7590132827324478,
      "eval_accuracy": 0.7912844036697247,
      "eval_loss": 0.9760427474975586,
      "eval_runtime": 2.4446,
      "eval_samples_per_second": 356.7,
      "eval_steps_per_second": 2.863,
      "step": 400
    },
    {
      "epoch": 0.9487666034155597,
      "grad_norm": 3.9430387020111084,
      "learning_rate": 3.4060420540351555e-05,
      "loss": 0.9476,
      "step": 500
    },
    {
      "epoch": 0.9487666034155597,
      "eval_accuracy": 0.7912844036697247,
      "eval_loss": 0.9497756361961365,
      "eval_runtime": 2.4069,
      "eval_samples_per_second": 362.295,
      "eval_steps_per_second": 2.908,
      "step": 500
    },
    {
      "epoch": 1.1385199240986716,
      "grad_norm": 5.821563720703125,
      "learning_rate": 3.0345157925106953e-05,
      "loss": 0.8797,
      "step": 600
    },
    {
      "epoch": 1.1385199240986716,
      "eval_accuracy": 0.7958715596330275,
      "eval_loss": 0.9309384226799011,
      "eval_runtime": 2.4507,
      "eval_samples_per_second": 355.815,
      "eval_steps_per_second": 2.856,
      "step": 600
    },
    {
      "epoch": 1.3282732447817835,
      "grad_norm": 6.776093482971191,
      "learning_rate": 2.6245837010400067e-05,
      "loss": 0.807,
      "step": 700
    },
    {
      "epoch": 1.3282732447817835,
      "eval_accuracy": 0.7958715596330275,
      "eval_loss": 0.9114881157875061,
      "eval_runtime": 2.4342,
      "eval_samples_per_second": 358.23,
      "eval_steps_per_second": 2.876,
      "step": 700
    },
    {
      "epoch": 1.5180265654648957,
      "grad_norm": 8.255866050720215,
      "learning_rate": 2.1934909813911445e-05,
      "loss": 0.7822,
      "step": 800
    },
    {
      "epoch": 1.5180265654648957,
      "eval_accuracy": 0.805045871559633,
      "eval_loss": 0.886121392250061,
      "eval_runtime": 2.4748,
      "eval_samples_per_second": 352.348,
      "eval_steps_per_second": 2.828,
      "step": 800
    },
    {
      "epoch": 1.7077798861480076,
      "grad_norm": 10.156214714050293,
      "learning_rate": 1.759373029854779e-05,
      "loss": 0.7519,
      "step": 900
    },
    {
      "epoch": 1.7077798861480076,
      "eval_accuracy": 0.8084862385321101,
      "eval_loss": 0.8783713579177856,
      "eval_runtime": 2.4392,
      "eval_samples_per_second": 357.494,
      "eval_steps_per_second": 2.87,
      "step": 900
    },
    {
      "epoch": 1.8975332068311195,
      "grad_norm": 3.3725104331970215,
      "learning_rate": 1.3404925094942501e-05,
      "loss": 0.7476,
      "step": 1000
    },
    {
      "epoch": 1.8975332068311195,
      "eval_accuracy": 0.8084862385321101,
      "eval_loss": 0.8644981980323792,
      "eval_runtime": 2.4269,
      "eval_samples_per_second": 359.301,
      "eval_steps_per_second": 2.884,
      "step": 1000
    }
  ],
  "logging_steps": 100,
  "max_steps": 1581,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 15348240276300.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.7521996188553136,
    "learning_rate": 4.243157651358427e-05,
    "num_train_epochs": 3,
    "temperature": 6,
    "warmup_ratio": 0.03249388147757522
  }
}