|
{
  "best_metric": 2.1463205814361572,
  "best_model_checkpoint": "./outputs/checkpoint-2500",
  "epoch": 1.8214936247723132,
  "eval_steps": 100,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7401,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6427574157714844,
      "eval_runtime": 205.0145,
      "eval_samples_per_second": 30.603,
      "eval_steps_per_second": 3.829,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.6057,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.591986656188965,
      "eval_runtime": 205.1897,
      "eval_samples_per_second": 30.577,
      "eval_steps_per_second": 3.826,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5624,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.5554449558258057,
      "eval_runtime": 204.8733,
      "eval_samples_per_second": 30.624,
      "eval_steps_per_second": 3.832,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5354,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.523040533065796,
      "eval_runtime": 205.0355,
      "eval_samples_per_second": 30.6,
      "eval_steps_per_second": 3.829,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4938,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.497437000274658,
      "eval_runtime": 204.8026,
      "eval_samples_per_second": 30.634,
      "eval_steps_per_second": 3.833,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4668,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4717156887054443,
      "eval_runtime": 204.8144,
      "eval_samples_per_second": 30.633,
      "eval_steps_per_second": 3.833,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4508,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.449354410171509,
      "eval_runtime": 204.5211,
      "eval_samples_per_second": 30.677,
      "eval_steps_per_second": 3.838,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4278,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.4278199672698975,
      "eval_runtime": 204.6341,
      "eval_samples_per_second": 30.66,
      "eval_steps_per_second": 3.836,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3928,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.4054911136627197,
      "eval_runtime": 204.5808,
      "eval_samples_per_second": 30.668,
      "eval_steps_per_second": 3.837,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.389,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3888440132141113,
      "eval_runtime": 204.475,
      "eval_samples_per_second": 30.683,
      "eval_steps_per_second": 3.839,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3831,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.366816997528076,
      "eval_runtime": 204.7174,
      "eval_samples_per_second": 30.647,
      "eval_steps_per_second": 3.835,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.338,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.348148822784424,
      "eval_runtime": 204.9159,
      "eval_samples_per_second": 30.617,
      "eval_steps_per_second": 3.831,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3495,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.331716775894165,
      "eval_runtime": 204.6122,
      "eval_samples_per_second": 30.663,
      "eval_steps_per_second": 3.837,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.3005,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.3074026107788086,
      "eval_runtime": 204.9029,
      "eval_samples_per_second": 30.619,
      "eval_steps_per_second": 3.831,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2609,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.2931342124938965,
      "eval_runtime": 205.4253,
      "eval_samples_per_second": 30.542,
      "eval_steps_per_second": 3.821,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.2532,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2731645107269287,
      "eval_runtime": 205.0407,
      "eval_samples_per_second": 30.599,
      "eval_steps_per_second": 3.829,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2527,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.25679087638855,
      "eval_runtime": 205.2907,
      "eval_samples_per_second": 30.562,
      "eval_steps_per_second": 3.824,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.2141,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2405381202697754,
      "eval_runtime": 205.0141,
      "eval_samples_per_second": 30.603,
      "eval_steps_per_second": 3.829,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.2,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.2261803150177,
      "eval_runtime": 205.38,
      "eval_samples_per_second": 30.548,
      "eval_steps_per_second": 3.822,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1926,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.211982011795044,
      "eval_runtime": 205.3797,
      "eval_samples_per_second": 30.548,
      "eval_steps_per_second": 3.822,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.177,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.1988422870635986,
      "eval_runtime": 205.5358,
      "eval_samples_per_second": 30.525,
      "eval_steps_per_second": 3.819,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1604,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.1834826469421387,
      "eval_runtime": 205.7043,
      "eval_samples_per_second": 30.5,
      "eval_steps_per_second": 3.816,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1481,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.1711061000823975,
      "eval_runtime": 205.9233,
      "eval_samples_per_second": 30.468,
      "eval_steps_per_second": 3.812,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1557,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.1574361324310303,
      "eval_runtime": 205.5846,
      "eval_samples_per_second": 30.518,
      "eval_steps_per_second": 3.818,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1231,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1463205814361572,
      "eval_runtime": 205.712,
      "eval_samples_per_second": 30.499,
      "eval_steps_per_second": 3.816,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 7.298808085997568e+16,
  "trial_name": null,
  "trial_params": null
}
|
|