{
  "best_metric": 1.991615891456604,
  "best_model_checkpoint": "./outputs/checkpoint-4000",
  "epoch": 2.9143897996357016,
  "eval_steps": 100,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7401,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6427574157714844,
      "eval_runtime": 205.0145,
      "eval_samples_per_second": 30.603,
      "eval_steps_per_second": 3.829,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.6057,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.591986656188965,
      "eval_runtime": 205.1897,
      "eval_samples_per_second": 30.577,
      "eval_steps_per_second": 3.826,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5624,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.5554449558258057,
      "eval_runtime": 204.8733,
      "eval_samples_per_second": 30.624,
      "eval_steps_per_second": 3.832,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5354,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.523040533065796,
      "eval_runtime": 205.0355,
      "eval_samples_per_second": 30.6,
      "eval_steps_per_second": 3.829,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4938,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.497437000274658,
      "eval_runtime": 204.8026,
      "eval_samples_per_second": 30.634,
      "eval_steps_per_second": 3.833,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.4668,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.4717156887054443,
      "eval_runtime": 204.8144,
      "eval_samples_per_second": 30.633,
      "eval_steps_per_second": 3.833,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4508,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.449354410171509,
      "eval_runtime": 204.5211,
      "eval_samples_per_second": 30.677,
      "eval_steps_per_second": 3.838,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4278,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.4278199672698975,
      "eval_runtime": 204.6341,
      "eval_samples_per_second": 30.66,
      "eval_steps_per_second": 3.836,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3928,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.4054911136627197,
      "eval_runtime": 204.5808,
      "eval_samples_per_second": 30.668,
      "eval_steps_per_second": 3.837,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.389,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3888440132141113,
      "eval_runtime": 204.475,
      "eval_samples_per_second": 30.683,
      "eval_steps_per_second": 3.839,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3831,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.366816997528076,
      "eval_runtime": 204.7174,
      "eval_samples_per_second": 30.647,
      "eval_steps_per_second": 3.835,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.338,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.348148822784424,
      "eval_runtime": 204.9159,
      "eval_samples_per_second": 30.617,
      "eval_steps_per_second": 3.831,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3495,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.331716775894165,
      "eval_runtime": 204.6122,
      "eval_samples_per_second": 30.663,
      "eval_steps_per_second": 3.837,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.3005,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.3074026107788086,
      "eval_runtime": 204.9029,
      "eval_samples_per_second": 30.619,
      "eval_steps_per_second": 3.831,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2609,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.2931342124938965,
      "eval_runtime": 205.4253,
      "eval_samples_per_second": 30.542,
      "eval_steps_per_second": 3.821,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.2532,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2731645107269287,
      "eval_runtime": 205.0407,
      "eval_samples_per_second": 30.599,
      "eval_steps_per_second": 3.829,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2527,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.25679087638855,
      "eval_runtime": 205.2907,
      "eval_samples_per_second": 30.562,
      "eval_steps_per_second": 3.824,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.2141,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.2405381202697754,
      "eval_runtime": 205.0141,
      "eval_samples_per_second": 30.603,
      "eval_steps_per_second": 3.829,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.2,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.2261803150177,
      "eval_runtime": 205.38,
      "eval_samples_per_second": 30.548,
      "eval_steps_per_second": 3.822,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1926,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.211982011795044,
      "eval_runtime": 205.3797,
      "eval_samples_per_second": 30.548,
      "eval_steps_per_second": 3.822,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.177,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.1988422870635986,
      "eval_runtime": 205.5358,
      "eval_samples_per_second": 30.525,
      "eval_steps_per_second": 3.819,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1604,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.1834826469421387,
      "eval_runtime": 205.7043,
      "eval_samples_per_second": 30.5,
      "eval_steps_per_second": 3.816,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1481,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.1711061000823975,
      "eval_runtime": 205.9233,
      "eval_samples_per_second": 30.468,
      "eval_steps_per_second": 3.812,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1557,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.1574361324310303,
      "eval_runtime": 205.5846,
      "eval_samples_per_second": 30.518,
      "eval_steps_per_second": 3.818,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.1231,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1463205814361572,
      "eval_runtime": 205.712,
      "eval_samples_per_second": 30.499,
      "eval_steps_per_second": 3.816,
      "step": 2500
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0002,
      "loss": 2.1232,
      "step": 2600
    },
    {
      "epoch": 1.89,
      "eval_loss": 2.1318325996398926,
      "eval_runtime": 205.4314,
      "eval_samples_per_second": 30.541,
      "eval_steps_per_second": 3.821,
      "step": 2600
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002,
      "loss": 2.1077,
      "step": 2700
    },
    {
      "epoch": 1.97,
      "eval_loss": 2.1178267002105713,
      "eval_runtime": 205.6052,
      "eval_samples_per_second": 30.515,
      "eval_steps_per_second": 3.818,
      "step": 2700
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002,
      "loss": 2.064,
      "step": 2800
    },
    {
      "epoch": 2.04,
      "eval_loss": 2.110224485397339,
      "eval_runtime": 206.36,
      "eval_samples_per_second": 30.403,
      "eval_steps_per_second": 3.804,
      "step": 2800
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 2.037,
      "step": 2900
    },
    {
      "epoch": 2.11,
      "eval_loss": 2.0975825786590576,
      "eval_runtime": 206.0587,
      "eval_samples_per_second": 30.448,
      "eval_steps_per_second": 3.81,
      "step": 2900
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0002,
      "loss": 2.0582,
      "step": 3000
    },
    {
      "epoch": 2.19,
      "eval_loss": 2.089369297027588,
      "eval_runtime": 206.2047,
      "eval_samples_per_second": 30.426,
      "eval_steps_per_second": 3.807,
      "step": 3000
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 2.0245,
      "step": 3100
    },
    {
      "epoch": 2.26,
      "eval_loss": 2.076017379760742,
      "eval_runtime": 206.0223,
      "eval_samples_per_second": 30.453,
      "eval_steps_per_second": 3.81,
      "step": 3100
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002,
      "loss": 2.0405,
      "step": 3200
    },
    {
      "epoch": 2.33,
      "eval_loss": 2.0672309398651123,
      "eval_runtime": 206.3429,
      "eval_samples_per_second": 30.406,
      "eval_steps_per_second": 3.804,
      "step": 3200
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.0002,
      "loss": 2.0093,
      "step": 3300
    },
    {
      "epoch": 2.4,
      "eval_loss": 2.05438232421875,
      "eval_runtime": 205.9025,
      "eval_samples_per_second": 30.471,
      "eval_steps_per_second": 3.812,
      "step": 3300
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0002,
      "loss": 2.0032,
      "step": 3400
    },
    {
      "epoch": 2.48,
      "eval_loss": 2.0523083209991455,
      "eval_runtime": 205.5591,
      "eval_samples_per_second": 30.522,
      "eval_steps_per_second": 3.819,
      "step": 3400
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0002,
      "loss": 2.001,
      "step": 3500
    },
    {
      "epoch": 2.55,
      "eval_loss": 2.0382425785064697,
      "eval_runtime": 205.9189,
      "eval_samples_per_second": 30.468,
      "eval_steps_per_second": 3.812,
      "step": 3500
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 1.9958,
      "step": 3600
    },
    {
      "epoch": 2.62,
      "eval_loss": 2.029693365097046,
      "eval_runtime": 205.9884,
      "eval_samples_per_second": 30.458,
      "eval_steps_per_second": 3.811,
      "step": 3600
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0002,
      "loss": 1.9759,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "eval_loss": 2.0174672603607178,
      "eval_runtime": 206.1463,
      "eval_samples_per_second": 30.435,
      "eval_steps_per_second": 3.808,
      "step": 3700
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 1.9684,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "eval_loss": 2.0112404823303223,
      "eval_runtime": 205.7298,
      "eval_samples_per_second": 30.496,
      "eval_steps_per_second": 3.816,
      "step": 3800
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0002,
      "loss": 1.9544,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "eval_loss": 1.9985524415969849,
      "eval_runtime": 205.5555,
      "eval_samples_per_second": 30.522,
      "eval_steps_per_second": 3.819,
      "step": 3900
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0002,
      "loss": 1.9554,
      "step": 4000
    },
    {
      "epoch": 2.91,
      "eval_loss": 1.991615891456604,
      "eval_runtime": 206.0675,
      "eval_samples_per_second": 30.446,
      "eval_steps_per_second": 3.809,
      "step": 4000
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.1667957018181632e+17,
  "trial_name": null,
  "trial_params": null
}