{
  "best_metric": 0.8972761034965515,
  "best_model_checkpoint": "data/Llama-31-8B_task-1_60-samples_config-1_full/checkpoint-51",
  "epoch": 16.0,
  "eval_steps": 500,
  "global_step": 92,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17391304347826086,
      "grad_norm": 2.0918707847595215,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.45,
      "step": 1
    },
    {
      "epoch": 0.34782608695652173,
      "grad_norm": 2.04691481590271,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.5615,
      "step": 2
    },
    {
      "epoch": 0.6956521739130435,
      "grad_norm": 1.5113601684570312,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.5176,
      "step": 4
    },
    {
      "epoch": 0.8695652173913043,
      "eval_loss": 2.328240156173706,
      "eval_runtime": 4.8422,
      "eval_samples_per_second": 2.478,
      "eval_steps_per_second": 2.478,
      "step": 5
    },
    {
      "epoch": 1.0434782608695652,
      "grad_norm": 1.5090464353561401,
      "learning_rate": 2.4e-05,
      "loss": 2.4243,
      "step": 6
    },
    {
      "epoch": 1.391304347826087,
      "grad_norm": 1.785285472869873,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 2.3679,
      "step": 8
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 1.2606208324432373,
      "learning_rate": 4e-05,
      "loss": 2.1942,
      "step": 10
    },
    {
      "epoch": 1.9130434782608696,
      "eval_loss": 1.9932295083999634,
      "eval_runtime": 4.8372,
      "eval_samples_per_second": 2.481,
      "eval_steps_per_second": 2.481,
      "step": 11
    },
    {
      "epoch": 2.0869565217391304,
      "grad_norm": 1.6891443729400635,
      "learning_rate": 4.8e-05,
      "loss": 2.1333,
      "step": 12
    },
    {
      "epoch": 2.4347826086956523,
      "grad_norm": 0.9376276135444641,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 1.9619,
      "step": 14
    },
    {
      "epoch": 2.782608695652174,
      "grad_norm": 0.8051391243934631,
      "learning_rate": 6.400000000000001e-05,
      "loss": 1.8164,
      "step": 16
    },
    {
      "epoch": 2.9565217391304346,
      "eval_loss": 1.623587727546692,
      "eval_runtime": 4.8368,
      "eval_samples_per_second": 2.481,
      "eval_steps_per_second": 2.481,
      "step": 17
    },
    {
      "epoch": 3.130434782608696,
      "grad_norm": 0.9244698286056519,
      "learning_rate": 7.2e-05,
      "loss": 1.7283,
      "step": 18
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 1.1509673595428467,
      "learning_rate": 8e-05,
      "loss": 1.5099,
      "step": 20
    },
    {
      "epoch": 3.8260869565217392,
      "grad_norm": 1.1495587825775146,
      "learning_rate": 8.800000000000001e-05,
      "loss": 1.3441,
      "step": 22
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.1448259353637695,
      "eval_runtime": 4.8374,
      "eval_samples_per_second": 2.481,
      "eval_steps_per_second": 2.481,
      "step": 23
    },
    {
      "epoch": 4.173913043478261,
      "grad_norm": 0.7694088220596313,
      "learning_rate": 9.6e-05,
      "loss": 1.1809,
      "step": 24
    },
    {
      "epoch": 4.521739130434782,
      "grad_norm": 0.5819656252861023,
      "learning_rate": 9.999512620046522e-05,
      "loss": 1.1128,
      "step": 26
    },
    {
      "epoch": 4.869565217391305,
      "grad_norm": 0.5470612049102783,
      "learning_rate": 9.995614150494293e-05,
      "loss": 0.987,
      "step": 28
    },
    {
      "epoch": 4.869565217391305,
      "eval_loss": 1.0040026903152466,
      "eval_runtime": 4.8493,
      "eval_samples_per_second": 2.475,
      "eval_steps_per_second": 2.475,
      "step": 28
    },
    {
      "epoch": 5.217391304347826,
      "grad_norm": 0.5045129060745239,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.9648,
      "step": 30
    },
    {
      "epoch": 5.565217391304348,
      "grad_norm": 0.48170214891433716,
      "learning_rate": 9.976136999909156e-05,
      "loss": 0.9783,
      "step": 32
    },
    {
      "epoch": 5.913043478260869,
      "grad_norm": 0.46512943506240845,
      "learning_rate": 9.96057350657239e-05,
      "loss": 0.9101,
      "step": 34
    },
    {
      "epoch": 5.913043478260869,
      "eval_loss": 0.9507834911346436,
      "eval_runtime": 4.8456,
      "eval_samples_per_second": 2.476,
      "eval_steps_per_second": 2.476,
      "step": 34
    },
    {
      "epoch": 6.260869565217392,
      "grad_norm": 0.43386533856391907,
      "learning_rate": 9.941141907232765e-05,
      "loss": 0.8386,
      "step": 36
    },
    {
      "epoch": 6.608695652173913,
      "grad_norm": 0.49891090393066406,
      "learning_rate": 9.917857354066931e-05,
      "loss": 0.8758,
      "step": 38
    },
    {
      "epoch": 6.956521739130435,
      "grad_norm": 0.4979923963546753,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.8517,
      "step": 40
    },
    {
      "epoch": 6.956521739130435,
      "eval_loss": 0.9197065234184265,
      "eval_runtime": 4.8505,
      "eval_samples_per_second": 2.474,
      "eval_steps_per_second": 2.474,
      "step": 40
    },
    {
      "epoch": 7.304347826086957,
      "grad_norm": 0.5483697652816772,
      "learning_rate": 9.859805002892732e-05,
      "loss": 0.8447,
      "step": 42
    },
    {
      "epoch": 7.6521739130434785,
      "grad_norm": 0.4813004732131958,
      "learning_rate": 9.825082472361557e-05,
      "loss": 0.7798,
      "step": 44
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.6186925172805786,
      "learning_rate": 9.786597487660337e-05,
      "loss": 0.7732,
      "step": 46
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.8985572457313538,
      "eval_runtime": 4.8425,
      "eval_samples_per_second": 2.478,
      "eval_steps_per_second": 2.478,
      "step": 46
    },
    {
      "epoch": 8.347826086956522,
      "grad_norm": 0.621152937412262,
      "learning_rate": 9.744380058222483e-05,
      "loss": 0.7231,
      "step": 48
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 0.7259064316749573,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.7365,
      "step": 50
    },
    {
      "epoch": 8.869565217391305,
      "eval_loss": 0.8972761034965515,
      "eval_runtime": 4.8455,
      "eval_samples_per_second": 2.477,
      "eval_steps_per_second": 2.477,
      "step": 51
    },
    {
      "epoch": 9.043478260869565,
      "grad_norm": 0.8167423009872437,
      "learning_rate": 9.648882429441257e-05,
      "loss": 0.6953,
      "step": 52
    },
    {
      "epoch": 9.391304347826088,
      "grad_norm": 0.9113949537277222,
      "learning_rate": 9.595676696276172e-05,
      "loss": 0.6369,
      "step": 54
    },
    {
      "epoch": 9.73913043478261,
      "grad_norm": 0.8619420528411865,
      "learning_rate": 9.538887392664544e-05,
      "loss": 0.6133,
      "step": 56
    },
    {
      "epoch": 9.91304347826087,
      "eval_loss": 0.9108675122261047,
      "eval_runtime": 4.8391,
      "eval_samples_per_second": 2.48,
      "eval_steps_per_second": 2.48,
      "step": 57
    },
    {
      "epoch": 10.08695652173913,
      "grad_norm": 0.7065742015838623,
      "learning_rate": 9.478558801197065e-05,
      "loss": 0.6317,
      "step": 58
    },
    {
      "epoch": 10.434782608695652,
      "grad_norm": 0.6940516233444214,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.5093,
      "step": 60
    },
    {
      "epoch": 10.782608695652174,
      "grad_norm": 0.8662077188491821,
      "learning_rate": 9.347474647526095e-05,
      "loss": 0.5483,
      "step": 62
    },
    {
      "epoch": 10.956521739130435,
      "eval_loss": 0.9299518465995789,
      "eval_runtime": 4.8371,
      "eval_samples_per_second": 2.481,
      "eval_steps_per_second": 2.481,
      "step": 63
    },
    {
      "epoch": 11.130434782608695,
      "grad_norm": 0.7917555570602417,
      "learning_rate": 9.276821300802534e-05,
      "loss": 0.5342,
      "step": 64
    },
    {
      "epoch": 11.478260869565217,
      "grad_norm": 1.0706347227096558,
      "learning_rate": 9.202833017478422e-05,
      "loss": 0.4226,
      "step": 66
    },
    {
      "epoch": 11.826086956521738,
      "grad_norm": 0.8940456509590149,
      "learning_rate": 9.125567491391476e-05,
      "loss": 0.4109,
      "step": 68
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.9910491108894348,
      "eval_runtime": 4.8689,
      "eval_samples_per_second": 2.465,
      "eval_steps_per_second": 2.465,
      "step": 69
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 0.8844466805458069,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.3587,
      "step": 70
    },
    {
      "epoch": 12.521739130434783,
      "grad_norm": 1.2623813152313232,
      "learning_rate": 8.961448216775954e-05,
      "loss": 0.3159,
      "step": 72
    },
    {
      "epoch": 12.869565217391305,
      "grad_norm": 1.2042642831802368,
      "learning_rate": 8.874722443520899e-05,
      "loss": 0.285,
      "step": 74
    },
    {
      "epoch": 12.869565217391305,
      "eval_loss": 1.0814924240112305,
      "eval_runtime": 4.8399,
      "eval_samples_per_second": 2.479,
      "eval_steps_per_second": 2.479,
      "step": 74
    },
    {
      "epoch": 13.217391304347826,
      "grad_norm": 1.173231601715088,
      "learning_rate": 8.784975278258783e-05,
      "loss": 0.2746,
      "step": 76
    },
    {
      "epoch": 13.565217391304348,
      "grad_norm": 1.5600581169128418,
      "learning_rate": 8.692276703129421e-05,
      "loss": 0.1938,
      "step": 78
    },
    {
      "epoch": 13.91304347826087,
      "grad_norm": 1.2864255905151367,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.2088,
      "step": 80
    },
    {
      "epoch": 13.91304347826087,
      "eval_loss": 1.233132004737854,
      "eval_runtime": 4.841,
      "eval_samples_per_second": 2.479,
      "eval_steps_per_second": 2.479,
      "step": 80
    },
    {
      "epoch": 14.26086956521739,
      "grad_norm": 1.078246831893921,
      "learning_rate": 8.498316702566828e-05,
      "loss": 0.1525,
      "step": 82
    },
    {
      "epoch": 14.608695652173914,
      "grad_norm": 0.9325866103172302,
      "learning_rate": 8.397206521307584e-05,
      "loss": 0.1325,
      "step": 84
    },
    {
      "epoch": 14.956521739130435,
      "grad_norm": 1.9887562990188599,
      "learning_rate": 8.293447300593402e-05,
      "loss": 0.1666,
      "step": 86
    },
    {
      "epoch": 14.956521739130435,
      "eval_loss": 1.4608489274978638,
      "eval_runtime": 4.8447,
      "eval_samples_per_second": 2.477,
      "eval_steps_per_second": 2.477,
      "step": 86
    },
    {
      "epoch": 15.304347826086957,
      "grad_norm": 0.8603971004486084,
      "learning_rate": 8.18711994874345e-05,
      "loss": 0.1077,
      "step": 88
    },
    {
      "epoch": 15.652173913043478,
      "grad_norm": 0.8751214742660522,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.0945,
      "step": 90
    },
    {
      "epoch": 16.0,
      "grad_norm": 0.9891760349273682,
      "learning_rate": 7.967094433018508e-05,
      "loss": 0.1074,
      "step": 92
    },
    {
      "epoch": 16.0,
      "eval_loss": 1.5691243410110474,
      "eval_runtime": 4.8414,
      "eval_samples_per_second": 2.479,
      "eval_steps_per_second": 2.479,
      "step": 92
    },
    {
      "epoch": 16.0,
      "step": 92,
      "total_flos": 1.101673640493056e+16,
      "train_loss": 0.9230853515150754,
      "train_runtime": 865.6365,
      "train_samples_per_second": 2.657,
      "train_steps_per_second": 0.289
    }
  ],
  "logging_steps": 2,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.101673640493056e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}