{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9941520467836256,
  "eval_steps": 500,
  "global_step": 576,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05198180636777128,
      "grad_norm": 3.0779116014443817,
      "learning_rate": 5e-06,
      "loss": 0.7363,
      "step": 10
    },
    {
      "epoch": 0.10396361273554255,
      "grad_norm": 0.8378025538905853,
      "learning_rate": 5e-06,
      "loss": 0.6574,
      "step": 20
    },
    {
      "epoch": 0.15594541910331383,
      "grad_norm": 1.0326593461431761,
      "learning_rate": 5e-06,
      "loss": 0.6354,
      "step": 30
    },
    {
      "epoch": 0.2079272254710851,
      "grad_norm": 0.8798298291096642,
      "learning_rate": 5e-06,
      "loss": 0.6258,
      "step": 40
    },
    {
      "epoch": 0.2599090318388564,
      "grad_norm": 0.7652583221522038,
      "learning_rate": 5e-06,
      "loss": 0.6081,
      "step": 50
    },
    {
      "epoch": 0.31189083820662766,
      "grad_norm": 0.8092758063286689,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 60
    },
    {
      "epoch": 0.36387264457439894,
      "grad_norm": 0.6377593975168986,
      "learning_rate": 5e-06,
      "loss": 0.5951,
      "step": 70
    },
    {
      "epoch": 0.4158544509421702,
      "grad_norm": 0.8604320761158406,
      "learning_rate": 5e-06,
      "loss": 0.5853,
      "step": 80
    },
    {
      "epoch": 0.4678362573099415,
      "grad_norm": 0.5659478868871771,
      "learning_rate": 5e-06,
      "loss": 0.5736,
      "step": 90
    },
    {
      "epoch": 0.5198180636777128,
      "grad_norm": 0.6666095975840195,
      "learning_rate": 5e-06,
      "loss": 0.5681,
      "step": 100
    },
    {
      "epoch": 0.571799870045484,
      "grad_norm": 0.7108362162785513,
      "learning_rate": 5e-06,
      "loss": 0.5752,
      "step": 110
    },
    {
      "epoch": 0.6237816764132553,
      "grad_norm": 0.5055975881819947,
      "learning_rate": 5e-06,
      "loss": 0.5661,
      "step": 120
    },
    {
      "epoch": 0.6757634827810266,
      "grad_norm": 0.5122594507537827,
      "learning_rate": 5e-06,
      "loss": 0.5724,
      "step": 130
    },
    {
      "epoch": 0.7277452891487979,
      "grad_norm": 0.6645034900787495,
      "learning_rate": 5e-06,
      "loss": 0.5642,
      "step": 140
    },
    {
      "epoch": 0.7797270955165692,
      "grad_norm": 0.4688383993640717,
      "learning_rate": 5e-06,
      "loss": 0.563,
      "step": 150
    },
    {
      "epoch": 0.8317089018843404,
      "grad_norm": 0.5311685960170432,
      "learning_rate": 5e-06,
      "loss": 0.5636,
      "step": 160
    },
    {
      "epoch": 0.8836907082521117,
      "grad_norm": 0.5275216737667017,
      "learning_rate": 5e-06,
      "loss": 0.5622,
      "step": 170
    },
    {
      "epoch": 0.935672514619883,
      "grad_norm": 0.49741573059254773,
      "learning_rate": 5e-06,
      "loss": 0.5604,
      "step": 180
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.6752670080708038,
      "learning_rate": 5e-06,
      "loss": 0.5606,
      "step": 190
    },
    {
      "epoch": 0.9980506822612085,
      "eval_loss": 0.5604754090309143,
      "eval_runtime": 134.8008,
      "eval_samples_per_second": 38.457,
      "eval_steps_per_second": 0.601,
      "step": 192
    },
    {
      "epoch": 1.0396361273554255,
      "grad_norm": 0.4942854982934459,
      "learning_rate": 5e-06,
      "loss": 0.5581,
      "step": 200
    },
    {
      "epoch": 1.0916179337231968,
      "grad_norm": 0.47133396673557604,
      "learning_rate": 5e-06,
      "loss": 0.5131,
      "step": 210
    },
    {
      "epoch": 1.143599740090968,
      "grad_norm": 0.6706210343356649,
      "learning_rate": 5e-06,
      "loss": 0.5108,
      "step": 220
    },
    {
      "epoch": 1.1955815464587394,
      "grad_norm": 0.4990475384686441,
      "learning_rate": 5e-06,
      "loss": 0.5127,
      "step": 230
    },
    {
      "epoch": 1.2475633528265107,
      "grad_norm": 0.6948432011959483,
      "learning_rate": 5e-06,
      "loss": 0.516,
      "step": 240
    },
    {
      "epoch": 1.299545159194282,
      "grad_norm": 0.4617478562618637,
      "learning_rate": 5e-06,
      "loss": 0.5124,
      "step": 250
    },
    {
      "epoch": 1.3515269655620532,
      "grad_norm": 0.40496266862562025,
      "learning_rate": 5e-06,
      "loss": 0.5169,
      "step": 260
    },
    {
      "epoch": 1.4035087719298245,
      "grad_norm": 0.4441934839800834,
      "learning_rate": 5e-06,
      "loss": 0.5153,
      "step": 270
    },
    {
      "epoch": 1.4554905782975958,
      "grad_norm": 0.5551575728453572,
      "learning_rate": 5e-06,
      "loss": 0.5153,
      "step": 280
    },
    {
      "epoch": 1.507472384665367,
      "grad_norm": 0.6109195782274622,
      "learning_rate": 5e-06,
      "loss": 0.5095,
      "step": 290
    },
    {
      "epoch": 1.5594541910331383,
      "grad_norm": 0.501987852874291,
      "learning_rate": 5e-06,
      "loss": 0.5134,
      "step": 300
    },
    {
      "epoch": 1.6114359974009096,
      "grad_norm": 0.42511170270357557,
      "learning_rate": 5e-06,
      "loss": 0.5165,
      "step": 310
    },
    {
      "epoch": 1.6634178037686809,
      "grad_norm": 0.49289651016151736,
      "learning_rate": 5e-06,
      "loss": 0.5096,
      "step": 320
    },
    {
      "epoch": 1.7153996101364521,
      "grad_norm": 0.4308251737050951,
      "learning_rate": 5e-06,
      "loss": 0.5138,
      "step": 330
    },
    {
      "epoch": 1.7673814165042234,
      "grad_norm": 0.5157909639859894,
      "learning_rate": 5e-06,
      "loss": 0.5171,
      "step": 340
    },
    {
      "epoch": 1.8193632228719947,
      "grad_norm": 0.5188996520268809,
      "learning_rate": 5e-06,
      "loss": 0.5038,
      "step": 350
    },
    {
      "epoch": 1.871345029239766,
      "grad_norm": 0.4609663557546815,
      "learning_rate": 5e-06,
      "loss": 0.5135,
      "step": 360
    },
    {
      "epoch": 1.9233268356075373,
      "grad_norm": 0.5130233449439461,
      "learning_rate": 5e-06,
      "loss": 0.4969,
      "step": 370
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.47208230393580913,
      "learning_rate": 5e-06,
      "loss": 0.5116,
      "step": 380
    },
    {
      "epoch": 1.996101364522417,
      "eval_loss": 0.5507273077964783,
      "eval_runtime": 131.5009,
      "eval_samples_per_second": 39.422,
      "eval_steps_per_second": 0.616,
      "step": 384
    },
    {
      "epoch": 2.02729044834308,
      "grad_norm": 0.6127372201164472,
      "learning_rate": 5e-06,
      "loss": 0.5155,
      "step": 390
    },
    {
      "epoch": 2.079272254710851,
      "grad_norm": 0.7411594983025063,
      "learning_rate": 5e-06,
      "loss": 0.4578,
      "step": 400
    },
    {
      "epoch": 2.1312540610786224,
      "grad_norm": 0.7428780446595316,
      "learning_rate": 5e-06,
      "loss": 0.4704,
      "step": 410
    },
    {
      "epoch": 2.1832358674463936,
      "grad_norm": 0.5200617783769791,
      "learning_rate": 5e-06,
      "loss": 0.4637,
      "step": 420
    },
    {
      "epoch": 2.235217673814165,
      "grad_norm": 0.6498653019719489,
      "learning_rate": 5e-06,
      "loss": 0.4622,
      "step": 430
    },
    {
      "epoch": 2.287199480181936,
      "grad_norm": 0.46276007763796656,
      "learning_rate": 5e-06,
      "loss": 0.4607,
      "step": 440
    },
    {
      "epoch": 2.3391812865497075,
      "grad_norm": 0.5996673377937245,
      "learning_rate": 5e-06,
      "loss": 0.4622,
      "step": 450
    },
    {
      "epoch": 2.3911630929174787,
      "grad_norm": 0.49488878939584074,
      "learning_rate": 5e-06,
      "loss": 0.4682,
      "step": 460
    },
    {
      "epoch": 2.44314489928525,
      "grad_norm": 0.4643784900227937,
      "learning_rate": 5e-06,
      "loss": 0.4594,
      "step": 470
    },
    {
      "epoch": 2.4951267056530213,
      "grad_norm": 0.44786233784344404,
      "learning_rate": 5e-06,
      "loss": 0.4675,
      "step": 480
    },
    {
      "epoch": 2.5471085120207926,
      "grad_norm": 0.6126679422066866,
      "learning_rate": 5e-06,
      "loss": 0.4659,
      "step": 490
    },
    {
      "epoch": 2.599090318388564,
      "grad_norm": 0.530097732347591,
      "learning_rate": 5e-06,
      "loss": 0.4588,
      "step": 500
    },
    {
      "epoch": 2.651072124756335,
      "grad_norm": 0.5516810761121376,
      "learning_rate": 5e-06,
      "loss": 0.4634,
      "step": 510
    },
    {
      "epoch": 2.7030539311241064,
      "grad_norm": 0.5437842249870648,
      "learning_rate": 5e-06,
      "loss": 0.4642,
      "step": 520
    },
    {
      "epoch": 2.7550357374918777,
      "grad_norm": 0.5248669258621385,
      "learning_rate": 5e-06,
      "loss": 0.4658,
      "step": 530
    },
    {
      "epoch": 2.807017543859649,
      "grad_norm": 0.4756943098859436,
      "learning_rate": 5e-06,
      "loss": 0.4712,
      "step": 540
    },
    {
      "epoch": 2.8589993502274202,
      "grad_norm": 0.5165218818933938,
      "learning_rate": 5e-06,
      "loss": 0.4672,
      "step": 550
    },
    {
      "epoch": 2.9109811565951915,
      "grad_norm": 0.5304020568782208,
      "learning_rate": 5e-06,
      "loss": 0.4682,
      "step": 560
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.5954622072015786,
      "learning_rate": 5e-06,
      "loss": 0.4569,
      "step": 570
    },
    {
      "epoch": 2.9941520467836256,
      "eval_loss": 0.5534155368804932,
      "eval_runtime": 134.2301,
      "eval_samples_per_second": 38.62,
      "eval_steps_per_second": 0.603,
      "step": 576
    },
    {
      "epoch": 2.9941520467836256,
      "step": 576,
      "total_flos": 964612073717760.0,
      "train_loss": 0.5243237420088716,
      "train_runtime": 19210.653,
      "train_samples_per_second": 15.379,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 10,
  "max_steps": 576,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 964612073717760.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}