|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02345456411158509,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00046909128223170175,
      "grad_norm": 0.021555056795477867,
      "learning_rate": 5e-05,
      "loss": 10.3766,
      "step": 1
    },
    {
      "epoch": 0.00046909128223170175,
      "eval_loss": 10.375811576843262,
      "eval_runtime": 0.1016,
      "eval_samples_per_second": 491.894,
      "eval_steps_per_second": 127.893,
      "step": 1
    },
    {
      "epoch": 0.0009381825644634035,
      "grad_norm": 0.021822381764650345,
      "learning_rate": 0.0001,
      "loss": 10.3759,
      "step": 2
    },
    {
      "epoch": 0.0014072738466951054,
      "grad_norm": 0.020989451557397842,
      "learning_rate": 9.990365154573717e-05,
      "loss": 10.3756,
      "step": 3
    },
    {
      "epoch": 0.001876365128926807,
      "grad_norm": 0.018662070855498314,
      "learning_rate": 9.961501876182148e-05,
      "loss": 10.3757,
      "step": 4
    },
    {
      "epoch": 0.002345456411158509,
      "grad_norm": 0.01889839768409729,
      "learning_rate": 9.913533761814537e-05,
      "loss": 10.3743,
      "step": 5
    },
    {
      "epoch": 0.002814547693390211,
      "grad_norm": 0.017945516854524612,
      "learning_rate": 9.846666218300807e-05,
      "loss": 10.3749,
      "step": 6
    },
    {
      "epoch": 0.0032836389756219124,
      "grad_norm": 0.019389698281884193,
      "learning_rate": 9.761185582727977e-05,
      "loss": 10.3745,
      "step": 7
    },
    {
      "epoch": 0.003752730257853614,
      "grad_norm": 0.018679462373256683,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.3746,
      "step": 8
    },
    {
      "epoch": 0.004221821540085316,
      "grad_norm": 0.019041912630200386,
      "learning_rate": 9.535927336897098e-05,
      "loss": 10.374,
      "step": 9
    },
    {
      "epoch": 0.004690912822317018,
      "grad_norm": 0.01870439387857914,
      "learning_rate": 9.397114317029975e-05,
      "loss": 10.3737,
      "step": 10
    },
    {
      "epoch": 0.00516000410454872,
      "grad_norm": 0.01823393441736698,
      "learning_rate": 9.241613255361455e-05,
      "loss": 10.3735,
      "step": 11
    },
    {
      "epoch": 0.005629095386780422,
      "grad_norm": 0.02004813961684704,
      "learning_rate": 9.070090031310558e-05,
      "loss": 10.3734,
      "step": 12
    },
    {
      "epoch": 0.006098186669012123,
      "grad_norm": 0.021872414276003838,
      "learning_rate": 8.883279133655399e-05,
      "loss": 10.3738,
      "step": 13
    },
    {
      "epoch": 0.006567277951243825,
      "grad_norm": 0.020817754790186882,
      "learning_rate": 8.681980515339464e-05,
      "loss": 10.3752,
      "step": 14
    },
    {
      "epoch": 0.007036369233475527,
      "grad_norm": 0.02583605982363224,
      "learning_rate": 8.467056167950311e-05,
      "loss": 10.3738,
      "step": 15
    },
    {
      "epoch": 0.007505460515707228,
      "grad_norm": 0.02163218893110752,
      "learning_rate": 8.239426430539243e-05,
      "loss": 10.3733,
      "step": 16
    },
    {
      "epoch": 0.00797455179793893,
      "grad_norm": 0.021959403529763222,
      "learning_rate": 8.000066048588211e-05,
      "loss": 10.3759,
      "step": 17
    },
    {
      "epoch": 0.008443643080170631,
      "grad_norm": 0.020150721073150635,
      "learning_rate": 7.75e-05,
      "loss": 10.3726,
      "step": 18
    },
    {
      "epoch": 0.008912734362402334,
      "grad_norm": 0.019531995058059692,
      "learning_rate": 7.490299105985507e-05,
      "loss": 10.3739,
      "step": 19
    },
    {
      "epoch": 0.009381825644634035,
      "grad_norm": 0.022027425467967987,
      "learning_rate": 7.222075445642904e-05,
      "loss": 10.3726,
      "step": 20
    },
    {
      "epoch": 0.009850916926865736,
      "grad_norm": 0.022007036954164505,
      "learning_rate": 6.946477593864228e-05,
      "loss": 10.373,
      "step": 21
    },
    {
      "epoch": 0.01032000820909744,
      "grad_norm": 0.021248240023851395,
      "learning_rate": 6.664685702961344e-05,
      "loss": 10.3728,
      "step": 22
    },
    {
      "epoch": 0.01078909949132914,
      "grad_norm": 0.02485470473766327,
      "learning_rate": 6.377906449072578e-05,
      "loss": 10.372,
      "step": 23
    },
    {
      "epoch": 0.011258190773560843,
      "grad_norm": 0.026103204116225243,
      "learning_rate": 6.087367864990233e-05,
      "loss": 10.3701,
      "step": 24
    },
    {
      "epoch": 0.011727282055792545,
      "grad_norm": 0.02261376939713955,
      "learning_rate": 5.794314081535644e-05,
      "loss": 10.3694,
      "step": 25
    },
    {
      "epoch": 0.011727282055792545,
      "eval_loss": 10.373571395874023,
      "eval_runtime": 0.1011,
      "eval_samples_per_second": 494.484,
      "eval_steps_per_second": 128.566,
      "step": 25
    },
    {
      "epoch": 0.012196373338024246,
      "grad_norm": 0.02532474510371685,
      "learning_rate": 5.500000000000001e-05,
      "loss": 10.3732,
      "step": 26
    },
    {
      "epoch": 0.012665464620255949,
      "grad_norm": 0.024724390357732773,
      "learning_rate": 5.205685918464356e-05,
      "loss": 10.3739,
      "step": 27
    },
    {
      "epoch": 0.01313455590248765,
      "grad_norm": 0.02388211153447628,
      "learning_rate": 4.912632135009769e-05,
      "loss": 10.3734,
      "step": 28
    },
    {
      "epoch": 0.013603647184719351,
      "grad_norm": 0.025568397715687752,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 10.3717,
      "step": 29
    },
    {
      "epoch": 0.014072738466951054,
      "grad_norm": 0.024224581196904182,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 10.3734,
      "step": 30
    },
    {
      "epoch": 0.014541829749182755,
      "grad_norm": 0.023164361715316772,
      "learning_rate": 4.053522406135775e-05,
      "loss": 10.3719,
      "step": 31
    },
    {
      "epoch": 0.015010921031414456,
      "grad_norm": 0.0261879563331604,
      "learning_rate": 3.777924554357096e-05,
      "loss": 10.3726,
      "step": 32
    },
    {
      "epoch": 0.015480012313646159,
      "grad_norm": 0.024106085300445557,
      "learning_rate": 3.509700894014496e-05,
      "loss": 10.3721,
      "step": 33
    },
    {
      "epoch": 0.01594910359587786,
      "grad_norm": 0.025720128789544106,
      "learning_rate": 3.250000000000001e-05,
      "loss": 10.3738,
      "step": 34
    },
    {
      "epoch": 0.01641819487810956,
      "grad_norm": 0.02623668871819973,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 10.3706,
      "step": 35
    },
    {
      "epoch": 0.016887286160341262,
      "grad_norm": 0.026861609891057014,
      "learning_rate": 2.760573569460757e-05,
      "loss": 10.3714,
      "step": 36
    },
    {
      "epoch": 0.017356377442572967,
      "grad_norm": 0.028205374255776405,
      "learning_rate": 2.53294383204969e-05,
      "loss": 10.3704,
      "step": 37
    },
    {
      "epoch": 0.017825468724804668,
      "grad_norm": 0.029525011777877808,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 10.3726,
      "step": 38
    },
    {
      "epoch": 0.01829456000703637,
      "grad_norm": 0.028443878516554832,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 10.3732,
      "step": 39
    },
    {
      "epoch": 0.01876365128926807,
      "grad_norm": 0.02772536873817444,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 10.3729,
      "step": 40
    },
    {
      "epoch": 0.01923274257149977,
      "grad_norm": 0.03239374980330467,
      "learning_rate": 1.758386744638546e-05,
      "loss": 10.3713,
      "step": 41
    },
    {
      "epoch": 0.019701833853731473,
      "grad_norm": 0.029728103429079056,
      "learning_rate": 1.602885682970026e-05,
      "loss": 10.3728,
      "step": 42
    },
    {
      "epoch": 0.020170925135963177,
      "grad_norm": 0.0263677965849638,
      "learning_rate": 1.464072663102903e-05,
      "loss": 10.3729,
      "step": 43
    },
    {
      "epoch": 0.02064001641819488,
      "grad_norm": 0.028645142912864685,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 10.3708,
      "step": 44
    },
    {
      "epoch": 0.02110910770042658,
      "grad_norm": 0.03082318603992462,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 10.3732,
      "step": 45
    },
    {
      "epoch": 0.02157819898265828,
      "grad_norm": 0.02954714372754097,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 10.3724,
      "step": 46
    },
    {
      "epoch": 0.022047290264889982,
      "grad_norm": 0.023500103503465652,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 10.3709,
      "step": 47
    },
    {
      "epoch": 0.022516381547121687,
      "grad_norm": 0.031122390180826187,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 10.37,
      "step": 48
    },
    {
      "epoch": 0.022985472829353388,
      "grad_norm": 0.02998553402721882,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 10.3728,
      "step": 49
    },
    {
      "epoch": 0.02345456411158509,
      "grad_norm": 0.02776142582297325,
      "learning_rate": 1e-05,
      "loss": 10.3712,
      "step": 50
    },
    {
      "epoch": 0.02345456411158509,
      "eval_loss": 10.372435569763184,
      "eval_runtime": 0.112,
      "eval_samples_per_second": 446.254,
      "eval_steps_per_second": 116.026,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42768059596800.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|