{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.05812263876780006,
"eval_steps": 13,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011624527753560012,
"grad_norm": 0.9013734459877014,
"learning_rate": 5e-06,
"loss": 3.2382,
"step": 1
},
{
"epoch": 0.0011624527753560012,
"eval_loss": 0.6915596127510071,
"eval_runtime": 18.725,
"eval_samples_per_second": 19.386,
"eval_steps_per_second": 9.72,
"step": 1
},
{
"epoch": 0.0023249055507120024,
"grad_norm": 0.7998698949813843,
"learning_rate": 1e-05,
"loss": 2.4713,
"step": 2
},
{
"epoch": 0.0034873583260680036,
"grad_norm": 0.691608190536499,
"learning_rate": 1.5e-05,
"loss": 2.6607,
"step": 3
},
{
"epoch": 0.004649811101424005,
"grad_norm": 0.9019598960876465,
"learning_rate": 2e-05,
"loss": 3.0399,
"step": 4
},
{
"epoch": 0.005812263876780006,
"grad_norm": 0.6667659878730774,
"learning_rate": 2.5e-05,
"loss": 2.9629,
"step": 5
},
{
"epoch": 0.006974716652136007,
"grad_norm": 0.7605751156806946,
"learning_rate": 3e-05,
"loss": 2.5889,
"step": 6
},
{
"epoch": 0.008137169427492008,
"grad_norm": 0.8346095085144043,
"learning_rate": 3.5e-05,
"loss": 2.8887,
"step": 7
},
{
"epoch": 0.00929962220284801,
"grad_norm": 0.6577608585357666,
"learning_rate": 4e-05,
"loss": 2.8019,
"step": 8
},
{
"epoch": 0.010462074978204011,
"grad_norm": 0.8766710758209229,
"learning_rate": 4.5e-05,
"loss": 3.0156,
"step": 9
},
{
"epoch": 0.011624527753560012,
"grad_norm": 0.8462783098220825,
"learning_rate": 5e-05,
"loss": 2.9731,
"step": 10
},
{
"epoch": 0.012786980528916013,
"grad_norm": 0.8758143782615662,
"learning_rate": 4.99229333433282e-05,
"loss": 2.7932,
"step": 11
},
{
"epoch": 0.013949433304272014,
"grad_norm": 0.9164470434188843,
"learning_rate": 4.9692208514878444e-05,
"loss": 2.5973,
"step": 12
},
{
"epoch": 0.015111886079628016,
"grad_norm": 0.9943875670433044,
"learning_rate": 4.9309248009941914e-05,
"loss": 2.8627,
"step": 13
},
{
"epoch": 0.015111886079628016,
"eval_loss": 0.6720152497291565,
"eval_runtime": 18.6397,
"eval_samples_per_second": 19.475,
"eval_steps_per_second": 9.764,
"step": 13
},
{
"epoch": 0.016274338854984017,
"grad_norm": 1.0360639095306396,
"learning_rate": 4.877641290737884e-05,
"loss": 2.784,
"step": 14
},
{
"epoch": 0.017436791630340016,
"grad_norm": 0.9277502298355103,
"learning_rate": 4.8096988312782174e-05,
"loss": 2.1715,
"step": 15
},
{
"epoch": 0.01859924440569602,
"grad_norm": 1.2977659702301025,
"learning_rate": 4.72751631047092e-05,
"loss": 2.3805,
"step": 16
},
{
"epoch": 0.01976169718105202,
"grad_norm": 1.1046712398529053,
"learning_rate": 4.6316004108852305e-05,
"loss": 2.6283,
"step": 17
},
{
"epoch": 0.020924149956408022,
"grad_norm": 1.2673217058181763,
"learning_rate": 4.522542485937369e-05,
"loss": 3.021,
"step": 18
},
{
"epoch": 0.02208660273176402,
"grad_norm": 0.9734105467796326,
"learning_rate": 4.401014914000078e-05,
"loss": 2.597,
"step": 19
},
{
"epoch": 0.023249055507120024,
"grad_norm": 1.0393081903457642,
"learning_rate": 4.267766952966369e-05,
"loss": 2.5482,
"step": 20
},
{
"epoch": 0.024411508282476024,
"grad_norm": 1.247772455215454,
"learning_rate": 4.123620120825459e-05,
"loss": 2.6122,
"step": 21
},
{
"epoch": 0.025573961057832027,
"grad_norm": 1.2255369424819946,
"learning_rate": 3.969463130731183e-05,
"loss": 2.8942,
"step": 22
},
{
"epoch": 0.026736413833188026,
"grad_norm": 1.286831259727478,
"learning_rate": 3.8062464117898724e-05,
"loss": 2.2942,
"step": 23
},
{
"epoch": 0.02789886660854403,
"grad_norm": 1.4110021591186523,
"learning_rate": 3.634976249348867e-05,
"loss": 2.113,
"step": 24
},
{
"epoch": 0.02906131938390003,
"grad_norm": 1.2104796171188354,
"learning_rate": 3.456708580912725e-05,
"loss": 2.1373,
"step": 25
},
{
"epoch": 0.03022377215925603,
"grad_norm": 1.2794564962387085,
"learning_rate": 3.272542485937369e-05,
"loss": 2.465,
"step": 26
},
{
"epoch": 0.03022377215925603,
"eval_loss": 0.6091086268424988,
"eval_runtime": 18.6804,
"eval_samples_per_second": 19.432,
"eval_steps_per_second": 9.743,
"step": 26
},
{
"epoch": 0.03138622493461203,
"grad_norm": 1.445319414138794,
"learning_rate": 3.083613409639764e-05,
"loss": 2.0072,
"step": 27
},
{
"epoch": 0.032548677709968034,
"grad_norm": 1.4100600481033325,
"learning_rate": 2.8910861626005776e-05,
"loss": 2.9271,
"step": 28
},
{
"epoch": 0.03371113048532404,
"grad_norm": 1.2174726724624634,
"learning_rate": 2.6961477393196126e-05,
"loss": 2.325,
"step": 29
},
{
"epoch": 0.03487358326068003,
"grad_norm": 1.3934134244918823,
"learning_rate": 2.5e-05,
"loss": 2.4305,
"step": 30
},
{
"epoch": 0.036036036036036036,
"grad_norm": 1.353877067565918,
"learning_rate": 2.303852260680388e-05,
"loss": 1.9587,
"step": 31
},
{
"epoch": 0.03719848881139204,
"grad_norm": 1.3453905582427979,
"learning_rate": 2.1089138373994223e-05,
"loss": 2.4014,
"step": 32
},
{
"epoch": 0.03836094158674804,
"grad_norm": 1.4014428853988647,
"learning_rate": 1.9163865903602374e-05,
"loss": 2.4628,
"step": 33
},
{
"epoch": 0.03952339436210404,
"grad_norm": 1.7237356901168823,
"learning_rate": 1.7274575140626318e-05,
"loss": 2.5534,
"step": 34
},
{
"epoch": 0.04068584713746004,
"grad_norm": 1.6201252937316895,
"learning_rate": 1.5432914190872757e-05,
"loss": 2.6219,
"step": 35
},
{
"epoch": 0.041848299912816043,
"grad_norm": 1.7005544900894165,
"learning_rate": 1.3650237506511331e-05,
"loss": 2.2468,
"step": 36
},
{
"epoch": 0.043010752688172046,
"grad_norm": 1.5233935117721558,
"learning_rate": 1.1937535882101281e-05,
"loss": 2.4295,
"step": 37
},
{
"epoch": 0.04417320546352804,
"grad_norm": 1.4735093116760254,
"learning_rate": 1.0305368692688174e-05,
"loss": 2.3018,
"step": 38
},
{
"epoch": 0.045335658238884045,
"grad_norm": 1.803377389907837,
"learning_rate": 8.763798791745411e-06,
"loss": 2.6965,
"step": 39
},
{
"epoch": 0.045335658238884045,
"eval_loss": 0.5883385539054871,
"eval_runtime": 18.5863,
"eval_samples_per_second": 19.531,
"eval_steps_per_second": 9.792,
"step": 39
},
{
"epoch": 0.04649811101424005,
"grad_norm": 1.3496054410934448,
"learning_rate": 7.3223304703363135e-06,
"loss": 2.3177,
"step": 40
},
{
"epoch": 0.047660563789596044,
"grad_norm": 1.4967939853668213,
"learning_rate": 5.989850859999227e-06,
"loss": 2.5048,
"step": 41
},
{
"epoch": 0.04882301656495205,
"grad_norm": 1.4733471870422363,
"learning_rate": 4.7745751406263165e-06,
"loss": 2.2494,
"step": 42
},
{
"epoch": 0.04998546934030805,
"grad_norm": 1.2914958000183105,
"learning_rate": 3.6839958911476957e-06,
"loss": 2.0449,
"step": 43
},
{
"epoch": 0.05114792211566405,
"grad_norm": 1.4443607330322266,
"learning_rate": 2.7248368952908053e-06,
"loss": 2.2681,
"step": 44
},
{
"epoch": 0.05231037489102005,
"grad_norm": 1.3686310052871704,
"learning_rate": 1.9030116872178316e-06,
"loss": 2.5226,
"step": 45
},
{
"epoch": 0.05347282766637605,
"grad_norm": 1.6216455698013306,
"learning_rate": 1.2235870926211619e-06,
"loss": 2.3361,
"step": 46
},
{
"epoch": 0.054635280441732055,
"grad_norm": 2.076577663421631,
"learning_rate": 6.907519900580861e-07,
"loss": 2.9312,
"step": 47
},
{
"epoch": 0.05579773321708806,
"grad_norm": 1.4256387948989868,
"learning_rate": 3.077914851215585e-07,
"loss": 2.2066,
"step": 48
},
{
"epoch": 0.056960185992444054,
"grad_norm": 1.4510419368743896,
"learning_rate": 7.706665667180091e-08,
"loss": 2.1987,
"step": 49
},
{
"epoch": 0.05812263876780006,
"grad_norm": 1.2870044708251953,
"learning_rate": 0.0,
"loss": 2.4237,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3803513326862336e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}