{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.864864864864865,
"eval_steps": 500,
"global_step": 45,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10810810810810811,
"grad_norm": 12.665340423583984,
"learning_rate": 2e-05,
"loss": 1.7136,
"step": 1
},
{
"epoch": 0.21621621621621623,
"grad_norm": 16.679874420166016,
"learning_rate": 4e-05,
"loss": 1.8963,
"step": 2
},
{
"epoch": 0.32432432432432434,
"grad_norm": 20.59364891052246,
"learning_rate": 6e-05,
"loss": 1.9249,
"step": 3
},
{
"epoch": 0.43243243243243246,
"grad_norm": 14.520182609558105,
"learning_rate": 8e-05,
"loss": 1.9998,
"step": 4
},
{
"epoch": 0.5405405405405406,
"grad_norm": 12.898869514465332,
"learning_rate": 0.0001,
"loss": 1.7198,
"step": 5
},
{
"epoch": 0.6486486486486487,
"grad_norm": 8.388209342956543,
"learning_rate": 0.00012,
"loss": 1.4897,
"step": 6
},
{
"epoch": 0.7567567567567568,
"grad_norm": 11.714303016662598,
"learning_rate": 0.00014,
"loss": 1.496,
"step": 7
},
{
"epoch": 0.8648648648648649,
"grad_norm": 9.486503601074219,
"learning_rate": 0.00016,
"loss": 1.2274,
"step": 8
},
{
"epoch": 0.972972972972973,
"grad_norm": 5.287358283996582,
"learning_rate": 0.00018,
"loss": 0.9407,
"step": 9
},
{
"epoch": 1.0810810810810811,
"grad_norm": 8.37257194519043,
"learning_rate": 0.0002,
"loss": 1.345,
"step": 10
},
{
"epoch": 1.1891891891891893,
"grad_norm": 4.060468673706055,
"learning_rate": 0.0001999229036240723,
"loss": 0.4743,
"step": 11
},
{
"epoch": 1.2972972972972974,
"grad_norm": 5.808952808380127,
"learning_rate": 0.0001996917333733128,
"loss": 0.3448,
"step": 12
},
{
"epoch": 1.4054054054054055,
"grad_norm": 3.857832193374634,
"learning_rate": 0.00019930684569549264,
"loss": 0.2794,
"step": 13
},
{
"epoch": 1.5135135135135136,
"grad_norm": 17.618928909301758,
"learning_rate": 0.00019876883405951377,
"loss": 0.2564,
"step": 14
},
{
"epoch": 1.6216216216216215,
"grad_norm": 3.731938362121582,
"learning_rate": 0.00019807852804032305,
"loss": 0.127,
"step": 15
},
{
"epoch": 1.7297297297297298,
"grad_norm": 10.822015762329102,
"learning_rate": 0.00019723699203976766,
"loss": 0.1671,
"step": 16
},
{
"epoch": 1.8378378378378377,
"grad_norm": 6.625267028808594,
"learning_rate": 0.00019624552364536473,
"loss": 0.165,
"step": 17
},
{
"epoch": 1.945945945945946,
"grad_norm": 3.4650943279266357,
"learning_rate": 0.00019510565162951537,
"loss": 0.1427,
"step": 18
},
{
"epoch": 2.054054054054054,
"grad_norm": 2.571530818939209,
"learning_rate": 0.00019381913359224842,
"loss": 0.2318,
"step": 19
},
{
"epoch": 2.1621621621621623,
"grad_norm": 1.4535036087036133,
"learning_rate": 0.0001923879532511287,
"loss": 0.0737,
"step": 20
},
{
"epoch": 2.27027027027027,
"grad_norm": 3.276848077774048,
"learning_rate": 0.00019081431738250814,
"loss": 0.1144,
"step": 21
},
{
"epoch": 2.3783783783783785,
"grad_norm": 1.9437713623046875,
"learning_rate": 0.0001891006524188368,
"loss": 0.1057,
"step": 22
},
{
"epoch": 2.4864864864864864,
"grad_norm": 0.7327317595481873,
"learning_rate": 0.00018724960070727972,
"loss": 0.0499,
"step": 23
},
{
"epoch": 2.5945945945945947,
"grad_norm": 0.6805225610733032,
"learning_rate": 0.00018526401643540922,
"loss": 0.0393,
"step": 24
},
{
"epoch": 2.7027027027027026,
"grad_norm": 1.9485719203948975,
"learning_rate": 0.00018314696123025454,
"loss": 0.1071,
"step": 25
},
{
"epoch": 2.810810810810811,
"grad_norm": 1.6188167333602905,
"learning_rate": 0.00018090169943749476,
"loss": 0.076,
"step": 26
},
{
"epoch": 2.918918918918919,
"grad_norm": 0.5011986494064331,
"learning_rate": 0.00017853169308807448,
"loss": 0.0431,
"step": 27
},
{
"epoch": 3.027027027027027,
"grad_norm": 3.750688314437866,
"learning_rate": 0.0001760405965600031,
"loss": 0.151,
"step": 28
},
{
"epoch": 3.135135135135135,
"grad_norm": 1.4113177061080933,
"learning_rate": 0.00017343225094356855,
"loss": 0.0692,
"step": 29
},
{
"epoch": 3.2432432432432434,
"grad_norm": 1.220698595046997,
"learning_rate": 0.00017071067811865476,
"loss": 0.0671,
"step": 30
},
{
"epoch": 3.3513513513513513,
"grad_norm": 0.5514506101608276,
"learning_rate": 0.0001678800745532942,
"loss": 0.0592,
"step": 31
},
{
"epoch": 3.4594594594594597,
"grad_norm": 1.1301401853561401,
"learning_rate": 0.00016494480483301836,
"loss": 0.0709,
"step": 32
},
{
"epoch": 3.5675675675675675,
"grad_norm": 2.738895893096924,
"learning_rate": 0.00016190939493098344,
"loss": 0.0752,
"step": 33
},
{
"epoch": 3.6756756756756754,
"grad_norm": 1.05312180519104,
"learning_rate": 0.00015877852522924732,
"loss": 0.044,
"step": 34
},
{
"epoch": 3.7837837837837838,
"grad_norm": 0.8468568325042725,
"learning_rate": 0.00015555702330196023,
"loss": 0.053,
"step": 35
},
{
"epoch": 3.891891891891892,
"grad_norm": 0.8362758159637451,
"learning_rate": 0.0001522498564715949,
"loss": 0.0694,
"step": 36
},
{
"epoch": 4.0,
"grad_norm": 0.9148968458175659,
"learning_rate": 0.00014886212414969553,
"loss": 0.0675,
"step": 37
},
{
"epoch": 4.108108108108108,
"grad_norm": 0.5609957575798035,
"learning_rate": 0.00014539904997395468,
"loss": 0.0585,
"step": 38
},
{
"epoch": 4.216216216216216,
"grad_norm": 0.7747177481651306,
"learning_rate": 0.0001418659737537428,
"loss": 0.0389,
"step": 39
},
{
"epoch": 4.324324324324325,
"grad_norm": 0.40356096625328064,
"learning_rate": 0.000138268343236509,
"loss": 0.0367,
"step": 40
},
{
"epoch": 4.4324324324324325,
"grad_norm": 0.8019518256187439,
"learning_rate": 0.0001346117057077493,
"loss": 0.048,
"step": 41
},
{
"epoch": 4.54054054054054,
"grad_norm": 0.746637761592865,
"learning_rate": 0.00013090169943749476,
"loss": 0.0484,
"step": 42
},
{
"epoch": 4.648648648648649,
"grad_norm": 0.9343522191047668,
"learning_rate": 0.00012714404498650743,
"loss": 0.0457,
"step": 43
},
{
"epoch": 4.756756756756757,
"grad_norm": 0.5715629458427429,
"learning_rate": 0.00012334453638559057,
"loss": 0.04,
"step": 44
},
{
"epoch": 4.864864864864865,
"grad_norm": 0.2774278223514557,
"learning_rate": 0.00011950903220161285,
"loss": 0.0369,
"step": 45
}
],
"logging_steps": 1,
"max_steps": 90,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.970590133387264e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}