{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.001,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1e-05,
"grad_norm": 1.7129806011276105,
"learning_rate": 3e-06,
"loss": 10.8348,
"step": 1
},
{
"epoch": 2e-05,
"grad_norm": 1.6872753303603527,
"learning_rate": 6e-06,
"loss": 10.8356,
"step": 2
},
{
"epoch": 3e-05,
"grad_norm": 1.6970020903903387,
"learning_rate": 9e-06,
"loss": 10.834,
"step": 3
},
{
"epoch": 4e-05,
"grad_norm": 1.690199421583159,
"learning_rate": 1.2e-05,
"loss": 10.8334,
"step": 4
},
{
"epoch": 5e-05,
"grad_norm": 1.6936208883930068,
"learning_rate": 1.5e-05,
"loss": 10.8294,
"step": 5
},
{
"epoch": 6e-05,
"grad_norm": 1.6935154610161474,
"learning_rate": 1.8e-05,
"loss": 10.8281,
"step": 6
},
{
"epoch": 7e-05,
"grad_norm": 1.6333694173725648,
"learning_rate": 2.1000000000000002e-05,
"loss": 10.8133,
"step": 7
},
{
"epoch": 8e-05,
"grad_norm": 1.4463755011186001,
"learning_rate": 2.4e-05,
"loss": 10.783,
"step": 8
},
{
"epoch": 9e-05,
"grad_norm": 1.3815123169360315,
"learning_rate": 2.7e-05,
"loss": 10.7779,
"step": 9
},
{
"epoch": 0.0001,
"grad_norm": 1.3507621465484316,
"learning_rate": 3e-05,
"loss": 10.7629,
"step": 10
},
{
"epoch": 0.00011,
"grad_norm": 1.257508561634155,
"learning_rate": 3.2999999999999996e-05,
"loss": 10.7454,
"step": 11
},
{
"epoch": 0.00012,
"grad_norm": 1.224298046820689,
"learning_rate": 3.6e-05,
"loss": 10.7321,
"step": 12
},
{
"epoch": 0.00013,
"grad_norm": 1.1609107458726389,
"learning_rate": 3.9e-05,
"loss": 10.7098,
"step": 13
},
{
"epoch": 0.00014,
"grad_norm": 1.1251765756585856,
"learning_rate": 4.2000000000000004e-05,
"loss": 10.6986,
"step": 14
},
{
"epoch": 0.00015,
"grad_norm": 1.1021031797679595,
"learning_rate": 4.4999999999999996e-05,
"loss": 10.6882,
"step": 15
},
{
"epoch": 0.00016,
"grad_norm": 1.05231707077907,
"learning_rate": 4.8e-05,
"loss": 10.6681,
"step": 16
},
{
"epoch": 0.00017,
"grad_norm": 1.0082613504118885,
"learning_rate": 5.1000000000000006e-05,
"loss": 10.6513,
"step": 17
},
{
"epoch": 0.00018,
"grad_norm": 0.9840324393168475,
"learning_rate": 5.4e-05,
"loss": 10.6344,
"step": 18
},
{
"epoch": 0.00019,
"grad_norm": 0.953923239589578,
"learning_rate": 5.7e-05,
"loss": 10.6196,
"step": 19
},
{
"epoch": 0.0002,
"grad_norm": 0.9458057853193742,
"learning_rate": 6e-05,
"loss": 10.6069,
"step": 20
},
{
"epoch": 0.00021,
"grad_norm": 0.9177457999897578,
"learning_rate": 6.3e-05,
"loss": 10.5922,
"step": 21
},
{
"epoch": 0.00022,
"grad_norm": 0.9134874433162486,
"learning_rate": 6.599999999999999e-05,
"loss": 10.576,
"step": 22
},
{
"epoch": 0.00023,
"grad_norm": 0.908696989628468,
"learning_rate": 6.9e-05,
"loss": 10.5635,
"step": 23
},
{
"epoch": 0.00024,
"grad_norm": 0.9051143007426985,
"learning_rate": 7.2e-05,
"loss": 10.5499,
"step": 24
},
{
"epoch": 0.00025,
"grad_norm": 0.9082451576693834,
"learning_rate": 7.500000000000001e-05,
"loss": 10.5361,
"step": 25
},
{
"epoch": 0.00026,
"grad_norm": 0.9099344486243927,
"learning_rate": 7.8e-05,
"loss": 10.521,
"step": 26
},
{
"epoch": 0.00027,
"grad_norm": 0.9053293550746107,
"learning_rate": 8.1e-05,
"loss": 10.5103,
"step": 27
},
{
"epoch": 0.00028,
"grad_norm": 0.9002471153364864,
"learning_rate": 8.400000000000001e-05,
"loss": 10.4955,
"step": 28
},
{
"epoch": 0.00029,
"grad_norm": 0.9068699186733776,
"learning_rate": 8.7e-05,
"loss": 10.4811,
"step": 29
},
{
"epoch": 0.0003,
"grad_norm": 0.9095271030063902,
"learning_rate": 8.999999999999999e-05,
"loss": 10.4648,
"step": 30
},
{
"epoch": 0.00031,
"grad_norm": 0.9097010936405139,
"learning_rate": 9.3e-05,
"loss": 10.4503,
"step": 31
},
{
"epoch": 0.00032,
"grad_norm": 0.9047462396891427,
"learning_rate": 9.6e-05,
"loss": 10.4348,
"step": 32
},
{
"epoch": 0.00033,
"grad_norm": 0.9068703333942145,
"learning_rate": 9.900000000000001e-05,
"loss": 10.418,
"step": 33
},
{
"epoch": 0.00034,
"grad_norm": 0.9072965837486595,
"learning_rate": 0.00010200000000000001,
"loss": 10.4,
"step": 34
},
{
"epoch": 0.00035,
"grad_norm": 0.9110003633890357,
"learning_rate": 0.00010500000000000002,
"loss": 10.3835,
"step": 35
},
{
"epoch": 0.00036,
"grad_norm": 0.9049119959927198,
"learning_rate": 0.000108,
"loss": 10.3652,
"step": 36
},
{
"epoch": 0.00037,
"grad_norm": 0.8970709544624084,
"learning_rate": 0.000111,
"loss": 10.3479,
"step": 37
},
{
"epoch": 0.00038,
"grad_norm": 0.8959068278842482,
"learning_rate": 0.000114,
"loss": 10.3275,
"step": 38
},
{
"epoch": 0.00039,
"grad_norm": 0.9005947927478184,
"learning_rate": 0.000117,
"loss": 10.3069,
"step": 39
},
{
"epoch": 0.0004,
"grad_norm": 0.9014442598894896,
"learning_rate": 0.00012,
"loss": 10.2842,
"step": 40
},
{
"epoch": 0.00041,
"grad_norm": 0.8992939718171602,
"learning_rate": 0.000123,
"loss": 10.2657,
"step": 41
},
{
"epoch": 0.00042,
"grad_norm": 0.8994818536906172,
"learning_rate": 0.000126,
"loss": 10.2444,
"step": 42
},
{
"epoch": 0.00043,
"grad_norm": 0.9062946670458473,
"learning_rate": 0.000129,
"loss": 10.2208,
"step": 43
},
{
"epoch": 0.00044,
"grad_norm": 0.9072550424345267,
"learning_rate": 0.00013199999999999998,
"loss": 10.1985,
"step": 44
},
{
"epoch": 0.00045,
"grad_norm": 0.908308760029939,
"learning_rate": 0.000135,
"loss": 10.1758,
"step": 45
},
{
"epoch": 0.00046,
"grad_norm": 0.8994605508976834,
"learning_rate": 0.000138,
"loss": 10.1528,
"step": 46
},
{
"epoch": 0.00047,
"grad_norm": 0.904955141258698,
"learning_rate": 0.000141,
"loss": 10.1274,
"step": 47
},
{
"epoch": 0.00048,
"grad_norm": 0.9044693581157806,
"learning_rate": 0.000144,
"loss": 10.1031,
"step": 48
},
{
"epoch": 0.00049,
"grad_norm": 0.8992120995192336,
"learning_rate": 0.000147,
"loss": 10.0777,
"step": 49
},
{
"epoch": 0.0005,
"grad_norm": 0.905676588399281,
"learning_rate": 0.00015000000000000001,
"loss": 10.0519,
"step": 50
},
{
"epoch": 0.00051,
"grad_norm": 0.9066841497261428,
"learning_rate": 0.000153,
"loss": 10.0251,
"step": 51
},
{
"epoch": 0.00052,
"grad_norm": 0.9046656683417261,
"learning_rate": 0.000156,
"loss": 9.9981,
"step": 52
},
{
"epoch": 0.00053,
"grad_norm": 0.8943714853313668,
"learning_rate": 0.000159,
"loss": 9.974,
"step": 53
},
{
"epoch": 0.00054,
"grad_norm": 0.9141658233846578,
"learning_rate": 0.000162,
"loss": 9.9419,
"step": 54
},
{
"epoch": 0.00055,
"grad_norm": 0.9035944774643171,
"learning_rate": 0.000165,
"loss": 9.9169,
"step": 55
},
{
"epoch": 0.00056,
"grad_norm": 0.895407870582166,
"learning_rate": 0.00016800000000000002,
"loss": 9.8872,
"step": 56
},
{
"epoch": 0.00057,
"grad_norm": 0.9021731997760362,
"learning_rate": 0.000171,
"loss": 9.8601,
"step": 57
},
{
"epoch": 0.00058,
"grad_norm": 0.8980871554912008,
"learning_rate": 0.000174,
"loss": 9.8343,
"step": 58
},
{
"epoch": 0.00059,
"grad_norm": 0.9073832283363998,
"learning_rate": 0.000177,
"loss": 9.8028,
"step": 59
},
{
"epoch": 0.0006,
"grad_norm": 0.8921071838486323,
"learning_rate": 0.00017999999999999998,
"loss": 9.777,
"step": 60
},
{
"epoch": 0.00061,
"grad_norm": 0.8918001641348363,
"learning_rate": 0.000183,
"loss": 9.7484,
"step": 61
},
{
"epoch": 0.00062,
"grad_norm": 0.897401330332219,
"learning_rate": 0.000186,
"loss": 9.717,
"step": 62
},
{
"epoch": 0.00063,
"grad_norm": 0.8914516241190131,
"learning_rate": 0.000189,
"loss": 9.6894,
"step": 63
},
{
"epoch": 0.00064,
"grad_norm": 0.8896652156254973,
"learning_rate": 0.000192,
"loss": 9.6587,
"step": 64
},
{
"epoch": 0.00065,
"grad_norm": 0.8995447585153489,
"learning_rate": 0.00019500000000000002,
"loss": 9.6261,
"step": 65
},
{
"epoch": 0.00066,
"grad_norm": 0.8896939041293862,
"learning_rate": 0.00019800000000000002,
"loss": 9.6026,
"step": 66
},
{
"epoch": 0.00067,
"grad_norm": 0.8935314234316469,
"learning_rate": 0.000201,
"loss": 9.5723,
"step": 67
},
{
"epoch": 0.00068,
"grad_norm": 0.8971584182008717,
"learning_rate": 0.00020400000000000003,
"loss": 9.5393,
"step": 68
},
{
"epoch": 0.00069,
"grad_norm": 0.8806455604370673,
"learning_rate": 0.00020700000000000002,
"loss": 9.5119,
"step": 69
},
{
"epoch": 0.0007,
"grad_norm": 0.892956094531968,
"learning_rate": 0.00021000000000000004,
"loss": 9.4751,
"step": 70
},
{
"epoch": 0.00071,
"grad_norm": 0.8848452972865632,
"learning_rate": 0.00021299999999999997,
"loss": 9.4495,
"step": 71
},
{
"epoch": 0.00072,
"grad_norm": 0.8831148731992822,
"learning_rate": 0.000216,
"loss": 9.4223,
"step": 72
},
{
"epoch": 0.00073,
"grad_norm": 0.887150899449638,
"learning_rate": 0.00021899999999999998,
"loss": 9.3879,
"step": 73
},
{
"epoch": 0.00074,
"grad_norm": 0.8878619769377328,
"learning_rate": 0.000222,
"loss": 9.3616,
"step": 74
},
{
"epoch": 0.00075,
"grad_norm": 0.8808154408936898,
"learning_rate": 0.000225,
"loss": 9.3275,
"step": 75
},
{
"epoch": 0.00076,
"grad_norm": 0.8908035269749474,
"learning_rate": 0.000228,
"loss": 9.3009,
"step": 76
},
{
"epoch": 0.00077,
"grad_norm": 0.884531047332737,
"learning_rate": 0.000231,
"loss": 9.2727,
"step": 77
},
{
"epoch": 0.00078,
"grad_norm": 0.8838664917591654,
"learning_rate": 0.000234,
"loss": 9.2422,
"step": 78
},
{
"epoch": 0.00079,
"grad_norm": 0.8858668201182466,
"learning_rate": 0.00023700000000000001,
"loss": 9.2056,
"step": 79
},
{
"epoch": 0.0008,
"grad_norm": 0.8856967305037482,
"learning_rate": 0.00024,
"loss": 9.1711,
"step": 80
},
{
"epoch": 0.00081,
"grad_norm": 0.8942846826675519,
"learning_rate": 0.00024300000000000002,
"loss": 9.1382,
"step": 81
},
{
"epoch": 0.00082,
"grad_norm": 0.897767651472895,
"learning_rate": 0.000246,
"loss": 9.1142,
"step": 82
},
{
"epoch": 0.00083,
"grad_norm": 0.8951752702012633,
"learning_rate": 0.00024900000000000004,
"loss": 9.0778,
"step": 83
},
{
"epoch": 0.00084,
"grad_norm": 0.8980395289622467,
"learning_rate": 0.000252,
"loss": 9.0469,
"step": 84
},
{
"epoch": 0.00085,
"grad_norm": 0.8894006576183595,
"learning_rate": 0.000255,
"loss": 9.0242,
"step": 85
},
{
"epoch": 0.00086,
"grad_norm": 0.8907945566480024,
"learning_rate": 0.000258,
"loss": 8.9886,
"step": 86
},
{
"epoch": 0.00087,
"grad_norm": 0.8869170795764568,
"learning_rate": 0.000261,
"loss": 8.9664,
"step": 87
},
{
"epoch": 0.00088,
"grad_norm": 0.8860541210154026,
"learning_rate": 0.00026399999999999997,
"loss": 8.9293,
"step": 88
},
{
"epoch": 0.00089,
"grad_norm": 0.8822605600899943,
"learning_rate": 0.000267,
"loss": 8.9037,
"step": 89
},
{
"epoch": 0.0009,
"grad_norm": 0.8817151929172502,
"learning_rate": 0.00027,
"loss": 8.8766,
"step": 90
},
{
"epoch": 0.00091,
"grad_norm": 0.877617615465877,
"learning_rate": 0.000273,
"loss": 8.8478,
"step": 91
},
{
"epoch": 0.00092,
"grad_norm": 0.8822716293479064,
"learning_rate": 0.000276,
"loss": 8.8156,
"step": 92
},
{
"epoch": 0.00093,
"grad_norm": 0.8823661552266111,
"learning_rate": 0.000279,
"loss": 8.7863,
"step": 93
},
{
"epoch": 0.00094,
"grad_norm": 0.8830384482321385,
"learning_rate": 0.000282,
"loss": 8.7609,
"step": 94
},
{
"epoch": 0.00095,
"grad_norm": 0.8735042737334501,
"learning_rate": 0.000285,
"loss": 8.7321,
"step": 95
},
{
"epoch": 0.00096,
"grad_norm": 0.8799214796836804,
"learning_rate": 0.000288,
"loss": 8.7028,
"step": 96
},
{
"epoch": 0.00097,
"grad_norm": 0.8704594748643596,
"learning_rate": 0.000291,
"loss": 8.6791,
"step": 97
},
{
"epoch": 0.00098,
"grad_norm": 0.8706415983834461,
"learning_rate": 0.000294,
"loss": 8.642,
"step": 98
},
{
"epoch": 0.00099,
"grad_norm": 0.8683426041650804,
"learning_rate": 0.000297,
"loss": 8.62,
"step": 99
},
{
"epoch": 0.001,
"grad_norm": 0.8690594926543161,
"learning_rate": 0.00030000000000000003,
"loss": 8.5941,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3964364285542400.0,
"train_batch_size": 512,
"trial_name": null,
"trial_params": null
}