{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01075,
  "eval_steps": 500,
  "global_step": 1075,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
    {"epoch": 1e-05, "grad_norm": 1.7129806011276105, "learning_rate": 3e-06, "loss": 10.8348, "step": 1},
    {"epoch": 2e-05, "grad_norm": 1.6872753303603527, "learning_rate": 6e-06, "loss": 10.8356, "step": 2},
    {"epoch": 3e-05, "grad_norm": 1.6970020903903387, "learning_rate": 9e-06, "loss": 10.834, "step": 3},
    {"epoch": 4e-05, "grad_norm": 1.690199421583159, "learning_rate": 1.2e-05, "loss": 10.8334, "step": 4},
    {"epoch": 5e-05, "grad_norm": 1.6936208883930068, "learning_rate": 1.5e-05, "loss": 10.8294, "step": 5},
    {"epoch": 6e-05, "grad_norm": 1.6935154610161474, "learning_rate": 1.8e-05, "loss": 10.8281, "step": 6},
    {"epoch": 7e-05, "grad_norm": 1.6333694173725648, "learning_rate": 2.1000000000000002e-05, "loss": 10.8133, "step": 7},
    {"epoch": 8e-05, "grad_norm": 1.4463755011186001, "learning_rate": 2.4e-05, "loss": 10.783, "step": 8},
    {"epoch": 9e-05, "grad_norm": 1.3815123169360315, "learning_rate": 2.7e-05, "loss": 10.7779, "step": 9},
    {"epoch": 0.0001, "grad_norm": 1.3507621465484316, "learning_rate": 3e-05, "loss": 10.7629, "step": 10},
    {"epoch": 0.00011, "grad_norm": 1.257508561634155, "learning_rate": 3.2999999999999996e-05, "loss": 10.7454, "step": 11},
    {"epoch": 0.00012, "grad_norm": 1.224298046820689, "learning_rate": 3.6e-05, "loss": 10.7321, "step": 12},
    {"epoch": 0.00013, "grad_norm": 1.1609107458726389, "learning_rate": 3.9e-05, "loss": 10.7098, "step": 13},
    {"epoch": 0.00014, "grad_norm": 1.1251765756585856, "learning_rate": 4.2000000000000004e-05, "loss": 10.6986, "step": 14},
    {"epoch": 0.00015, "grad_norm": 1.1021031797679595, "learning_rate": 4.4999999999999996e-05, "loss": 10.6882, "step": 15},
    {"epoch": 0.00016, "grad_norm": 1.05231707077907, "learning_rate": 4.8e-05, "loss": 10.6681, "step": 16},
    {"epoch": 0.00017, "grad_norm": 1.0082613504118885, "learning_rate": 5.1000000000000006e-05, "loss": 10.6513, "step": 17},
    {"epoch": 0.00018, "grad_norm": 0.9840324393168475, "learning_rate": 5.4e-05, "loss": 10.6344, "step": 18},
    {"epoch": 0.00019, "grad_norm": 0.953923239589578, "learning_rate": 5.7e-05, "loss": 10.6196, "step": 19},
    {"epoch": 0.0002, "grad_norm": 0.9458057853193742, "learning_rate": 6e-05, "loss": 10.6069, "step": 20},
    {"epoch": 0.00021, "grad_norm": 0.9177457999897578, "learning_rate": 6.3e-05, "loss": 10.5922, "step": 21},
    {"epoch": 0.00022, "grad_norm": 0.9134874433162486, "learning_rate": 6.599999999999999e-05, "loss": 10.576, "step": 22},
    {"epoch": 0.00023, "grad_norm": 0.908696989628468, "learning_rate": 6.9e-05, "loss": 10.5635, "step": 23},
    {"epoch": 0.00024, "grad_norm": 0.9051143007426985, "learning_rate": 7.2e-05, "loss": 10.5499, "step": 24},
    {"epoch": 0.00025, "grad_norm": 0.9082451576693834, "learning_rate": 7.500000000000001e-05, "loss": 10.5361, "step": 25},
    {"epoch": 0.00026, "grad_norm": 0.9099344486243927, "learning_rate": 7.8e-05, "loss": 10.521, "step": 26},
    {"epoch": 0.00027, "grad_norm": 0.9053293550746107, "learning_rate": 8.1e-05, "loss": 10.5103, "step": 27},
    {"epoch": 0.00028, "grad_norm": 0.9002471153364864, "learning_rate": 8.400000000000001e-05, "loss": 10.4955, "step": 28},
    {"epoch": 0.00029, "grad_norm": 0.9068699186733776, "learning_rate": 8.7e-05, "loss": 10.4811, "step": 29},
    {"epoch": 0.0003, "grad_norm": 0.9095271030063902, "learning_rate": 8.999999999999999e-05, "loss": 10.4648, "step": 30},
    {"epoch": 0.00031, "grad_norm": 0.9097010936405139, "learning_rate": 9.3e-05, "loss": 10.4503, "step": 31},
    {"epoch": 0.00032, "grad_norm": 0.9047462396891427, "learning_rate": 9.6e-05, "loss": 10.4348, "step": 32},
    {"epoch": 0.00033, "grad_norm": 0.9068703333942145, "learning_rate": 9.900000000000001e-05, "loss": 10.418, "step": 33},
    {"epoch": 0.00034, "grad_norm": 0.9072965837486595, "learning_rate": 0.00010200000000000001, "loss": 10.4, "step": 34},
    {"epoch": 0.00035, "grad_norm": 0.9110003633890357, "learning_rate": 0.00010500000000000002, "loss": 10.3835, "step": 35},
    {"epoch": 0.00036, "grad_norm": 0.9049119959927198, "learning_rate": 0.000108, "loss": 10.3652, "step": 36},
    {"epoch": 0.00037, "grad_norm": 0.8970709544624084, "learning_rate": 0.000111, "loss": 10.3479, "step": 37},
    {"epoch": 0.00038, "grad_norm": 0.8959068278842482, "learning_rate": 0.000114, "loss": 10.3275, "step": 38},
    {"epoch": 0.00039, "grad_norm": 0.9005947927478184, "learning_rate": 0.000117, "loss": 10.3069, "step": 39},
    {"epoch": 0.0004, "grad_norm": 0.9014442598894896, "learning_rate": 0.00012, "loss": 10.2842, "step": 40},
    {"epoch": 0.00041, "grad_norm": 0.8992939718171602, "learning_rate": 0.000123, "loss": 10.2657, "step": 41},
    {"epoch": 0.00042, "grad_norm": 0.8994818536906172, "learning_rate": 0.000126, "loss": 10.2444, "step": 42},
    {"epoch": 0.00043, "grad_norm": 0.9062946670458473, "learning_rate": 0.000129, "loss": 10.2208, "step": 43},
    {"epoch": 0.00044, "grad_norm": 0.9072550424345267, "learning_rate": 0.00013199999999999998, "loss": 10.1985, "step": 44},
    {"epoch": 0.00045, "grad_norm": 0.908308760029939, "learning_rate": 0.000135, "loss": 10.1758, "step": 45},
    {"epoch": 0.00046, "grad_norm": 0.8994605508976834, "learning_rate": 0.000138, "loss": 10.1528, "step": 46},
    {"epoch": 0.00047, "grad_norm": 0.904955141258698, "learning_rate": 0.000141, "loss": 10.1274, "step": 47},
    {"epoch": 0.00048, "grad_norm": 0.9044693581157806, "learning_rate": 0.000144, "loss": 10.1031, "step": 48},
    {"epoch": 0.00049, "grad_norm": 0.8992120995192336, "learning_rate": 0.000147, "loss": 10.0777, "step": 49},
    {"epoch": 0.0005, "grad_norm": 0.905676588399281, "learning_rate": 0.00015000000000000001, "loss": 10.0519, "step": 50},
    {"epoch": 0.00051, "grad_norm": 0.9066841497261428, "learning_rate": 0.000153, "loss": 10.0251, "step": 51},
    {"epoch": 0.00052, "grad_norm": 0.9046656683417261, "learning_rate": 0.000156, "loss": 9.9981, "step": 52},
    {"epoch": 0.00053, "grad_norm": 0.8943714853313668, "learning_rate": 0.000159, "loss": 9.974, "step": 53},
    {"epoch": 0.00054, "grad_norm": 0.9141658233846578, "learning_rate": 0.000162, "loss": 9.9419, "step": 54},
    {"epoch": 0.00055, "grad_norm": 0.9035944774643171, "learning_rate": 0.000165, "loss": 9.9169, "step": 55},
    {"epoch": 0.00056, "grad_norm": 0.895407870582166, "learning_rate": 0.00016800000000000002, "loss": 9.8872, "step": 56},
    {"epoch": 0.00057, "grad_norm": 0.9021731997760362, "learning_rate": 0.000171, "loss": 9.8601, "step": 57},
    {"epoch": 0.00058, "grad_norm": 0.8980871554912008, "learning_rate": 0.000174, "loss": 9.8343, "step": 58},
    {"epoch": 0.00059, "grad_norm": 0.9073832283363998, "learning_rate": 0.000177, "loss": 9.8028, "step": 59},
    {"epoch": 0.0006, "grad_norm": 0.8921071838486323, "learning_rate": 0.00017999999999999998, "loss": 9.777, "step": 60},
    {"epoch": 0.00061, "grad_norm": 0.8918001641348363, "learning_rate": 0.000183, "loss": 9.7484, "step": 61},
    {"epoch": 0.00062, "grad_norm": 0.897401330332219, "learning_rate": 0.000186, "loss": 9.717, "step": 62},
    {"epoch": 0.00063, "grad_norm": 0.8914516241190131, "learning_rate": 0.000189, "loss": 9.6894, "step": 63},
    {"epoch": 0.00064, "grad_norm": 0.8896652156254973, "learning_rate": 0.000192, "loss": 9.6587, "step": 64},
    {"epoch": 0.00065, "grad_norm": 0.8995447585153489, "learning_rate": 0.00019500000000000002, "loss": 9.6261, "step": 65},
    {"epoch": 0.00066, "grad_norm": 0.8896939041293862, "learning_rate": 0.00019800000000000002, "loss": 9.6026, "step": 66},
    {"epoch": 0.00067, "grad_norm": 0.8935314234316469, "learning_rate": 0.000201, "loss": 9.5723, "step": 67},
    {"epoch": 0.00068, "grad_norm": 0.8971584182008717, "learning_rate": 0.00020400000000000003, "loss": 9.5393, "step": 68},
    {"epoch": 0.00069, "grad_norm": 0.8806455604370673, "learning_rate": 0.00020700000000000002, "loss": 9.5119, "step": 69},
    {"epoch": 0.0007, "grad_norm": 0.892956094531968, "learning_rate": 0.00021000000000000004, "loss": 9.4751, "step": 70},
    {"epoch": 0.00071, "grad_norm": 0.8848452972865632, "learning_rate": 0.00021299999999999997, "loss": 9.4495, "step": 71},
    {"epoch": 0.00072, "grad_norm": 0.8831148731992822, "learning_rate": 0.000216, "loss": 9.4223, "step": 72},
    {"epoch": 0.00073, "grad_norm": 0.887150899449638, "learning_rate": 0.00021899999999999998, "loss": 9.3879, "step": 73},
    {"epoch": 0.00074, "grad_norm": 0.8878619769377328, "learning_rate": 0.000222, "loss": 9.3616, "step": 74},
    {"epoch": 0.00075, "grad_norm": 0.8808154408936898, "learning_rate": 0.000225, "loss": 9.3275, "step": 75},
    {"epoch": 0.00076, "grad_norm": 0.8908035269749474, "learning_rate": 0.000228, "loss": 9.3009, "step": 76},
    {"epoch": 0.00077, "grad_norm": 0.884531047332737, "learning_rate": 0.000231, "loss": 9.2727, "step": 77},
    {"epoch": 0.00078, "grad_norm": 0.8838664917591654, "learning_rate": 0.000234, "loss": 9.2422, "step": 78},
    {"epoch": 0.00079, "grad_norm": 0.8858668201182466, "learning_rate": 0.00023700000000000001, "loss": 9.2056, "step": 79},
    {"epoch": 0.0008, "grad_norm": 0.8856967305037482, "learning_rate": 0.00024, "loss": 9.1711, "step": 80},
    {"epoch": 0.00081, "grad_norm": 0.8942846826675519, "learning_rate": 0.00024300000000000002, "loss": 9.1382, "step": 81},
    {"epoch": 0.00082, "grad_norm": 0.897767651472895, "learning_rate": 0.000246, "loss": 9.1142, "step": 82},
    {"epoch": 0.00083, "grad_norm": 0.8951752702012633, "learning_rate": 0.00024900000000000004, "loss": 9.0778, "step": 83},
    {"epoch": 0.00084, "grad_norm": 0.8980395289622467, "learning_rate": 0.000252, "loss": 9.0469, "step": 84},
    {"epoch": 0.00085, "grad_norm": 0.8894006576183595, "learning_rate": 0.000255, "loss": 9.0242, "step": 85},
    {"epoch": 0.00086, "grad_norm": 0.8907945566480024, "learning_rate": 0.000258, "loss": 8.9886, "step": 86},
    {"epoch": 0.00087, "grad_norm": 0.8869170795764568, "learning_rate": 0.000261, "loss": 8.9664, "step": 87},
    {"epoch": 0.00088, "grad_norm": 0.8860541210154026, "learning_rate": 0.00026399999999999997, "loss": 8.9293, "step": 88},
    {"epoch": 0.00089, "grad_norm": 0.8822605600899943, "learning_rate": 0.000267, "loss": 8.9037, "step": 89},
    {"epoch": 0.0009, "grad_norm": 0.8817151929172502, "learning_rate": 0.00027, "loss": 8.8766, "step": 90},
    {"epoch": 0.00091, "grad_norm": 0.877617615465877, "learning_rate": 0.000273, "loss": 8.8478, "step": 91},
    {"epoch": 0.00092, "grad_norm": 0.8822716293479064, "learning_rate": 0.000276, "loss": 8.8156, "step": 92},
    {"epoch": 0.00093, "grad_norm": 0.8823661552266111, "learning_rate": 0.000279, "loss": 8.7863, "step": 93},
    {"epoch": 0.00094, "grad_norm": 0.8830384482321385, "learning_rate": 0.000282, "loss": 8.7609, "step": 94},
    {"epoch": 0.00095, "grad_norm": 0.8735042737334501, "learning_rate": 0.000285, "loss": 8.7321, "step": 95},
    {"epoch": 0.00096, "grad_norm": 0.8799214796836804, "learning_rate": 0.000288, "loss": 8.7028, "step": 96},
    {"epoch": 0.00097, "grad_norm": 0.8704594748643596, "learning_rate": 0.000291, "loss": 8.6791, "step": 97},
    {"epoch": 0.00098, "grad_norm": 0.8706415983834461, "learning_rate": 0.000294, "loss": 8.642, "step": 98},
    {"epoch": 0.00099, "grad_norm": 0.8683426041650804, "learning_rate": 0.000297, "loss": 8.62, "step": 99},
    {"epoch": 0.001, "grad_norm": 0.8690594926543161, "learning_rate": 0.00030000000000000003, "loss": 8.5941, "step": 100},
|
    {"epoch": 0.00101, "grad_norm": 0.8744725161423202, "learning_rate": 0.00030300000000000005, "loss": 8.5597, "step": 101},
    {"epoch": 0.00102, "grad_norm": 0.8626538117869429, "learning_rate": 0.000306, "loss": 8.5407, "step": 102},
    {"epoch": 0.00103, "grad_norm": 0.8630292491448714, "learning_rate": 0.000309, "loss": 8.5165, "step": 103},
    {"epoch": 0.00104, "grad_norm": 0.8566580756350954, "learning_rate": 0.000312, "loss": 8.4948, "step": 104},
    {"epoch": 0.00105, "grad_norm": 0.8588931967033124, "learning_rate": 0.000315, "loss": 8.4689, "step": 105},
    {"epoch": 0.00106, "grad_norm": 0.8531867230327145, "learning_rate": 0.000318, "loss": 8.4397, "step": 106},
    {"epoch": 0.00107, "grad_norm": 0.8474808010000593, "learning_rate": 0.000321, "loss": 8.4149, "step": 107},
    {"epoch": 0.00108, "grad_norm": 0.858890949163445, "learning_rate": 0.000324, "loss": 8.3866, "step": 108},
    {"epoch": 0.00109, "grad_norm": 0.862504115159085, "learning_rate": 0.000327, "loss": 8.3673, "step": 109},
    {"epoch": 0.0011, "grad_norm": 0.8797254817902618, "learning_rate": 0.00033, "loss": 8.3453, "step": 110},
    {"epoch": 0.00111, "grad_norm": 0.8938450574121157, "learning_rate": 0.000333, "loss": 8.3162, "step": 111},
    {"epoch": 0.00112, "grad_norm": 0.8984693362159062, "learning_rate": 0.00033600000000000004, "loss": 8.2961, "step": 112},
    {"epoch": 0.00113, "grad_norm": 0.8741969532880793, "learning_rate": 0.000339, "loss": 8.2543, "step": 113},
    {"epoch": 0.00114, "grad_norm": 0.8263135137442741, "learning_rate": 0.000342, "loss": 8.2446, "step": 114},
    {"epoch": 0.00115, "grad_norm": 0.8311105019716521, "learning_rate": 0.00034500000000000004, "loss": 8.2164, "step": 115},
    {"epoch": 0.00116, "grad_norm": 0.8585271561560018, "learning_rate": 0.000348, "loss": 8.1938, "step": 116},
    {"epoch": 0.00117, "grad_norm": 0.8687047969468357, "learning_rate": 0.000351, "loss": 8.1623, "step": 117},
    {"epoch": 0.00118, "grad_norm": 0.8460500876754268, "learning_rate": 0.000354, "loss": 8.1456, "step": 118},
    {"epoch": 0.00119, "grad_norm": 0.80734714043103, "learning_rate": 0.000357, "loss": 8.131, "step": 119},
    {"epoch": 0.0012, "grad_norm": 0.7912470130916918, "learning_rate": 0.00035999999999999997, "loss": 8.103, "step": 120},
    {"epoch": 0.00121, "grad_norm": 0.811181199244069, "learning_rate": 0.000363, "loss": 8.0751, "step": 121},
    {"epoch": 0.00122, "grad_norm": 0.8641427784894331, "learning_rate": 0.000366, "loss": 8.0581, "step": 122},
    {"epoch": 0.00123, "grad_norm": 1.059706847686213, "learning_rate": 0.000369, "loss": 8.038, "step": 123},
    {"epoch": 0.00124, "grad_norm": 1.1727027216994725, "learning_rate": 0.000372, "loss": 8.021, "step": 124},
    {"epoch": 0.00125, "grad_norm": 0.8130002892684417, "learning_rate": 0.000375, "loss": 7.9874, "step": 125},
    {"epoch": 0.00126, "grad_norm": 0.9195848585380069, "learning_rate": 0.000378, "loss": 7.9767, "step": 126},
    {"epoch": 0.00127, "grad_norm": 1.0843433185909894, "learning_rate": 0.000381, "loss": 7.9572, "step": 127},
    {"epoch": 0.00128, "grad_norm": 0.7822919696307823, "learning_rate": 0.000384, "loss": 7.9285, "step": 128},
    {"epoch": 0.00129, "grad_norm": 0.8822428605457112, "learning_rate": 0.00038700000000000003, "loss": 7.9179, "step": 129},
    {"epoch": 0.0013, "grad_norm": 0.844355538937723, "learning_rate": 0.00039000000000000005, "loss": 7.8895, "step": 130},
    {"epoch": 0.00131, "grad_norm": 0.7902535444057679, "learning_rate": 0.000393, "loss": 7.8528, "step": 131},
    {"epoch": 0.00132, "grad_norm": 0.8777082094723105, "learning_rate": 0.00039600000000000003, "loss": 7.8441, "step": 132},
    {"epoch": 0.00133, "grad_norm": 0.7973277807473979, "learning_rate": 0.00039900000000000005, "loss": 7.8195, "step": 133},
    {"epoch": 0.00134, "grad_norm": 0.7889088832890946, "learning_rate": 0.000402, "loss": 7.8056, "step": 134},
    {"epoch": 0.00135, "grad_norm": 0.7461125825498439, "learning_rate": 0.00040500000000000003, "loss": 7.7735, "step": 135},
    {"epoch": 0.00136, "grad_norm": 0.7727896835908762, "learning_rate": 0.00040800000000000005, "loss": 7.7579, "step": 136},
    {"epoch": 0.00137, "grad_norm": 0.6932995987295251, "learning_rate": 0.000411, "loss": 7.7341, "step": 137},
    {"epoch": 0.00138, "grad_norm": 0.758084762416224, "learning_rate": 0.00041400000000000003, "loss": 7.7117, "step": 138},
    {"epoch": 0.00139, "grad_norm": 0.7171019453691133, "learning_rate": 0.00041700000000000005, "loss": 7.6963, "step": 139},
    {"epoch": 0.0014, "grad_norm": 0.6814920611933867, "learning_rate": 0.00042000000000000007, "loss": 7.6775, "step": 140},
    {"epoch": 0.00141, "grad_norm": 0.7091532995122851, "learning_rate": 0.000423, "loss": 7.6638, "step": 141},
    {"epoch": 0.00142, "grad_norm": 0.6928279523561562, "learning_rate": 0.00042599999999999995, "loss": 7.6501, "step": 142},
    {"epoch": 0.00143, "grad_norm": 0.6614572727332786, "learning_rate": 0.00042899999999999997, "loss": 7.6195, "step": 143},
    {"epoch": 0.00144, "grad_norm": 0.6903462553659518, "learning_rate": 0.000432, "loss": 7.6015, "step": 144},
    {"epoch": 0.00145, "grad_norm": 0.690019772183536, "learning_rate": 0.000435, "loss": 7.5953, "step": 145},
    {"epoch": 0.00146, "grad_norm": 0.6908198257220046, "learning_rate": 0.00043799999999999997, "loss": 7.5557, "step": 146},
    {"epoch": 0.00147, "grad_norm": 0.7009866965495668, "learning_rate": 0.000441, "loss": 7.5482, "step": 147},
    {"epoch": 0.00148, "grad_norm": 0.6832764187147686, "learning_rate": 0.000444, "loss": 7.5366, "step": 148},
    {"epoch": 0.00149, "grad_norm": 0.59797192318343, "learning_rate": 0.00044699999999999997, "loss": 7.5272, "step": 149},
    {"epoch": 0.0015, "grad_norm": 0.6655702435683013, "learning_rate": 0.00045, "loss": 7.4963, "step": 150},
    {"epoch": 0.00151, "grad_norm": 0.732396941583091, "learning_rate": 0.000453, "loss": 7.48, "step": 151},
    {"epoch": 0.00152, "grad_norm": 0.5836278900992692, "learning_rate": 0.000456, "loss": 7.4694, "step": 152},
    {"epoch": 0.00153, "grad_norm": 0.6777912087785298, "learning_rate": 0.000459, "loss": 7.4588, "step": 153},
    {"epoch": 0.00154, "grad_norm": 0.727978180952039, "learning_rate": 0.000462, "loss": 7.442, "step": 154},
    {"epoch": 0.00155, "grad_norm": 0.7368922682622268, "learning_rate": 0.000465, "loss": 7.4241, "step": 155},
    {"epoch": 0.00156, "grad_norm": 0.8391325557731037, "learning_rate": 0.000468, "loss": 7.4013, "step": 156},
    {"epoch": 0.00157, "grad_norm": 0.8289929528374833, "learning_rate": 0.000471, "loss": 7.3995, "step": 157},
    {"epoch": 0.00158, "grad_norm": 0.5070337070851558, "learning_rate": 0.00047400000000000003, "loss": 7.3713, "step": 158},
    {"epoch": 0.00159, "grad_norm": 0.783946493417518, "learning_rate": 0.000477, "loss": 7.3668, "step": 159},
    {"epoch": 0.0016, "grad_norm": 0.6957053326984224, "learning_rate": 0.00048, "loss": 7.3475, "step": 160},
    {"epoch": 0.00161, "grad_norm": 0.547833885334286, "learning_rate": 0.00048300000000000003, "loss": 7.3204, "step": 161},
    {"epoch": 0.00162, "grad_norm": 0.8547649122041628, "learning_rate": 0.00048600000000000005, "loss": 7.3325, "step": 162},
    {"epoch": 0.00163, "grad_norm": 0.8673949773728752, "learning_rate": 0.0004890000000000001, "loss": 7.316, "step": 163},
    {"epoch": 0.00164, "grad_norm": 1.0619539506126108, "learning_rate": 0.000492, "loss": 7.3191, "step": 164},
    {"epoch": 0.00165, "grad_norm": 0.6324744711420325, "learning_rate": 0.000495, "loss": 7.2864, "step": 165},
    {"epoch": 0.00166, "grad_norm": 0.5662260261966024, "learning_rate": 0.0004980000000000001, "loss": 7.26, "step": 166},
    {"epoch": 0.00167, "grad_norm": 0.7262900850309921, "learning_rate": 0.000501, "loss": 7.2554, "step": 167},
    {"epoch": 0.00168, "grad_norm": 0.6121691436587496, "learning_rate": 0.000504, "loss": 7.2353, "step": 168},
    {"epoch": 0.00169, "grad_norm": 0.5390794603769147, "learning_rate": 0.0005070000000000001, "loss": 7.2263, "step": 169},
    {"epoch": 0.0017, "grad_norm": 0.5999036994585554, "learning_rate": 0.00051, "loss": 7.213, "step": 170},
    {"epoch": 0.00171, "grad_norm": 0.4637320512013434, "learning_rate": 0.000513, "loss": 7.1933, "step": 171},
    {"epoch": 0.00172, "grad_norm": 0.5250975302523401, "learning_rate": 0.000516, "loss": 7.1953, "step": 172},
    {"epoch": 0.00173, "grad_norm": 0.40559164125903624, "learning_rate": 0.0005189999999999999, "loss": 7.1764, "step": 173},
    {"epoch": 0.00174, "grad_norm": 0.4505921111310584, "learning_rate": 0.000522, "loss": 7.1953, "step": 174},
    {"epoch": 0.00175, "grad_norm": 0.4234331150208657, "learning_rate": 0.000525, "loss": 7.1572, "step": 175},
    {"epoch": 0.00176, "grad_norm": 0.3852967422981744, "learning_rate": 0.0005279999999999999, "loss": 7.1322, "step": 176},
    {"epoch": 0.00177, "grad_norm": 0.3685443025565043, "learning_rate": 0.000531, "loss": 7.1378, "step": 177},
    {"epoch": 0.00178, "grad_norm": 0.44280593644992733, "learning_rate": 0.000534, "loss": 7.1301, "step": 178},
    {"epoch": 0.00179, "grad_norm": 0.3638226120256115, "learning_rate": 0.000537, "loss": 7.1191, "step": 179},
    {"epoch": 0.0018, "grad_norm": 0.37841703582661185, "learning_rate": 0.00054, "loss": 7.0921, "step": 180},
    {"epoch": 0.00181, "grad_norm": 0.3275750999054276, "learning_rate": 0.000543, "loss": 7.0801, "step": 181},
    {"epoch": 0.00182, "grad_norm": 0.3469517461565544, "learning_rate": 0.000546, "loss": 7.0774, "step": 182},
    {"epoch": 0.00183, "grad_norm": 0.3965623823212328, "learning_rate": 0.000549, "loss": 7.0674, "step": 183},
    {"epoch": 0.00184, "grad_norm": 0.47244712960577356, "learning_rate": 0.000552, "loss": 7.0582, "step": 184},
    {"epoch": 0.00185, "grad_norm": 0.7068086356604425, "learning_rate": 0.000555, "loss": 7.0369, "step": 185},
    {"epoch": 0.00186, "grad_norm": 0.9665650200874053, "learning_rate": 0.000558, "loss": 7.0604, "step": 186},
    {"epoch": 0.00187, "grad_norm": 1.1379887499575514, "learning_rate": 0.000561, "loss": 7.0366, "step": 187},
    {"epoch": 0.00188, "grad_norm": 0.5005933831438132, "learning_rate": 0.000564, "loss": 7.0008, "step": 188},
    {"epoch": 0.00189, "grad_norm": 0.4490325126563288, "learning_rate": 0.000567, "loss": 6.997, "step": 189},
    {"epoch": 0.0019, "grad_norm": 0.6949112483193859, "learning_rate": 0.00057, "loss": 6.9846, "step": 190},
    {"epoch": 0.00191, "grad_norm": 0.4887612962658467, "learning_rate": 0.000573, "loss": 6.9724, "step": 191},
    {"epoch": 0.00192, "grad_norm": 0.5374763602633008, "learning_rate": 0.000576, "loss": 6.9655, "step": 192},
    {"epoch": 0.00193, "grad_norm": 0.4491815623326969, "learning_rate": 0.000579, "loss": 6.9637, "step": 193},
    {"epoch": 0.00194, "grad_norm": 0.4044031823800156, "learning_rate": 0.000582, "loss": 6.9565, "step": 194},
    {"epoch": 0.00195, "grad_norm": 0.5115147380417242, "learning_rate": 0.000585, "loss": 6.9386, "step": 195},
    {"epoch": 0.00196, "grad_norm": 0.45947827433809557, "learning_rate": 0.000588, "loss": 6.9258, "step": 196},
    {"epoch": 0.00197, "grad_norm": 0.5289316721591154, "learning_rate": 0.000591, "loss": 6.9226, "step": 197},
    {"epoch": 0.00198, "grad_norm": 0.4416511613975406, "learning_rate": 0.000594, "loss": 6.9132, "step": 198},
    {"epoch": 0.00199, "grad_norm": 0.36314866008934127, "learning_rate": 0.0005970000000000001, "loss": 6.8916, "step": 199},
    {"epoch": 0.002, "grad_norm": 0.4299454881914127, "learning_rate": 0.0006000000000000001, "loss": 6.8932, "step": 200},
|
    {"epoch": 0.00201, "grad_norm": 0.2786890859011363, "learning_rate": 0.000603, "loss": 6.8645, "step": 201},
    {"epoch": 0.00202, "grad_norm": 0.4553990879307791, "learning_rate": 0.0006060000000000001, "loss": 6.855, "step": 202},
    {"epoch": 0.00203, "grad_norm": 0.49491513980041124, "learning_rate": 0.0006090000000000001, "loss": 6.8592, "step": 203},
    {"epoch": 0.00204, "grad_norm": 0.5750090076580947, "learning_rate": 0.000612, "loss": 6.8457, "step": 204},
    {"epoch": 0.00205, "grad_norm": 0.6904749130436038, "learning_rate": 0.000615, "loss": 6.8381, "step": 205},
    {"epoch": 0.00206, "grad_norm": 0.7582947936777445, "learning_rate": 0.000618, "loss": 6.83, "step": 206},
    {"epoch": 0.00207, "grad_norm": 0.728748472942146, "learning_rate": 0.000621, "loss": 6.8214, "step": 207},
    {"epoch": 0.00208, "grad_norm": 0.5163586812963157, "learning_rate": 0.000624, "loss": 6.8116, "step": 208},
    {"epoch": 0.00209, "grad_norm": 0.5726761174567752, "learning_rate": 0.000627, "loss": 6.7933, "step": 209},
    {"epoch": 0.0021, "grad_norm": 0.6890311463252623, "learning_rate": 0.00063, "loss": 6.7854, "step": 210},
    {"epoch": 0.00211, "grad_norm": 0.9174002778722206, "learning_rate": 0.000633, "loss": 6.7849, "step": 211},
    {"epoch": 0.00212, "grad_norm": 0.8086617968740898, "learning_rate": 0.000636, "loss": 6.7808, "step": 212},
    {"epoch": 0.00213, "grad_norm": 0.6685717599500662, "learning_rate": 0.000639, "loss": 6.7542, "step": 213},
    {"epoch": 0.00214, "grad_norm": 0.511917016650173, "learning_rate": 0.000642, "loss": 6.7483, "step": 214},
    {"epoch": 0.00215, "grad_norm": 0.5132261185164837, "learning_rate": 0.000645, "loss": 6.7465, "step": 215},
    {"epoch": 0.00216, "grad_norm": 0.3896647006337605, "learning_rate": 0.000648, "loss": 6.7354, "step": 216},
    {"epoch": 0.00217, "grad_norm": 0.53153444147609, "learning_rate": 0.000651, "loss": 6.7114, "step": 217},
    {"epoch": 0.00218, "grad_norm": 0.4560253950102483, "learning_rate": 0.000654, "loss": 6.7136, "step": 218},
    {"epoch": 0.00219, "grad_norm": 0.38246603109839156, "learning_rate": 0.000657, "loss": 6.6847, "step": 219},
    {"epoch": 0.0022, "grad_norm": 0.502249830770979, "learning_rate": 0.00066, "loss": 6.7061, "step": 220},
    {"epoch": 0.00221, "grad_norm": 0.555840042257826, "learning_rate": 0.0006630000000000001, "loss": 6.6817, "step": 221},
    {"epoch": 0.00222, "grad_norm": 0.7008290795132841, "learning_rate": 0.000666, "loss": 6.6751, "step": 222},
    {"epoch": 0.00223, "grad_norm": 0.9665649898158697, "learning_rate": 0.000669, "loss": 6.6759, "step": 223},
    {"epoch": 0.00224, "grad_norm": 1.0460190685952617, "learning_rate": 0.0006720000000000001, "loss": 6.6821, "step": 224},
    {"epoch": 0.00225, "grad_norm": 0.9709238336565439, "learning_rate": 0.000675, "loss": 6.6643, "step": 225},
    {"epoch": 0.00226, "grad_norm": 0.9675609159629996, "learning_rate": 0.000678, "loss": 6.6602, "step": 226},
    {"epoch": 0.00227, "grad_norm": 0.6069059963900193, "learning_rate": 0.0006810000000000001, "loss": 6.6251, "step": 227},
    {"epoch": 0.00228, "grad_norm": 0.6661980684886607, "learning_rate": 0.000684, "loss": 6.6314, "step": 228},
    {"epoch": 0.00229, "grad_norm": 0.5755819115869941, "learning_rate": 0.000687, "loss": 6.6231, "step": 229},
    {"epoch": 0.0023, "grad_norm": 0.48612508126201437, "learning_rate": 0.0006900000000000001, "loss": 6.6015, "step": 230},
    {"epoch": 0.00231, "grad_norm": 0.42902206098374773, "learning_rate": 0.000693, "loss": 6.5844, "step": 231},
    {"epoch": 0.00232, "grad_norm": 0.4617696239896233, "learning_rate": 0.000696, "loss": 6.5964, "step": 232},
    {"epoch": 0.00233, "grad_norm": 0.397560103207551, "learning_rate": 0.0006990000000000001, "loss": 6.5819, "step": 233},
    {"epoch": 0.00234, "grad_norm": 0.39436477469071923, "learning_rate": 0.000702, "loss": 6.5732, "step": 234},
    {"epoch": 0.00235, "grad_norm": 0.37818254545129, "learning_rate": 0.000705, "loss": 6.5584, "step": 235},
    {"epoch": 0.00236, "grad_norm": 0.39793300295732814, "learning_rate": 0.000708, "loss": 6.539, "step": 236},
    {"epoch": 0.00237, "grad_norm": 0.32880148477167265, "learning_rate": 0.0007109999999999999, "loss": 6.5486, "step": 237},
    {"epoch": 0.00238, "grad_norm": 0.33186649759843, "learning_rate": 0.000714, "loss": 6.5374, "step": 238},
    {"epoch": 0.00239, "grad_norm": 0.3861082150171924, "learning_rate": 0.000717, "loss": 6.5195, "step": 239},
    {"epoch": 0.0024, "grad_norm": 0.3818382545466509, "learning_rate": 0.0007199999999999999, "loss": 6.5368, "step": 240},
    {"epoch": 0.00241, "grad_norm": 0.26517003068907236, "learning_rate": 0.000723, "loss": 6.5167, "step": 241},
    {"epoch": 0.00242, "grad_norm": 0.30652105574179844, "learning_rate": 0.000726, "loss": 6.4934, "step": 242},
    {"epoch": 0.00243, "grad_norm": 0.3382942246099826, "learning_rate": 0.000729, "loss": 6.4799, "step": 243},
    {"epoch": 0.00244, "grad_norm": 0.4164388540502762, "learning_rate": 0.000732, "loss": 6.4843, "step": 244},
    {"epoch": 0.00245, "grad_norm": 0.4035007765909141, "learning_rate": 0.000735, "loss": 6.4741, "step": 245},
    {"epoch": 0.00246, "grad_norm": 0.4484502106991885, "learning_rate": 0.000738, "loss": 6.458, "step": 246},
    {"epoch": 0.00247, "grad_norm": 0.6057401118193197, "learning_rate": 0.000741, "loss": 6.4543, "step": 247},
    {"epoch": 0.00248, "grad_norm": 1.1775332556723501, "learning_rate": 0.000744, "loss": 6.4781, "step": 248},
    {"epoch": 0.00249, "grad_norm": 1.0888595785598245, "learning_rate": 0.000747, "loss": 6.4635, "step": 249},
    {"epoch": 0.0025, "grad_norm": 0.536872031808477, "learning_rate": 0.00075, "loss": 6.4479, "step": 250},
    {"epoch": 0.00251, "grad_norm": 0.7926645181932281, "learning_rate": 0.000753, "loss": 6.4169, "step": 251},
    {"epoch": 0.00252, "grad_norm": 0.6863348407685264, "learning_rate": 0.000756, "loss": 6.4273, "step": 252},
    {"epoch": 0.00253, "grad_norm": 0.7123800606299509, "learning_rate": 0.000759, "loss": 6.4195, "step": 253},
    {"epoch": 0.00254, "grad_norm": 0.839405849029746, "learning_rate": 0.000762, "loss": 6.4177, "step": 254},
    {"epoch": 0.00255, "grad_norm": 0.76501143998226, "learning_rate": 0.0007650000000000001, "loss": 6.4159, "step": 255},
    {"epoch": 0.00256, "grad_norm": 0.597608858095952, "learning_rate": 0.000768, "loss": 6.3889, "step": 256},
    {"epoch": 0.00257, "grad_norm": 0.6526176951631347, "learning_rate": 0.000771, "loss": 6.3981, "step": 257},
    {"epoch": 0.00258, "grad_norm": 0.651228257980475, "learning_rate": 0.0007740000000000001, "loss": 6.3725, "step": 258},
    {"epoch": 0.00259, "grad_norm": 0.5603901273931662, "learning_rate": 0.000777, "loss": 6.3719, "step": 259},
    {"epoch": 0.0026, "grad_norm": 0.41845020316479425, "learning_rate": 0.0007800000000000001, "loss": 6.3536, "step": 260},
    {"epoch": 0.00261, "grad_norm": 0.5144884019867095, "learning_rate": 0.0007830000000000001, "loss": 6.3665, "step": 261},
    {"epoch": 0.00262, "grad_norm": 0.5548811083770797, "learning_rate": 0.000786, "loss": 6.3412, "step": 262},
    {"epoch": 0.00263, "grad_norm": 0.702068573310266, "learning_rate": 0.0007890000000000001, "loss": 6.353, "step": 263},
    {"epoch": 0.00264, "grad_norm": 0.9481897048028406, "learning_rate": 0.0007920000000000001, "loss": 6.3404, "step": 264},
    {"epoch": 0.00265, "grad_norm": 1.2297805755386195, "learning_rate": 0.000795, "loss": 6.3478, "step": 265},
    {"epoch": 0.00266, "grad_norm": 0.5034998722006886, "learning_rate": 0.0007980000000000001, "loss": 6.3233, "step": 266},
    {"epoch": 0.00267, "grad_norm": 0.8457797300321087, "learning_rate": 0.0008010000000000001, "loss": 6.3268, "step": 267},
    {"epoch": 0.00268, "grad_norm": 0.7624901894608749, "learning_rate": 0.000804, "loss": 6.3226, "step": 268},
    {"epoch": 0.00269, "grad_norm": 0.6803898428780553, "learning_rate": 0.0008070000000000001, "loss": 6.3045, "step": 269},
    {"epoch": 0.0027, "grad_norm": 0.5891673657315365, "learning_rate": 0.0008100000000000001, "loss": 6.3, "step": 270},
    {"epoch": 0.00271, "grad_norm": 0.6321969571669588, "learning_rate": 0.000813, "loss": 6.3081, "step": 271},
    {"epoch": 0.00272, "grad_norm": 0.6080230974854919, "learning_rate": 0.0008160000000000001, "loss": 6.2911, "step": 272},
    {"epoch": 0.00273, "grad_norm": 0.577176950863229, "learning_rate": 0.0008190000000000001, "loss": 6.2786, "step": 273},
    {"epoch": 0.00274, "grad_norm": 0.46970800663022055, "learning_rate": 0.000822, "loss": 6.2573, "step": 274},
    {"epoch": 0.00275, "grad_norm": 0.5095773122618286, "learning_rate": 0.0008250000000000001, "loss": 6.2676, "step": 275},
    {"epoch": 0.00276, "grad_norm": 0.421367493059458, "learning_rate": 0.0008280000000000001, "loss": 6.2547, "step": 276},
    {"epoch": 0.00277, "grad_norm": 0.4229723742956301, "learning_rate": 0.0008310000000000001, "loss": 6.2503, "step": 277},
    {"epoch": 0.00278, "grad_norm": 0.4631977178825306, "learning_rate": 0.0008340000000000001, "loss": 6.2346, "step": 278},
    {"epoch": 0.00279, "grad_norm": 0.41870110969580987, "learning_rate": 0.0008370000000000001, "loss": 6.2332, "step": 279},
    {"epoch": 0.0028, "grad_norm": 0.4083314739680453, "learning_rate": 0.0008400000000000001, "loss": 6.2161, "step": 280},
    {"epoch": 0.00281, "grad_norm": 0.42451645247510567, "learning_rate": 0.0008430000000000001, "loss": 6.2058, "step": 281},
    {"epoch": 0.00282, "grad_norm": 0.4811013283391871, "learning_rate": 0.000846, "loss": 6.206, "step": 282},
    {"epoch": 0.00283, "grad_norm": 0.6798083705841664, "learning_rate": 0.0008489999999999999, "loss": 6.2015, "step": 283},
    {"epoch": 0.00284, "grad_norm": 1.0382201143248402, "learning_rate": 0.0008519999999999999, "loss": 6.2055, "step": 284},
    {"epoch": 0.00285, "grad_norm": 1.115942818342409, "learning_rate": 0.000855, "loss": 6.2129, "step": 285},
    {"epoch": 0.00286, "grad_norm": 0.8889955339821247, "learning_rate": 0.0008579999999999999, "loss": 6.187, "step": 286},
    {"epoch": 0.00287, "grad_norm": 1.2422801265585652, "learning_rate": 0.000861, "loss": 6.209, "step": 287},
    {"epoch": 0.00288, "grad_norm": 0.8315932342234975, "learning_rate": 0.000864, "loss": 6.174, "step": 288},
    {"epoch": 0.00289, "grad_norm": 1.2914759013339998, "learning_rate": 0.0008669999999999999, "loss": 6.2078, "step": 289},
    {"epoch": 0.0029, "grad_norm": 0.8376507056381004, "learning_rate": 0.00087, "loss": 6.1757, "step": 290},
    {"epoch": 0.00291, "grad_norm": 0.8412780961911104, "learning_rate": 0.000873, "loss": 6.1658, "step": 291},
    {"epoch": 0.00292, "grad_norm": 1.047021583757866, "learning_rate": 0.0008759999999999999, "loss": 6.1758, "step": 292},
    {"epoch": 0.00293, "grad_norm": 0.8919470282886952, "learning_rate": 0.000879, "loss": 6.151, "step": 293},
    {"epoch": 0.00294, "grad_norm": 0.665529628519212, "learning_rate": 0.000882, "loss": 6.159, "step": 294},
    {"epoch": 0.00295, "grad_norm": 0.5169660787787601, "learning_rate": 0.0008849999999999999, "loss": 6.1239, "step": 295},
    {"epoch": 0.00296, "grad_norm": 0.5611538425989948, "learning_rate": 0.000888, "loss": 6.1363, "step": 296},
    {"epoch": 0.00297, "grad_norm": 0.46398604023920087, "learning_rate": 0.000891, "loss": 6.1045, "step": 297},
    {"epoch": 0.00298, "grad_norm": 0.4361556326298739, "learning_rate": 0.0008939999999999999, "loss": 6.1198, "step": 298},
    {"epoch": 0.00299, "grad_norm": 0.4319584905904094, "learning_rate": 0.000897, "loss": 6.0941, "step": 299},
    {"epoch": 0.003, "grad_norm": 0.4255386299160817, "learning_rate": 0.0009, "loss": 6.0936, "step": 300},
|
    {"epoch": 0.00301, "grad_norm": 0.3316584659066082, "learning_rate": 0.0009029999999999999, "loss": 6.0857, "step": 301},
    {"epoch": 0.00302, "grad_norm": 0.37299869635167304, "learning_rate": 0.000906, "loss": 6.0685, "step": 302},
    {"epoch": 0.00303, "grad_norm": 0.40148217950038195, "learning_rate": 0.000909, "loss": 6.0805, "step": 303},
    {"epoch": 0.00304, "grad_norm": 0.420191340163935, "learning_rate": 0.000912, "loss": 6.0758, "step": 304},
    {"epoch": 0.00305, "grad_norm": 0.45307668264044143, "learning_rate": 0.000915, "loss": 6.0736, "step": 305},
    {"epoch": 0.00306, "grad_norm": 0.6122731249830943, "learning_rate": 0.000918, "loss": 6.0651, "step": 306},
    {"epoch": 0.00307, "grad_norm": 0.851197326038436, "learning_rate": 0.000921, "loss": 6.0633, "step": 307},
    {"epoch": 0.00308, "grad_norm": 1.1284591769922636, "learning_rate": 0.000924, "loss": 6.0582, "step": 308},
    {"epoch": 0.00309, "grad_norm": 0.9596545216263644, "learning_rate": 0.000927, "loss": 6.0709, "step": 309},
    {"epoch": 0.0031, "grad_norm": 1.039007932956353, "learning_rate": 0.00093, "loss": 6.0624, "step": 310},
    {"epoch": 0.00311, "grad_norm": 0.9855401820369791, "learning_rate": 0.000933, "loss": 6.0524, "step": 311},
    {"epoch": 0.00312, "grad_norm": 1.0163701418335827, "learning_rate": 0.000936, "loss": 6.041, "step": 312},
    {"epoch": 0.00313, "grad_norm": 1.0223663613867633, "learning_rate": 0.0009390000000000001, "loss": 6.0491, "step": 313},
    {"epoch": 0.00314, "grad_norm": 1.0997292958340223, "learning_rate": 0.000942, "loss": 6.0641, "step": 314},
    {"epoch": 0.00315, "grad_norm": 0.932276773939602, "learning_rate": 0.000945, "loss": 6.0354, "step": 315},
    {"epoch": 0.00316, "grad_norm": 0.8624268848533463, "learning_rate": 0.0009480000000000001, "loss": 6.0096, "step": 316},
    {"epoch": 0.00317, "grad_norm": 0.6867359398602113, "learning_rate": 0.000951, "loss": 6.0237, "step": 317},
    {"epoch": 0.00318, "grad_norm": 0.5777711812516898, "learning_rate": 0.000954, "loss": 6.014, "step": 318},
    {"epoch": 0.00319, "grad_norm": 0.5907201170091796, "learning_rate": 0.0009570000000000001, "loss": 6.0042, "step": 319},
    {"epoch": 0.0032, "grad_norm": 0.5929910498481646, "learning_rate": 0.00096, "loss": 6.0021, "step": 320},
    {"epoch": 0.00321, "grad_norm": 0.6559636432249029, "learning_rate": 0.000963, "loss": 5.9891, "step": 321},
    {"epoch": 0.00322, "grad_norm": 0.5844133161497509, "learning_rate": 0.0009660000000000001, "loss": 5.9766, "step": 322},
    {"epoch": 0.00323, "grad_norm": 0.5466606066369618, "learning_rate": 0.000969, "loss": 5.9736, "step": 323},
    {"epoch": 0.00324, "grad_norm": 0.563270781105711, "learning_rate": 0.0009720000000000001, "loss": 5.9778, "step": 324},
    {"epoch": 0.00325, "grad_norm": 0.5312990845923178, "learning_rate": 0.0009750000000000001, "loss": 5.9405, "step": 325},
    {"epoch": 0.00326, "grad_norm": 0.5118566622058196, "learning_rate": 0.0009780000000000001, "loss": 5.9566, "step": 326},
    {"epoch": 0.00327, "grad_norm": 0.5259315695578027, "learning_rate": 0.000981, "loss": 5.9568, "step": 327},
    {"epoch": 0.00328, "grad_norm": 0.5410551164101933, "learning_rate": 0.000984, "loss": 5.9324, "step": 328},
    {"epoch": 0.00329, "grad_norm": 0.48301414107035934, "learning_rate": 0.000987, "loss": 5.931, "step": 329},
    {"epoch": 0.0033, "grad_norm": 0.5975532498257996, "learning_rate": 0.00099, "loss": 5.9265, "step": 330},
    {"epoch": 0.00331, "grad_norm": 0.9746373555768076, "learning_rate": 0.0009930000000000002, "loss": 5.9381, "step": 331},
    {"epoch": 0.00332, "grad_norm": 1.1994973147641799, "learning_rate": 0.0009960000000000001, "loss": 5.9385, "step": 332},
    {"epoch": 0.00333, "grad_norm": 0.5144992648158865, "learning_rate": 0.000999, "loss": 5.8989, "step": 333},
    {"epoch": 0.00334, "grad_norm": 0.9073672400240862, "learning_rate": 0.001002, "loss": 5.9262, "step": 334},
    {"epoch": 0.00335, "grad_norm": 0.7561451103694465, "learning_rate": 0.001005, "loss": 5.9186, "step": 335},
    {"epoch": 0.00336, "grad_norm": 0.7872757919528415, "learning_rate": 0.001008, "loss": 5.9134, "step": 336},
    {"epoch": 0.00337, "grad_norm": 0.7536020827923614, "learning_rate": 0.0010110000000000002, "loss": 5.8884, "step": 337},
    {"epoch": 0.00338, "grad_norm": 1.0523353883962923, "learning_rate": 0.0010140000000000001, "loss": 5.9132, "step": 338},
    {"epoch": 0.00339, "grad_norm": 1.2857238182949966, "learning_rate": 0.0010170000000000001, "loss": 5.9334, "step": 339},
    {"epoch": 0.0034, "grad_norm": 0.7361708327689132, "learning_rate": 0.00102, "loss": 5.885, "step": 340},
    {"epoch": 0.00341, "grad_norm": 0.6901997441262301, "learning_rate": 0.001023, "loss": 5.869, "step": 341},
    {"epoch": 0.00342, "grad_norm": 0.6381033550571615, "learning_rate": 0.001026, "loss": 5.8702, "step": 342},
    {"epoch": 0.00343, "grad_norm": 0.6071718978500397, "learning_rate": 0.0010290000000000002, "loss": 5.8743, "step": 343},
    {"epoch": 0.00344, "grad_norm": 0.5857796625429044, "learning_rate": 0.001032, "loss": 5.861, "step": 344},
    {"epoch": 0.00345, "grad_norm": 0.626640702848716, "learning_rate": 0.001035, "loss": 5.8537, "step": 345},
    {"epoch": 0.00346, "grad_norm": 0.6755670022907736, "learning_rate": 0.0010379999999999999, "loss": 5.8603, "step": 346},
    {"epoch": 0.00347, "grad_norm": 0.9144508249400731, "learning_rate": 0.001041, "loss": 5.8338, "step": 347},
    {"epoch": 0.00348, "grad_norm": 1.2125876856754099, "learning_rate": 0.001044, "loss": 5.8634, "step": 348},
    {"epoch": 0.00349, "grad_norm": 0.6928695941460523, "learning_rate": 0.001047, "loss": 5.8236, "step": 349},
    {"epoch": 0.0035, "grad_norm": 0.7654262923967496, "learning_rate": 0.00105, "loss": 5.8502, "step": 350},
    {"epoch": 0.00351, "grad_norm": 0.8300223804260752, "learning_rate": 0.001053, "loss": 5.8507, "step": 351},
    {"epoch": 0.00352, "grad_norm": 1.1393832643973667, "learning_rate": 0.0010559999999999999, "loss": 5.841, "step": 352},
    {"epoch": 0.00353, "grad_norm": 0.7670875434573843, "learning_rate": 0.001059, "loss": 5.8302, "step": 353},
    {"epoch": 0.00354, "grad_norm": 0.8617169684849714, "learning_rate": 0.001062, "loss": 5.8072, "step": 354},
    {"epoch": 0.00355, "grad_norm": 0.8787230305505044, "learning_rate": 0.001065, "loss": 5.8221, "step": 355},
    {"epoch": 0.00356, "grad_norm": 0.9037602020080988, "learning_rate": 0.001068, "loss": 5.8164, "step": 356},
    {"epoch": 0.00357, "grad_norm": 0.785887699185926, "learning_rate": 0.001071, "loss": 5.8055, "step": 357},
    {"epoch": 0.00358, "grad_norm": 0.6152742029666318, "learning_rate": 0.001074, "loss": 5.7887, "step": 358},
    {"epoch": 0.00359, "grad_norm": 0.5372063086433791, "learning_rate": 0.001077, "loss": 5.78, "step": 359},
    {"epoch": 0.0036, "grad_norm": 0.5078257678271803, "learning_rate": 0.00108, "loss": 5.7825, "step": 360},
    {"epoch": 0.00361, "grad_norm": 0.4885651334266738, "learning_rate": 0.001083, "loss": 5.7748, "step": 361},
    {"epoch": 0.00362, "grad_norm": 0.5495429650143561, "learning_rate": 0.001086, "loss": 5.7596, "step": 362},
    {"epoch": 0.00363, "grad_norm": 0.5626950540152672, "learning_rate": 0.001089, "loss": 5.7515, "step": 363},
    {"epoch": 0.00364, "grad_norm": 0.6199658617744055, "learning_rate": 0.001092, "loss": 5.766, "step": 364},
    {"epoch": 0.00365, "grad_norm": 0.7392438146286566, "learning_rate": 0.001095, "loss": 5.7655, "step": 365},
    {"epoch": 0.00366, "grad_norm": 0.9864875055616179, "learning_rate": 0.001098, "loss": 5.7524, "step": 366},
    {"epoch": 0.00367, "grad_norm": 1.1449768044417052, "learning_rate": 0.001101, "loss": 5.7648, "step": 367},
    {"epoch": 0.00368, "grad_norm": 0.9013400729864322, "learning_rate": 0.001104, "loss": 5.755, "step": 368},
    {"epoch": 0.00369, "grad_norm": 0.9093377711089596, "learning_rate": 0.001107, "loss": 5.7659, "step": 369},
    {"epoch": 0.0037, "grad_norm": 0.575923493278037, "learning_rate": 0.00111, "loss": 5.7328, "step": 370},
    {"epoch": 0.00371, "grad_norm": 0.6737016310188764, "learning_rate": 0.001113, "loss": 5.7102, "step": 371},
    {"epoch": 0.00372, "grad_norm": 0.4833347808689347, "learning_rate": 0.001116, "loss": 5.7236, "step": 372},
    {"epoch": 0.00373, "grad_norm": 0.6361357392920576, "learning_rate": 0.001119, "loss": 5.7181, "step": 373},
    {"epoch": 0.00374, "grad_norm": 0.6286357986456886, "learning_rate": 0.001122, "loss": 5.7192, "step": 374},
    {"epoch": 0.00375, "grad_norm": 0.7140127832546589, "learning_rate": 0.0011250000000000001, "loss": 5.7248, "step": 375},
    {"epoch": 0.00376, "grad_norm": 0.728891228424708, "learning_rate": 0.001128, "loss": 5.7207, "step": 376},
    {"epoch": 0.00377, "grad_norm": 0.7251122752592066, "learning_rate": 0.001131, "loss": 5.7141, "step": 377},
    {"epoch": 0.00378, "grad_norm": 0.8109517942362439, "learning_rate": 0.001134, "loss": 5.7121, "step": 378},
    {"epoch": 0.00379, "grad_norm": 0.7548765882892476, "learning_rate": 0.001137, "loss": 5.6981, "step": 379},
    {"epoch": 0.0038, "grad_norm": 0.5982490555873449, "learning_rate": 0.00114, "loss": 5.7128, "step": 380},
    {"epoch": 0.00381, "grad_norm": 0.5479723067602768, "learning_rate": 0.0011430000000000001, "loss": 5.6793, "step": 381},
    {"epoch": 0.00382, "grad_norm": 0.5400365110175976, "learning_rate": 0.001146, "loss": 5.6631, "step": 382},
    {"epoch": 0.00383, "grad_norm": 0.4406698702316126, "learning_rate": 0.001149, "loss": 5.673, "step": 383},
    {"epoch": 0.00384, "grad_norm": 0.5495584711003424, "learning_rate": 0.001152, "loss": 5.6782, "step": 384},
    {"epoch": 0.00385, "grad_norm": 0.7047837665038742, "learning_rate": 0.001155, "loss": 5.6686, "step": 385},
    {"epoch": 0.00386, "grad_norm": 1.0039450355838517, "learning_rate": 0.001158, "loss": 5.6846, "step": 386},
    {"epoch": 0.00387, "grad_norm": 1.2552299335364856, "learning_rate": 0.0011610000000000001, "loss": 5.6713, "step": 387},
    {"epoch": 0.00388, "grad_norm": 1.0630057286422998, "learning_rate": 0.001164, "loss": 5.6615, "step": 388},
    {"epoch": 0.00389, "grad_norm": 1.4085777330550793, "learning_rate": 0.001167, "loss": 5.6963, "step": 389},
    {"epoch": 0.0039, "grad_norm": 0.7893066659624004, "learning_rate": 0.00117, "loss": 5.651, "step": 390},
    {"epoch": 0.00391, "grad_norm": 0.8891270576556106, "learning_rate": 0.001173, "loss": 5.666, "step": 391},
    {"epoch": 0.00392, "grad_norm": 1.1058013192110903, "learning_rate": 0.001176, "loss": 5.6796, "step": 392},
    {"epoch": 0.00393, "grad_norm": 1.170614508410806, "learning_rate": 0.0011790000000000001, "loss": 5.6646, "step": 393},
    {"epoch": 0.00394, "grad_norm": 0.8391276502601887, "learning_rate": 0.001182, "loss": 5.6402, "step": 394},
    {"epoch": 0.00395, "grad_norm": 0.9435882620236007, "learning_rate": 0.001185, "loss": 5.6277, "step": 395},
    {"epoch": 0.00396, "grad_norm": 0.7925001626557522, "learning_rate": 0.001188, "loss": 5.6404, "step": 396},
    {"epoch": 0.00397, "grad_norm": 0.8633162203152536, "learning_rate": 0.001191, "loss": 5.6366, "step": 397},
    {"epoch": 0.00398, "grad_norm": 0.9359127674730449, "learning_rate": 0.0011940000000000002, "loss": 5.6437, "step": 398},
    {"epoch": 0.00399, "grad_norm": 1.0926478209626875, "learning_rate": 0.0011970000000000001, "loss": 5.6494, "step": 399},
    {"epoch": 0.004, "grad_norm": 0.8943926064407558, "learning_rate": 0.0012000000000000001, "loss": 5.6306, "step": 400},
|
{ |
|
"epoch": 0.00401, |
|
"grad_norm": 1.211025202532141, |
|
"learning_rate": 0.001203, |
|
"loss": 5.6241, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.00402, |
|
"grad_norm": 0.8585006093020132, |
|
"learning_rate": 0.001206, |
|
"loss": 5.6196, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.00403, |
|
"grad_norm": 0.8708424012246267, |
|
"learning_rate": 0.001209, |
|
"loss": 5.618, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.00404, |
|
"grad_norm": 0.6771235232466043, |
|
"learning_rate": 0.0012120000000000002, |
|
"loss": 5.6057, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.00405, |
|
"grad_norm": 0.5089480196948696, |
|
"learning_rate": 0.0012150000000000002, |
|
"loss": 5.5986, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.00406, |
|
"grad_norm": 0.5324784457955185, |
|
"learning_rate": 0.0012180000000000001, |
|
"loss": 5.583, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.00407, |
|
"grad_norm": 0.4806328504890235, |
|
"learning_rate": 0.0012209999999999999, |
|
"loss": 5.575, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.00408, |
|
"grad_norm": 0.5340674298116082, |
|
"learning_rate": 0.001224, |
|
"loss": 5.5941, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.00409, |
|
"grad_norm": 0.6817510563164704, |
|
"learning_rate": 0.001227, |
|
"loss": 5.5739, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.0041, |
|
"grad_norm": 0.8230982603577015, |
|
"learning_rate": 0.00123, |
|
"loss": 5.5739, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.00411, |
|
"grad_norm": 0.7982833366881755, |
|
"learning_rate": 0.001233, |
|
"loss": 5.5886, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.00412, |
|
"grad_norm": 0.7882120301866252, |
|
"learning_rate": 0.001236, |
|
"loss": 5.5767, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.00413, |
|
"grad_norm": 0.9078160072473371, |
|
"learning_rate": 0.0012389999999999999, |
|
"loss": 5.5798, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.00414, |
|
"grad_norm": 0.8046291024914881, |
|
"learning_rate": 0.001242, |
|
"loss": 5.5582, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.00415, |
|
"grad_norm": 0.9449024826812693, |
|
"learning_rate": 0.001245, |
|
"loss": 5.5633, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.00416, |
|
"grad_norm": 0.9578358959807691, |
|
"learning_rate": 0.001248, |
|
"loss": 5.5489, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.00417, |
|
"grad_norm": 0.7364680005190741, |
|
"learning_rate": 0.001251, |
|
"loss": 5.575, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.00418, |
|
"grad_norm": 0.5600093653837771, |
|
"learning_rate": 0.001254, |
|
"loss": 5.5419, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.00419, |
|
"grad_norm": 0.7369458002937045, |
|
"learning_rate": 0.0012569999999999999, |
|
"loss": 5.535, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.0042, |
|
"grad_norm": 0.7566412883958042, |
|
"learning_rate": 0.00126, |
|
"loss": 5.5478, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.00421, |
|
"grad_norm": 0.9341471688658377, |
|
"learning_rate": 0.001263, |
|
"loss": 5.5468, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.00422, |
|
"grad_norm": 0.9387048270351058, |
|
"learning_rate": 0.001266, |
|
"loss": 5.5395, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.00423, |
|
"grad_norm": 0.738543672170714, |
|
"learning_rate": 0.001269, |
|
"loss": 5.5309, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.00424, |
|
"grad_norm": 0.879163854006119, |
|
"learning_rate": 0.001272, |
|
"loss": 5.5379, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.00425, |
|
"grad_norm": 0.878245832078137, |
|
"learning_rate": 0.001275, |
|
"loss": 5.5393, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.00426, |
|
"grad_norm": 0.8393572375675296, |
|
"learning_rate": 0.001278, |
|
"loss": 5.5388, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.00427, |
|
"grad_norm": 0.8175993205302655, |
|
"learning_rate": 0.001281, |
|
"loss": 5.5188, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.00428, |
|
"grad_norm": 0.8492227718152501, |
|
"learning_rate": 0.001284, |
|
"loss": 5.5011, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.00429, |
|
"grad_norm": 0.8455500448937461, |
|
"learning_rate": 0.001287, |
|
"loss": 5.5167, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.0043, |
|
"grad_norm": 0.9588196540360735, |
|
"learning_rate": 0.00129, |
|
"loss": 5.5126, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.00431, |
|
"grad_norm": 1.0358439149859766, |
|
"learning_rate": 0.001293, |
|
"loss": 5.5121, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.00432, |
|
"grad_norm": 0.853137595287236, |
|
"learning_rate": 0.001296, |
|
"loss": 5.5152, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.00433, |
|
"grad_norm": 0.9144896540159448, |
|
"learning_rate": 0.001299, |
|
"loss": 5.5075, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.00434, |
|
"grad_norm": 1.0340397416077374, |
|
"learning_rate": 0.001302, |
|
"loss": 5.5131, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.00435, |
|
"grad_norm": 1.1136200661191735, |
|
"learning_rate": 0.001305, |
|
"loss": 5.5153, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.00436, |
|
"grad_norm": 0.7998503424321469, |
|
"learning_rate": 0.001308, |
|
"loss": 5.4814, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.00437, |
|
"grad_norm": 0.8862208467810537, |
|
"learning_rate": 0.001311, |
|
"loss": 5.5052, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.00438, |
|
"grad_norm": 0.85557749799579, |
|
"learning_rate": 0.001314, |
|
"loss": 5.4855, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.00439, |
|
"grad_norm": 0.6596001138977952, |
|
"learning_rate": 0.001317, |
|
"loss": 5.5056, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.0044, |
|
"grad_norm": 0.5461926920380444, |
|
"learning_rate": 0.00132, |
|
"loss": 5.4734, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.00441, |
|
"grad_norm": 0.5325344576484976, |
|
"learning_rate": 0.001323, |
|
"loss": 5.4692, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.00442, |
|
"grad_norm": 0.46029396349038315, |
|
"learning_rate": 0.0013260000000000001, |
|
"loss": 5.4603, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.00443, |
|
"grad_norm": 0.5200620875251907, |
|
"learning_rate": 0.001329, |
|
"loss": 5.4641, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.00444, |
|
"grad_norm": 0.511034817927936, |
|
"learning_rate": 0.001332, |
|
"loss": 5.4632, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.00445, |
|
"grad_norm": 0.61375364791033, |
|
"learning_rate": 0.001335, |
|
"loss": 5.4483, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.00446, |
|
"grad_norm": 0.7540282970336214, |
|
"learning_rate": 0.001338, |
|
"loss": 5.4549, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.00447, |
|
"grad_norm": 0.7743861790351634, |
|
"learning_rate": 0.001341, |
|
"loss": 5.456, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.00448, |
|
"grad_norm": 0.6949785247448689, |
|
"learning_rate": 0.0013440000000000001, |
|
"loss": 5.4375, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.00449, |
|
"grad_norm": 0.8972954522362333, |
|
"learning_rate": 0.001347, |
|
"loss": 5.4453, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.0045, |
|
"grad_norm": 1.0136292885280909, |
|
"learning_rate": 0.00135, |
|
"loss": 5.4524, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.00451, |
|
"grad_norm": 0.7959348815359711, |
|
"learning_rate": 0.001353, |
|
"loss": 5.4372, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.00452, |
|
"grad_norm": 0.750530581913797, |
|
"learning_rate": 0.001356, |
|
"loss": 5.4212, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.00453, |
|
"grad_norm": 0.718332283553841, |
|
"learning_rate": 0.001359, |
|
"loss": 5.4094, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.00454, |
|
"grad_norm": 0.8243339574967999, |
|
"learning_rate": 0.0013620000000000001, |
|
"loss": 5.4327, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.00455, |
|
"grad_norm": 0.8060545663764288, |
|
"learning_rate": 0.0013650000000000001, |
|
"loss": 5.4278, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.00456, |
|
"grad_norm": 0.9387057405661987, |
|
"learning_rate": 0.001368, |
|
"loss": 5.4287, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.00457, |
|
"grad_norm": 1.110172512819111, |
|
"learning_rate": 0.001371, |
|
"loss": 5.4304, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.00458, |
|
"grad_norm": 0.7485802071411273, |
|
"learning_rate": 0.001374, |
|
"loss": 5.4279, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.00459, |
|
"grad_norm": 0.846395295484429, |
|
"learning_rate": 0.0013770000000000002, |
|
"loss": 5.4177, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.0046, |
|
"grad_norm": 1.2095188964594632, |
|
"learning_rate": 0.0013800000000000002, |
|
"loss": 5.4166, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.00461, |
|
"grad_norm": 1.1548058188436976, |
|
"learning_rate": 0.0013830000000000001, |
|
"loss": 5.417, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.00462, |
|
"grad_norm": 0.9626057997692408, |
|
"learning_rate": 0.001386, |
|
"loss": 5.4177, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.00463, |
|
"grad_norm": 1.1365427244526745, |
|
"learning_rate": 0.001389, |
|
"loss": 5.4083, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.00464, |
|
"grad_norm": 0.7154214826672701, |
|
"learning_rate": 0.001392, |
|
"loss": 5.4147, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.00465, |
|
"grad_norm": 0.5933778225768791, |
|
"learning_rate": 0.0013950000000000002, |
|
"loss": 5.3806, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.00466, |
|
"grad_norm": 0.6213055581786315, |
|
"learning_rate": 0.0013980000000000002, |
|
"loss": 5.3973, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.00467, |
|
"grad_norm": 0.5608640811659587, |
|
"learning_rate": 0.0014010000000000001, |
|
"loss": 5.3871, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.00468, |
|
"grad_norm": 0.4459725756410885, |
|
"learning_rate": 0.001404, |
|
"loss": 5.3713, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.00469, |
|
"grad_norm": 0.46857319789964524, |
|
"learning_rate": 0.001407, |
|
"loss": 5.3733, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.0047, |
|
"grad_norm": 0.4864537455422831, |
|
"learning_rate": 0.00141, |
|
"loss": 5.3823, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.00471, |
|
"grad_norm": 0.5233417273033707, |
|
"learning_rate": 0.001413, |
|
"loss": 5.3595, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.00472, |
|
"grad_norm": 0.7276814872840428, |
|
"learning_rate": 0.001416, |
|
"loss": 5.376, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.00473, |
|
"grad_norm": 0.9313958457119089, |
|
"learning_rate": 0.001419, |
|
"loss": 5.3908, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.00474, |
|
"grad_norm": 0.9969581851520253, |
|
"learning_rate": 0.0014219999999999999, |
|
"loss": 5.3782, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.00475, |
|
"grad_norm": 0.7172709684261298, |
|
"learning_rate": 0.001425, |
|
"loss": 5.3626, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.00476, |
|
"grad_norm": 0.749267541315669, |
|
"learning_rate": 0.001428, |
|
"loss": 5.395, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.00477, |
|
"grad_norm": 0.7586220730764037, |
|
"learning_rate": 0.001431, |
|
"loss": 5.35, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.00478, |
|
"grad_norm": 0.8456343691773762, |
|
"learning_rate": 0.001434, |
|
"loss": 5.378, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.00479, |
|
"grad_norm": 1.0643326937248148, |
|
"learning_rate": 0.001437, |
|
"loss": 5.3628, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.0048, |
|
"grad_norm": 0.9414664669888669, |
|
"learning_rate": 0.0014399999999999999, |
|
"loss": 5.3552, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.00481, |
|
"grad_norm": 0.9003358375425434, |
|
"learning_rate": 0.001443, |
|
"loss": 5.3593, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.00482, |
|
"grad_norm": 0.8978331557974801, |
|
"learning_rate": 0.001446, |
|
"loss": 5.3398, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.00483, |
|
"grad_norm": 1.1289938883763697, |
|
"learning_rate": 0.001449, |
|
"loss": 5.3578, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.00484, |
|
"grad_norm": 1.1013965257300222, |
|
"learning_rate": 0.001452, |
|
"loss": 5.3611, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.00485, |
|
"grad_norm": 0.9590590729365723, |
|
"learning_rate": 0.001455, |
|
"loss": 5.3653, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.00486, |
|
"grad_norm": 0.8987758447866343, |
|
"learning_rate": 0.001458, |
|
"loss": 5.3363, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.00487, |
|
"grad_norm": 1.1924974582473045, |
|
"learning_rate": 0.001461, |
|
"loss": 5.3676, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.00488, |
|
"grad_norm": 0.945268328759551, |
|
"learning_rate": 0.001464, |
|
"loss": 5.3393, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.00489, |
|
"grad_norm": 0.9295239913179795, |
|
"learning_rate": 0.001467, |
|
"loss": 5.3304, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.0049, |
|
"grad_norm": 0.8440135575742712, |
|
"learning_rate": 0.00147, |
|
"loss": 5.3401, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.00491, |
|
"grad_norm": 0.8518595255895461, |
|
"learning_rate": 0.001473, |
|
"loss": 5.3387, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.00492, |
|
"grad_norm": 0.8521564820940866, |
|
"learning_rate": 0.001476, |
|
"loss": 5.3306, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.00493, |
|
"grad_norm": 1.037786125041147, |
|
"learning_rate": 0.001479, |
|
"loss": 5.337, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 0.00494, |
|
"grad_norm": 0.8602402216970677, |
|
"learning_rate": 0.001482, |
|
"loss": 5.3365, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 0.00495, |
|
"grad_norm": 0.833325441881814, |
|
"learning_rate": 0.001485, |
|
"loss": 5.3402, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.00496, |
|
"grad_norm": 0.7919536178225737, |
|
"learning_rate": 0.001488, |
|
"loss": 5.324, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 0.00497, |
|
"grad_norm": 0.873050532284563, |
|
"learning_rate": 0.001491, |
|
"loss": 5.3076, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 0.00498, |
|
"grad_norm": 0.8401174504098841, |
|
"learning_rate": 0.001494, |
|
"loss": 5.3331, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.00499, |
|
"grad_norm": 0.7077904347820982, |
|
"learning_rate": 0.001497, |
|
"loss": 5.3023, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 0.005, |
|
"grad_norm": 0.722193970534152, |
|
"learning_rate": 0.0015, |
|
"loss": 5.3084, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.00501, |
|
"grad_norm": 0.6788524699363854, |
|
"learning_rate": 0.001503, |
|
"loss": 5.3068, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 0.00502, |
|
"grad_norm": 0.4688637134987383, |
|
"learning_rate": 0.001506, |
|
"loss": 5.3027, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 0.00503, |
|
"grad_norm": 0.5347885266184398, |
|
"learning_rate": 0.0015090000000000001, |
|
"loss": 5.3107, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 0.00504, |
|
"grad_norm": 0.4568589587316421, |
|
"learning_rate": 0.001512, |
|
"loss": 5.2883, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 0.00505, |
|
"grad_norm": 0.6059364079243326, |
|
"learning_rate": 0.001515, |
|
"loss": 5.2843, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.00506, |
|
"grad_norm": 0.5690871559871206, |
|
"learning_rate": 0.001518, |
|
"loss": 5.2813, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 0.00507, |
|
"grad_norm": 0.6408278920334604, |
|
"learning_rate": 0.001521, |
|
"loss": 5.279, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 0.00508, |
|
"grad_norm": 0.6374798421453328, |
|
"learning_rate": 0.001524, |
|
"loss": 5.2759, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 0.00509, |
|
"grad_norm": 0.5508180980448808, |
|
"learning_rate": 0.0015270000000000001, |
|
"loss": 5.2603, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 0.0051, |
|
"grad_norm": 0.4678264081626914, |
|
"learning_rate": 0.0015300000000000001, |
|
"loss": 5.2734, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.00511, |
|
"grad_norm": 0.4947375080326457, |
|
"learning_rate": 0.001533, |
|
"loss": 5.252, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 0.00512, |
|
"grad_norm": 0.4646141343190718, |
|
"learning_rate": 0.001536, |
|
"loss": 5.2454, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 0.00513, |
|
"grad_norm": 0.4614608859817223, |
|
"learning_rate": 0.001539, |
|
"loss": 5.2616, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 0.00514, |
|
"grad_norm": 0.47514560040822135, |
|
"learning_rate": 0.001542, |
|
"loss": 5.2462, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 0.00515, |
|
"grad_norm": 0.5516172292908152, |
|
"learning_rate": 0.0015450000000000001, |
|
"loss": 5.2495, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.00516, |
|
"grad_norm": 0.6762883843976203, |
|
"learning_rate": 0.0015480000000000001, |
|
"loss": 5.2514, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 0.00517, |
|
"grad_norm": 0.8220059659316783, |
|
"learning_rate": 0.001551, |
|
"loss": 5.2491, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 0.00518, |
|
"grad_norm": 0.8472303407674783, |
|
"learning_rate": 0.001554, |
|
"loss": 5.2384, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 0.00519, |
|
"grad_norm": 0.8352554081583162, |
|
"learning_rate": 0.001557, |
|
"loss": 5.2403, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 0.0052, |
|
"grad_norm": 0.9289527635802941, |
|
"learning_rate": 0.0015600000000000002, |
|
"loss": 5.2527, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.00521, |
|
"grad_norm": 1.112613217213008, |
|
"learning_rate": 0.0015630000000000002, |
|
"loss": 5.242, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 0.00522, |
|
"grad_norm": 0.9402831205824588, |
|
"learning_rate": 0.0015660000000000001, |
|
"loss": 5.2388, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 0.00523, |
|
"grad_norm": 1.103453237567065, |
|
"learning_rate": 0.001569, |
|
"loss": 5.2627, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 0.00524, |
|
"grad_norm": 1.0118007660209627, |
|
"learning_rate": 0.001572, |
|
"loss": 5.268, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 0.00525, |
|
"grad_norm": 0.9435955195760587, |
|
"learning_rate": 0.001575, |
|
"loss": 5.2606, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.00526, |
|
"grad_norm": 0.8731456366832249, |
|
"learning_rate": 0.0015780000000000002, |
|
"loss": 5.2355, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 0.00527, |
|
"grad_norm": 0.7556141048030679, |
|
"learning_rate": 0.0015810000000000002, |
|
"loss": 5.244, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 0.00528, |
|
"grad_norm": 0.8959181971092488, |
|
"learning_rate": 0.0015840000000000001, |
|
"loss": 5.2287, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 0.00529, |
|
"grad_norm": 1.10956087267792, |
|
"learning_rate": 0.001587, |
|
"loss": 5.2485, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 0.0053, |
|
"grad_norm": 0.7373688170406403, |
|
"learning_rate": 0.00159, |
|
"loss": 5.2473, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.00531, |
|
"grad_norm": 0.9440929198745536, |
|
"learning_rate": 0.001593, |
|
"loss": 5.2364, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 0.00532, |
|
"grad_norm": 1.2761807884581948, |
|
"learning_rate": 0.0015960000000000002, |
|
"loss": 5.2627, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 0.00533, |
|
"grad_norm": 0.7254918561906474, |
|
"learning_rate": 0.0015990000000000002, |
|
"loss": 5.2411, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 0.00534, |
|
"grad_norm": 0.9458095799105631, |
|
"learning_rate": 0.0016020000000000001, |
|
"loss": 5.2364, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 0.00535, |
|
"grad_norm": 1.1518877149230409, |
|
"learning_rate": 0.001605, |
|
"loss": 5.2538, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.00536, |
|
"grad_norm": 1.0003795894462209, |
|
"learning_rate": 0.001608, |
|
"loss": 5.2516, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 0.00537, |
|
"grad_norm": 1.0837750602504435, |
|
"learning_rate": 0.0016110000000000002, |
|
"loss": 5.245, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 0.00538, |
|
"grad_norm": 0.8341741073056014, |
|
"learning_rate": 0.0016140000000000002, |
|
"loss": 5.2215, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 0.00539, |
|
"grad_norm": 0.8405248729905289, |
|
"learning_rate": 0.0016170000000000002, |
|
"loss": 5.218, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 0.0054, |
|
"grad_norm": 1.0652634270364731, |
|
"learning_rate": 0.0016200000000000001, |
|
"loss": 5.2348, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.00541, |
|
"grad_norm": 1.02504417487003, |
|
"learning_rate": 0.001623, |
|
"loss": 5.2275, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 0.00542, |
|
"grad_norm": 1.0410836109463524, |
|
"learning_rate": 0.001626, |
|
"loss": 5.2461, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 0.00543, |
|
"grad_norm": 1.1820143116937272, |
|
"learning_rate": 0.0016290000000000002, |
|
"loss": 5.2251, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 0.00544, |
|
"grad_norm": 0.7766425096978492, |
|
"learning_rate": 0.0016320000000000002, |
|
"loss": 5.227, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 0.00545, |
|
"grad_norm": 0.7162190734488068, |
|
"learning_rate": 0.0016350000000000002, |
|
"loss": 5.2344, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.00546, |
|
"grad_norm": 0.6138609629035112, |
|
"learning_rate": 0.0016380000000000001, |
|
"loss": 5.1997, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 0.00547, |
|
"grad_norm": 0.5845964576550613, |
|
"learning_rate": 0.001641, |
|
"loss": 5.199, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 0.00548, |
|
"grad_norm": 0.5000045746748227, |
|
"learning_rate": 0.001644, |
|
"loss": 5.1903, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 0.00549, |
|
"grad_norm": 0.45001097530426093, |
|
"learning_rate": 0.0016470000000000002, |
|
"loss": 5.1848, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 0.0055, |
|
"grad_norm": 0.48852089918970865, |
|
"learning_rate": 0.0016500000000000002, |
|
"loss": 5.1966, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.00551, |
|
"grad_norm": 0.4504277526474317, |
|
"learning_rate": 0.0016530000000000002, |
|
"loss": 5.1901, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 0.00552, |
|
"grad_norm": 0.3849177997836439, |
|
"learning_rate": 0.0016560000000000001, |
|
"loss": 5.1829, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 0.00553, |
|
"grad_norm": 0.3648459807382568, |
|
"learning_rate": 0.001659, |
|
"loss": 5.1708, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 0.00554, |
|
"grad_norm": 0.36806425815097327, |
|
"learning_rate": 0.0016620000000000003, |
|
"loss": 5.1683, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 0.00555, |
|
"grad_norm": 0.399857849170916, |
|
"learning_rate": 0.0016650000000000002, |
|
"loss": 5.174, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.00556, |
|
"grad_norm": 0.5317181922092629, |
|
"learning_rate": 0.0016680000000000002, |
|
"loss": 5.1608, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 0.00557, |
|
"grad_norm": 0.7831777956168835, |
|
"learning_rate": 0.0016710000000000002, |
|
"loss": 5.1496, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 0.00558, |
|
"grad_norm": 1.0341253030675983, |
|
"learning_rate": 0.0016740000000000001, |
|
"loss": 5.1915, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 0.00559, |
|
"grad_norm": 0.9310990839840096, |
|
"learning_rate": 0.001677, |
|
"loss": 5.1724, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 0.0056, |
|
"grad_norm": 1.1320536331440925, |
|
"learning_rate": 0.0016800000000000003, |
|
"loss": 5.1553, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.00561, |
|
"grad_norm": 1.0798635762834115, |
|
"learning_rate": 0.0016830000000000003, |
|
"loss": 5.1888, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 0.00562, |
|
"grad_norm": 0.9955409946448898, |
|
"learning_rate": 0.0016860000000000002, |
|
"loss": 5.1785, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 0.00563, |
|
"grad_norm": 0.8046398619546209, |
|
"learning_rate": 0.001689, |
|
"loss": 5.1727, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 0.00564, |
|
"grad_norm": 0.8640806614647399, |
|
"learning_rate": 0.001692, |
|
"loss": 5.1564, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 0.00565, |
|
"grad_norm": 0.9094590673653183, |
|
"learning_rate": 0.001695, |
|
"loss": 5.1591, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.00566, |
|
"grad_norm": 1.0763832222921514, |
|
"learning_rate": 0.0016979999999999999, |
|
"loss": 5.1686, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 0.00567, |
|
"grad_norm": 0.9106969464088174, |
|
"learning_rate": 0.0017009999999999998, |
|
"loss": 5.1536, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 0.00568, |
|
"grad_norm": 1.0612496613421336, |
|
"learning_rate": 0.0017039999999999998, |
|
"loss": 5.175, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 0.00569, |
|
"grad_norm": 1.030428797640198, |
|
"learning_rate": 0.001707, |
|
"loss": 5.1588, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 0.0057, |
|
"grad_norm": 1.0168109110540526, |
|
"learning_rate": 0.00171, |
|
"loss": 5.1716, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.00571, |
|
"grad_norm": 1.1348044931505399, |
|
"learning_rate": 0.001713, |
|
"loss": 5.1856, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 0.00572, |
|
"grad_norm": 0.761146802779985, |
|
"learning_rate": 0.0017159999999999999, |
|
"loss": 5.1497, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 0.00573, |
|
"grad_norm": 0.7662650183514703, |
|
"learning_rate": 0.0017189999999999998, |
|
"loss": 5.1536, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 0.00574, |
|
"grad_norm": 0.8733185334950944, |
|
"learning_rate": 0.001722, |
|
"loss": 5.1641, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 0.00575, |
|
"grad_norm": 0.8629871927891676, |
|
"learning_rate": 0.001725, |
|
"loss": 5.142, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.00576, |
|
"grad_norm": 0.8467410686880783, |
|
"learning_rate": 0.001728, |
|
"loss": 5.137, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 0.00577, |
|
"grad_norm": 0.8007452144545169, |
|
"learning_rate": 0.001731, |
|
"loss": 5.1615, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 0.00578, |
|
"grad_norm": 0.6405300314411674, |
|
"learning_rate": 0.0017339999999999999, |
|
"loss": 5.1325, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 0.00579, |
|
"grad_norm": 0.70587507977218, |
|
"learning_rate": 0.0017369999999999998, |
|
"loss": 5.1208, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 0.0058, |
|
"grad_norm": 0.6985854652221088, |
|
"learning_rate": 0.00174, |
|
"loss": 5.1378, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.00581, |
|
"grad_norm": 0.6137529113435678, |
|
"learning_rate": 0.001743, |
|
"loss": 5.1348, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 0.00582, |
|
"grad_norm": 0.48264514730128444, |
|
"learning_rate": 0.001746, |
|
"loss": 5.1045, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 0.00583, |
|
"grad_norm": 0.5771367675923135, |
|
"learning_rate": 0.001749, |
|
"loss": 5.1293, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 0.00584, |
|
"grad_norm": 0.6936037332792736, |
|
"learning_rate": 0.0017519999999999999, |
|
"loss": 5.1229, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 0.00585, |
|
"grad_norm": 0.7595444862659105, |
|
"learning_rate": 0.0017549999999999998, |
|
"loss": 5.1095, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.00586, |
|
"grad_norm": 0.8605796861767604, |
|
"learning_rate": 0.001758, |
|
"loss": 5.1202, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 0.00587, |
|
"grad_norm": 0.907920304070859, |
|
"learning_rate": 0.001761, |
|
"loss": 5.1079, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 0.00588, |
|
"grad_norm": 0.9246177820411534, |
|
"learning_rate": 0.001764, |
|
"loss": 5.1163, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 0.00589, |
|
"grad_norm": 0.8704696950275989, |
|
"learning_rate": 0.001767, |
|
"loss": 5.1211, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 0.0059, |
|
"grad_norm": 0.7193563853719346, |
|
"learning_rate": 0.0017699999999999999, |
|
"loss": 5.101, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.00591, |
|
"grad_norm": 0.6949293332158505, |
|
"learning_rate": 0.001773, |
|
"loss": 5.0928, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 0.00592, |
|
"grad_norm": 0.654055845037553, |
|
"learning_rate": 0.001776, |
|
"loss": 5.1147, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 0.00593, |
|
"grad_norm": 0.7140132542710956, |
|
"learning_rate": 0.001779, |
|
"loss": 5.1161, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 0.00594, |
|
"grad_norm": 0.6270820258277058, |
|
"learning_rate": 0.001782, |
|
"loss": 5.107, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 0.00595, |
|
"grad_norm": 0.5627466921921568, |
|
"learning_rate": 0.001785, |
|
"loss": 5.0962, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.00596, |
|
"grad_norm": 0.6408060930916085, |
|
"learning_rate": 0.0017879999999999999, |
|
"loss": 5.0966, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 0.00597, |
|
"grad_norm": 0.7315483645065906, |
|
"learning_rate": 0.001791, |
|
"loss": 5.077, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 0.00598, |
|
"grad_norm": 0.7860917802127037, |
|
"learning_rate": 0.001794, |
|
"loss": 5.0917, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 0.00599, |
|
"grad_norm": 0.7396694082174577, |
|
"learning_rate": 0.001797, |
|
"loss": 5.0901, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 0.006, |
|
"grad_norm": 0.678011019966431, |
|
"learning_rate": 0.0018, |
|
"loss": 5.0859, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.00601, |
|
"grad_norm": 0.8039567544190375, |
|
"learning_rate": 0.001803, |
|
"loss": 5.0817, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 0.00602, |
|
"grad_norm": 0.8362811130198314, |
|
"learning_rate": 0.0018059999999999999, |
|
"loss": 5.0736, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 0.00603, |
|
"grad_norm": 0.7437701841167941, |
|
"learning_rate": 0.001809, |
|
"loss": 5.0897, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 0.00604, |
|
"grad_norm": 0.7865788629191941, |
|
"learning_rate": 0.001812, |
|
"loss": 5.0711, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 0.00605, |
|
"grad_norm": 0.7065527487675252, |
|
"learning_rate": 0.001815, |
|
"loss": 5.0798, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.00606, |
|
"grad_norm": 0.6576721805135337, |
|
"learning_rate": 0.001818, |
|
"loss": 5.0782, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 0.00607, |
|
"grad_norm": 0.6528064751937639, |
|
"learning_rate": 0.001821, |
|
"loss": 5.0831, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 0.00608, |
|
"grad_norm": 0.6182798778507802, |
|
"learning_rate": 0.001824, |
|
"loss": 5.0615, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 0.00609, |
|
"grad_norm": 0.8163285355074863, |
|
"learning_rate": 0.001827, |
|
"loss": 5.079, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 0.0061, |
|
"grad_norm": 0.795800426894062, |
|
"learning_rate": 0.00183, |
|
"loss": 5.053, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.00611, |
|
"grad_norm": 0.7510077277929101, |
|
"learning_rate": 0.001833, |
|
"loss": 5.0545, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 0.00612, |
|
"grad_norm": 0.7863529213037952, |
|
"learning_rate": 0.001836, |
|
"loss": 5.0625, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 0.00613, |
|
"grad_norm": 0.718541559578873, |
|
"learning_rate": 0.001839, |
|
"loss": 5.0526, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 0.00614, |
|
"grad_norm": 0.648240484004962, |
|
"learning_rate": 0.001842, |
|
"loss": 5.0729, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 0.00615, |
|
"grad_norm": 0.6572100797643513, |
|
"learning_rate": 0.001845, |
|
"loss": 5.0607, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.00616, |
|
"grad_norm": 0.598653088729871, |
|
"learning_rate": 0.001848, |
|
"loss": 5.0431, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 0.00617, |
|
"grad_norm": 0.6867319038647199, |
|
"learning_rate": 0.001851, |
|
"loss": 5.058, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 0.00618, |
|
"grad_norm": 0.8065427766951909, |
|
"learning_rate": 0.001854, |
|
"loss": 5.0471, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 0.00619, |
|
"grad_norm": 0.952336607614539, |
|
"learning_rate": 0.001857, |
|
"loss": 5.0614, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 0.0062, |
|
"grad_norm": 0.937144329881144, |
|
"learning_rate": 0.00186, |
|
"loss": 5.0503, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.00621, |
|
"grad_norm": 0.861992409216581, |
|
"learning_rate": 0.001863, |
|
"loss": 5.0519, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 0.00622, |
|
"grad_norm": 0.7937303039626423, |
|
"learning_rate": 0.001866, |
|
"loss": 5.046, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 0.00623, |
|
"grad_norm": 0.7729108287648654, |
|
"learning_rate": 0.001869, |
|
"loss": 5.0315, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 0.00624, |
|
"grad_norm": 0.7644499866680247, |
|
"learning_rate": 0.001872, |
|
"loss": 5.0449, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 0.00625, |
|
"grad_norm": 0.833886296843224, |
|
"learning_rate": 0.001875, |
|
"loss": 5.0598, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.00626, |
|
"grad_norm": 0.8372671439007264, |
|
"learning_rate": 0.0018780000000000001, |
|
"loss": 5.0351, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 0.00627, |
|
"grad_norm": 0.8603722936304884, |
|
"learning_rate": 0.001881, |
|
"loss": 5.0306, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 0.00628, |
|
"grad_norm": 0.7718433014027412, |
|
"learning_rate": 0.001884, |
|
"loss": 5.0548, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 0.00629, |
|
"grad_norm": 0.6787638881219394, |
|
"learning_rate": 0.001887, |
|
"loss": 5.0263, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 0.0063, |
|
"grad_norm": 0.6841857077673691, |
|
"learning_rate": 0.00189, |
|
"loss": 5.0201, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.00631, |
|
"grad_norm": 0.7297958952205305, |
|
"learning_rate": 0.0018930000000000002, |
|
"loss": 5.0163, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 0.00632, |
|
"grad_norm": 0.8611511166687711, |
|
"learning_rate": 0.0018960000000000001, |
|
"loss": 5.0388, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 0.00633, |
|
"grad_norm": 0.9514927337678543, |
|
"learning_rate": 0.001899, |
|
"loss": 5.0438, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 0.00634, |
|
"grad_norm": 0.8956137982105657, |
|
"learning_rate": 0.001902, |
|
"loss": 5.031, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 0.00635, |
|
"grad_norm": 0.952460918530188, |
|
"learning_rate": 0.001905, |
|
"loss": 5.0218, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.00636, |
|
"grad_norm": 1.0747774477703274, |
|
"learning_rate": 0.001908, |
|
"loss": 5.0402, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 0.00637, |
|
"grad_norm": 0.8336825219104492, |
|
"learning_rate": 0.0019110000000000002, |
|
"loss": 5.0466, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 0.00638, |
|
"grad_norm": 0.8986808700713379, |
|
"learning_rate": 0.0019140000000000001, |
|
"loss": 5.0353, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 0.00639, |
|
"grad_norm": 1.0369332378334974, |
|
"learning_rate": 0.001917, |
|
"loss": 5.0213, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 0.0064, |
|
"grad_norm": 1.0511699002679498, |
|
"learning_rate": 0.00192, |
|
"loss": 5.0287, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.00641, |
|
"grad_norm": 1.0967175650217074, |
|
"learning_rate": 0.001923, |
|
"loss": 5.0426, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 0.00642, |
|
"grad_norm": 0.9933027226251017, |
|
"learning_rate": 0.001926, |
|
"loss": 5.0352, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 0.00643, |
|
"grad_norm": 0.9768777735178511, |
|
"learning_rate": 0.0019290000000000002, |
|
"loss": 5.0523, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 0.00644, |
|
"grad_norm": 1.001060805060802, |
|
"learning_rate": 0.0019320000000000001, |
|
"loss": 5.0363, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 0.00645, |
|
"grad_norm": 1.0724455418770003, |
|
"learning_rate": 0.001935, |
|
"loss": 5.0522, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.00646, |
|
"grad_norm": 1.1074452374796593, |
|
"learning_rate": 0.001938, |
|
"loss": 5.0611, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 0.00647, |
|
"grad_norm": 0.9675974316517354, |
|
"learning_rate": 0.001941, |
|
"loss": 5.062, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 0.00648, |
|
"grad_norm": 0.7269750089664031, |
|
"learning_rate": 0.0019440000000000002, |
|
"loss": 5.0382, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 0.00649, |
|
"grad_norm": 0.6773093371497811, |
|
"learning_rate": 0.0019470000000000002, |
|
"loss": 5.0209, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 0.0065, |
|
"grad_norm": 0.6721153823123059, |
|
"learning_rate": 0.0019500000000000001, |
|
"loss": 5.0458, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.00651, |
|
"grad_norm": 0.6257734701697928, |
|
"learning_rate": 0.001953, |
|
"loss": 5.022, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 0.00652, |
|
"grad_norm": 0.6412471052264174, |
|
"learning_rate": 0.0019560000000000003, |
|
"loss": 5.0157, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 0.00653, |
|
"grad_norm": 0.7030358108381808, |
|
"learning_rate": 0.0019590000000000002, |
|
"loss": 5.0108, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 0.00654, |
|
"grad_norm": 0.6417272703555769, |
|
"learning_rate": 0.001962, |
|
"loss": 4.994, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 0.00655, |
|
"grad_norm": 0.6123939930365004, |
|
"learning_rate": 0.001965, |
|
"loss": 5.0066, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.00656, |
|
"grad_norm": 0.6761646659152818, |
|
"learning_rate": 0.001968, |
|
"loss": 4.9975, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 0.00657, |
|
"grad_norm": 0.9015512681716132, |
|
"learning_rate": 0.001971, |
|
"loss": 4.9965, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 0.00658, |
|
"grad_norm": 1.1070883554163469, |
|
"learning_rate": 0.001974, |
|
"loss": 5.032, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 0.00659, |
|
"grad_norm": 0.8166390486888542, |
|
"learning_rate": 0.001977, |
|
"loss": 5.0022, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 0.0066, |
|
"grad_norm": 0.7886404692657704, |
|
"learning_rate": 0.00198, |
|
"loss": 4.9952, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.00661, |
|
"grad_norm": 0.6930157053874706, |
|
"learning_rate": 0.001983, |
|
"loss": 4.9751, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 0.00662, |
|
"grad_norm": 0.6021137246671374, |
|
"learning_rate": 0.0019860000000000004, |
|
"loss": 4.9738, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 0.00663, |
|
"grad_norm": 0.5342549595827694, |
|
"learning_rate": 0.0019890000000000003, |
|
"loss": 4.9858, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 0.00664, |
|
"grad_norm": 0.5549032292488858, |
|
"learning_rate": 0.0019920000000000003, |
|
"loss": 4.9668, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 0.00665, |
|
"grad_norm": 0.5630527450969944, |
|
"learning_rate": 0.0019950000000000002, |
|
"loss": 4.9437, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.00666, |
|
"grad_norm": 0.5436432724284425, |
|
"learning_rate": 0.001998, |
|
"loss": 4.9653, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 0.00667, |
|
"grad_norm": 0.5455501294317395, |
|
"learning_rate": 0.002001, |
|
"loss": 4.9611, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 0.00668, |
|
"grad_norm": 0.5771340900782089, |
|
"learning_rate": 0.002004, |
|
"loss": 4.9815, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 0.00669, |
|
"grad_norm": 0.5741027689717983, |
|
"learning_rate": 0.002007, |
|
"loss": 4.9757, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 0.0067, |
|
"grad_norm": 0.6058348461614704, |
|
"learning_rate": 0.00201, |
|
"loss": 4.969, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.00671, |
|
"grad_norm": 0.566759025441364, |
|
"learning_rate": 0.002013, |
|
"loss": 4.9434, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 0.00672, |
|
"grad_norm": 0.5204858890697072, |
|
"learning_rate": 0.002016, |
|
"loss": 4.9491, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 0.00673, |
|
"grad_norm": 0.5776654990466389, |
|
"learning_rate": 0.002019, |
|
"loss": 4.9473, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 0.00674, |
|
"grad_norm": 0.5899189939484214, |
|
"learning_rate": 0.0020220000000000004, |
|
"loss": 4.9512, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 0.00675, |
|
"grad_norm": 0.73017970913878, |
|
"learning_rate": 0.0020250000000000003, |
|
"loss": 4.954, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.00676, |
|
"grad_norm": 0.8714534217285366, |
|
"learning_rate": 0.0020280000000000003, |
|
"loss": 4.9527, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 0.00677, |
|
"grad_norm": 0.9455529231086871, |
|
"learning_rate": 0.0020310000000000003, |
|
"loss": 4.966, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 0.00678, |
|
"grad_norm": 0.7654589233287328, |
|
"learning_rate": 0.0020340000000000002, |
|
"loss": 4.9721, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 0.00679, |
|
"grad_norm": 0.7453542715563302, |
|
"learning_rate": 0.002037, |
|
"loss": 4.9514, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 0.0068, |
|
"grad_norm": 0.8198542095591255, |
|
"learning_rate": 0.00204, |
|
"loss": 4.9599, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.00681, |
|
"grad_norm": 1.0035327070670854, |
|
"learning_rate": 0.002043, |
|
"loss": 4.9608, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 0.00682, |
|
"grad_norm": 1.1272493939475496, |
|
"learning_rate": 0.002046, |
|
"loss": 4.9829, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 0.00683, |
|
"grad_norm": 0.9375943776902779, |
|
"learning_rate": 0.002049, |
|
"loss": 4.9573, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 0.00684, |
|
"grad_norm": 0.988255846210588, |
|
"learning_rate": 0.002052, |
|
"loss": 4.9739, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 0.00685, |
|
"grad_norm": 1.2166901935378913, |
|
"learning_rate": 0.0020550000000000004, |
|
"loss": 4.9877, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.00686, |
|
"grad_norm": 0.8834239063706651, |
|
"learning_rate": 0.0020580000000000004, |
|
"loss": 4.9625, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 0.00687, |
|
"grad_norm": 0.9980240769781185, |
|
"learning_rate": 0.0020610000000000003, |
|
"loss": 4.9587, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 0.00688, |
|
"grad_norm": 1.1254414916617237, |
|
"learning_rate": 0.002064, |
|
"loss": 4.9804, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 0.00689, |
|
"grad_norm": 1.0306142304378458, |
|
"learning_rate": 0.002067, |
|
"loss": 4.9703, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 0.0069, |
|
"grad_norm": 1.1841729949788287, |
|
"learning_rate": 0.00207, |
|
"loss": 4.9832, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.00691, |
|
"grad_norm": 0.94703321985356, |
|
"learning_rate": 0.0020729999999999998, |
|
"loss": 4.946, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 0.00692, |
|
"grad_norm": 1.0500733994835558, |
|
"learning_rate": 0.0020759999999999997, |
|
"loss": 4.9769, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 0.00693, |
|
"grad_norm": 0.8703789610160195, |
|
"learning_rate": 0.0020789999999999997, |
|
"loss": 4.9707, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 0.00694, |
|
"grad_norm": 0.8616077913099822, |
|
"learning_rate": 0.002082, |
|
"loss": 4.9464, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 0.00695, |
|
"grad_norm": 1.1391889561194677, |
|
"learning_rate": 0.002085, |
|
"loss": 4.9626, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.00696, |
|
"grad_norm": 0.9584412207038412, |
|
"learning_rate": 0.002088, |
|
"loss": 4.9606, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 0.00697, |
|
"grad_norm": 1.0011041706204022, |
|
"learning_rate": 0.002091, |
|
"loss": 4.9624, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 0.00698, |
|
"grad_norm": 1.3127609957868622, |
|
"learning_rate": 0.002094, |
|
"loss": 4.9728, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 0.00699, |
|
"grad_norm": 0.8537920758308664, |
|
"learning_rate": 0.002097, |
|
"loss": 4.9491, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 0.007, |
|
"grad_norm": 1.0018321540367086, |
|
"learning_rate": 0.0021, |
|
"loss": 4.9652, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.00701, |
|
"grad_norm": 0.9481035749179906, |
|
"learning_rate": 0.002103, |
|
"loss": 4.9422, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 0.00702, |
|
"grad_norm": 0.8258463386850557, |
|
"learning_rate": 0.002106, |
|
"loss": 4.945, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 0.00703, |
|
"grad_norm": 0.6305963851301584, |
|
"learning_rate": 0.0021089999999999998, |
|
"loss": 4.9167, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 0.00704, |
|
"grad_norm": 0.6428932601365419, |
|
"learning_rate": 0.0021119999999999997, |
|
"loss": 4.9232, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 0.00705, |
|
"grad_norm": 0.6965384868722897, |
|
"learning_rate": 0.002115, |
|
"loss": 4.9266, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.00706, |
|
"grad_norm": 0.7410784964260257, |
|
"learning_rate": 0.002118, |
|
"loss": 4.9344, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 0.00707, |
|
"grad_norm": 0.721339398040246, |
|
"learning_rate": 0.002121, |
|
"loss": 4.9083, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 0.00708, |
|
"grad_norm": 0.675102725576719, |
|
"learning_rate": 0.002124, |
|
"loss": 4.8875, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 0.00709, |
|
"grad_norm": 0.6428565761724286, |
|
"learning_rate": 0.002127, |
|
"loss": 4.9151, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 0.0071, |
|
"grad_norm": 0.5795546215215135, |
|
"learning_rate": 0.00213, |
|
"loss": 4.8959, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.00711, |
|
"grad_norm": 0.6667417841414063, |
|
"learning_rate": 0.002133, |
|
"loss": 4.8739, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 0.00712, |
|
"grad_norm": 0.7094343142489271, |
|
"learning_rate": 0.002136, |
|
"loss": 4.8945, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 0.00713, |
|
"grad_norm": 0.6503826772007358, |
|
"learning_rate": 0.002139, |
|
"loss": 4.9061, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 0.00714, |
|
"grad_norm": 0.5916220730293257, |
|
"learning_rate": 0.002142, |
|
"loss": 4.8928, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 0.00715, |
|
"grad_norm": 0.6248268778602033, |
|
"learning_rate": 0.0021449999999999998, |
|
"loss": 4.8651, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.00716, |
|
"grad_norm": 0.7056512941727522, |
|
"learning_rate": 0.002148, |
|
"loss": 4.8619, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 0.00717, |
|
"grad_norm": 0.7609530945312164, |
|
"learning_rate": 0.002151, |
|
"loss": 4.8823, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 0.00718, |
|
"grad_norm": 0.7494152535556156, |
|
"learning_rate": 0.002154, |
|
"loss": 4.8579, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 0.00719, |
|
"grad_norm": 0.7503709346262388, |
|
"learning_rate": 0.002157, |
|
"loss": 4.8753, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 0.0072, |
|
"grad_norm": 0.7326012456404516, |
|
"learning_rate": 0.00216, |
|
"loss": 4.8631, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.00721, |
|
"grad_norm": 0.7261591024591523, |
|
"learning_rate": 0.002163, |
|
"loss": 4.867, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 0.00722, |
|
"grad_norm": 0.782783835861699, |
|
"learning_rate": 0.002166, |
|
"loss": 4.843, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 0.00723, |
|
"grad_norm": 0.9405574246546834, |
|
"learning_rate": 0.002169, |
|
"loss": 4.859, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 0.00724, |
|
"grad_norm": 0.9635066655828881, |
|
"learning_rate": 0.002172, |
|
"loss": 4.8537, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 0.00725, |
|
"grad_norm": 1.040794527733107, |
|
"learning_rate": 0.002175, |
|
"loss": 4.8644, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.00726, |
|
"grad_norm": 0.8259351691267115, |
|
"learning_rate": 0.002178, |
|
"loss": 4.8549, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 0.00727, |
|
"grad_norm": 0.9246523356382073, |
|
"learning_rate": 0.0021809999999999998, |
|
"loss": 4.8481, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 0.00728, |
|
"grad_norm": 0.8492007448518646, |
|
"learning_rate": 0.002184, |
|
"loss": 4.8551, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 0.00729, |
|
"grad_norm": 0.8802732367664233, |
|
"learning_rate": 0.002187, |
|
"loss": 4.8519, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 0.0073, |
|
"grad_norm": 0.8143690086226398, |
|
"learning_rate": 0.00219, |
|
"loss": 4.8583, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.00731, |
|
"grad_norm": 0.7978098404882568, |
|
"learning_rate": 0.002193, |
|
"loss": 4.846, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 0.00732, |
|
"grad_norm": 0.8726405483808616, |
|
"learning_rate": 0.002196, |
|
"loss": 4.8538, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 0.00733, |
|
"grad_norm": 0.852624212596407, |
|
"learning_rate": 0.002199, |
|
"loss": 4.8348, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 0.00734, |
|
"grad_norm": 0.8516024625713395, |
|
"learning_rate": 0.002202, |
|
"loss": 4.8503, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 0.00735, |
|
"grad_norm": 1.0296852244953179, |
|
"learning_rate": 0.002205, |
|
"loss": 4.8752, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.00736, |
|
"grad_norm": 0.8907291733705317, |
|
"learning_rate": 0.002208, |
|
"loss": 4.8478, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 0.00737, |
|
"grad_norm": 0.847923173569621, |
|
"learning_rate": 0.002211, |
|
"loss": 4.8138, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 0.00738, |
|
"grad_norm": 1.0201562870165168, |
|
"learning_rate": 0.002214, |
|
"loss": 4.8304, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 0.00739, |
|
"grad_norm": 0.8864771113941665, |
|
"learning_rate": 0.0022170000000000002, |
|
"loss": 4.8243, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 0.0074, |
|
"grad_norm": 0.8107829600567034, |
|
"learning_rate": 0.00222, |
|
"loss": 4.8172, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.00741, |
|
"grad_norm": 0.9926562607182313, |
|
"learning_rate": 0.002223, |
|
"loss": 4.7981, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 0.00742, |
|
"grad_norm": 1.1169095501625559, |
|
"learning_rate": 0.002226, |
|
"loss": 4.8325, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 0.00743, |
|
"grad_norm": 1.0154903575297356, |
|
"learning_rate": 0.002229, |
|
"loss": 4.8033, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 0.00744, |
|
"grad_norm": 0.8796310645612629, |
|
"learning_rate": 0.002232, |
|
"loss": 4.8021, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 0.00745, |
|
"grad_norm": 0.8347673276949072, |
|
"learning_rate": 0.002235, |
|
"loss": 4.8034, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.00746, |
|
"grad_norm": 1.0163090629032976, |
|
"learning_rate": 0.002238, |
|
"loss": 4.8122, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 0.00747, |
|
"grad_norm": 0.9716623005872416, |
|
"learning_rate": 0.002241, |
|
"loss": 4.7984, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 0.00748, |
|
"grad_norm": 1.0278655828642824, |
|
"learning_rate": 0.002244, |
|
"loss": 4.8089, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 0.00749, |
|
"grad_norm": 1.0480541445842007, |
|
"learning_rate": 0.002247, |
|
"loss": 4.797, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 0.0075, |
|
"grad_norm": 0.8547105592298554, |
|
"learning_rate": 0.0022500000000000003, |
|
"loss": 4.8161, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.00751, |
|
"grad_norm": 1.124867997489978, |
|
"learning_rate": 0.0022530000000000002, |
|
"loss": 4.8181, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 0.00752, |
|
"grad_norm": 0.9791690369641056, |
|
"learning_rate": 0.002256, |
|
"loss": 4.777, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 0.00753, |
|
"grad_norm": 1.076442658365417, |
|
"learning_rate": 0.002259, |
|
"loss": 4.8205, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 0.00754, |
|
"grad_norm": 1.2763721488952993, |
|
"learning_rate": 0.002262, |
|
"loss": 4.8438, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 0.00755, |
|
"grad_norm": 0.956893666746658, |
|
"learning_rate": 0.002265, |
|
"loss": 4.7969, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.00756, |
|
"grad_norm": 0.9171753465968743, |
|
"learning_rate": 0.002268, |
|
"loss": 4.7762, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 0.00757, |
|
"grad_norm": 1.0908406750664337, |
|
"learning_rate": 0.002271, |
|
"loss": 4.7959, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 0.00758, |
|
"grad_norm": 0.9176733613061218, |
|
"learning_rate": 0.002274, |
|
"loss": 4.7709, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 0.00759, |
|
"grad_norm": 0.8908245026099332, |
|
"learning_rate": 0.002277, |
|
"loss": 4.7862, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 0.0076, |
|
"grad_norm": 0.9024826649233609, |
|
"learning_rate": 0.00228, |
|
"loss": 4.7716, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.00761, |
|
"grad_norm": 0.8163892030302853, |
|
"learning_rate": 0.002283, |
|
"loss": 4.7408, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 0.00762, |
|
"grad_norm": 0.8370737395441165, |
|
"learning_rate": 0.0022860000000000003, |
|
"loss": 4.7247, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 0.00763, |
|
"grad_norm": 0.8480960736158745, |
|
"learning_rate": 0.0022890000000000002, |
|
"loss": 4.7641, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 0.00764, |
|
"grad_norm": 0.7323677069229925, |
|
"learning_rate": 0.002292, |
|
"loss": 4.7635, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 0.00765, |
|
"grad_norm": 0.517428476031845, |
|
"learning_rate": 0.002295, |
|
"loss": 4.7402, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.00766, |
|
"grad_norm": 0.5449748434883583, |
|
"learning_rate": 0.002298, |
|
"loss": 4.7349, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 0.00767, |
|
"grad_norm": 0.519905380789997, |
|
"learning_rate": 0.002301, |
|
"loss": 4.7297, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 0.00768, |
|
"grad_norm": 0.5353014095205901, |
|
"learning_rate": 0.002304, |
|
"loss": 4.7326, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 0.00769, |
|
"grad_norm": 0.6021476092106454, |
|
"learning_rate": 0.002307, |
|
"loss": 4.7369, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 0.0077, |
|
"grad_norm": 0.6291707943467315, |
|
"learning_rate": 0.00231, |
|
"loss": 4.6951, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.00771, |
|
"grad_norm": 0.6259748544734598, |
|
"learning_rate": 0.002313, |
|
"loss": 4.726, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 0.00772, |
|
"grad_norm": 0.6411937087975925, |
|
"learning_rate": 0.002316, |
|
"loss": 4.71, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 0.00773, |
|
"grad_norm": 0.6087209583882061, |
|
"learning_rate": 0.0023190000000000003, |
|
"loss": 4.7089, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 0.00774, |
|
"grad_norm": 0.6187878154543281, |
|
"learning_rate": 0.0023220000000000003, |
|
"loss": 4.7216, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 0.00775, |
|
"grad_norm": 0.6698244521960122, |
|
"learning_rate": 0.0023250000000000002, |
|
"loss": 4.7125, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.00776, |
|
"grad_norm": 0.681313335099601, |
|
"learning_rate": 0.002328, |
|
"loss": 4.716, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 0.00777, |
|
"grad_norm": 0.564623702064127, |
|
"learning_rate": 0.002331, |
|
"loss": 4.6866, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 0.00778, |
|
"grad_norm": 0.5709713255684672, |
|
"learning_rate": 0.002334, |
|
"loss": 4.6954, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 0.00779, |
|
"grad_norm": 0.6149821587836622, |
|
"learning_rate": 0.002337, |
|
"loss": 4.6565, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 0.0078, |
|
"grad_norm": 0.632842020953678, |
|
"learning_rate": 0.00234, |
|
"loss": 4.6725, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.00781, |
|
"grad_norm": 0.7467970568022195, |
|
"learning_rate": 0.002343, |
|
"loss": 4.678, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 0.00782, |
|
"grad_norm": 0.9047857384333676, |
|
"learning_rate": 0.002346, |
|
"loss": 4.6801, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 0.00783, |
|
"grad_norm": 1.0576276659724262, |
|
"learning_rate": 0.002349, |
|
"loss": 4.6678, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 0.00784, |
|
"grad_norm": 1.1751809929780095, |
|
"learning_rate": 0.002352, |
|
"loss": 4.6921, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 0.00785, |
|
"grad_norm": 0.8865561583369859, |
|
"learning_rate": 0.0023550000000000003, |
|
"loss": 4.6774, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.00786, |
|
"grad_norm": 1.1372694124786462, |
|
"learning_rate": 0.0023580000000000003, |
|
"loss": 4.7016, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 0.00787, |
|
"grad_norm": 1.0092158733190273, |
|
"learning_rate": 0.0023610000000000003, |
|
"loss": 4.6918, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 0.00788, |
|
"grad_norm": 1.1115636173510615, |
|
"learning_rate": 0.002364, |
|
"loss": 4.7036, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 0.00789, |
|
"grad_norm": 0.9627507497820444, |
|
"learning_rate": 0.002367, |
|
"loss": 4.7196, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 0.0079, |
|
"grad_norm": 1.0391564883926698, |
|
"learning_rate": 0.00237, |
|
"loss": 4.706, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.00791, |
|
"grad_norm": 0.9857967273862618, |
|
"learning_rate": 0.002373, |
|
"loss": 4.6613, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 0.00792, |
|
"grad_norm": 1.07807376567954, |
|
"learning_rate": 0.002376, |
|
"loss": 4.6969, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 0.00793, |
|
"grad_norm": 1.1762161150555115, |
|
"learning_rate": 0.002379, |
|
"loss": 4.7002, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 0.00794, |
|
"grad_norm": 0.8247319634032979, |
|
"learning_rate": 0.002382, |
|
"loss": 4.6948, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 0.00795, |
|
"grad_norm": 0.8498192334201926, |
|
"learning_rate": 0.002385, |
|
"loss": 4.676, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.00796, |
|
"grad_norm": 0.7871292688998256, |
|
"learning_rate": 0.0023880000000000004, |
|
"loss": 4.6606, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 0.00797, |
|
"grad_norm": 0.7600244620970749, |
|
"learning_rate": 0.0023910000000000003, |
|
"loss": 4.7021, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 0.00798, |
|
"grad_norm": 0.7960964641914068, |
|
"learning_rate": 0.0023940000000000003, |
|
"loss": 4.6698, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 0.00799, |
|
"grad_norm": 0.740030757660218, |
|
"learning_rate": 0.0023970000000000003, |
|
"loss": 4.6941, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 0.008, |
|
"grad_norm": 0.7620304314170098, |
|
"learning_rate": 0.0024000000000000002, |
|
"loss": 4.6524, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.00801, |
|
"grad_norm": 0.7109641363313225, |
|
"learning_rate": 0.002403, |
|
"loss": 4.6746, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 0.00802, |
|
"grad_norm": 0.6915734125547375, |
|
"learning_rate": 0.002406, |
|
"loss": 4.6614, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 0.00803, |
|
"grad_norm": 0.6786740334805601, |
|
"learning_rate": 0.002409, |
|
"loss": 4.6408, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 0.00804, |
|
"grad_norm": 0.6631767318006158, |
|
"learning_rate": 0.002412, |
|
"loss": 4.6361, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 0.00805, |
|
"grad_norm": 0.6460763714070807, |
|
"learning_rate": 0.002415, |
|
"loss": 4.631, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.00806, |
|
"grad_norm": 0.5689446567265103, |
|
"learning_rate": 0.002418, |
|
"loss": 4.6413, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 0.00807, |
|
"grad_norm": 0.7294155919129667, |
|
"learning_rate": 0.0024210000000000004, |
|
"loss": 4.6407, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 0.00808, |
|
"grad_norm": 0.9713010260235329, |
|
"learning_rate": 0.0024240000000000004, |
|
"loss": 4.6372, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 0.00809, |
|
"grad_norm": 1.039651350431828, |
|
"learning_rate": 0.0024270000000000003, |
|
"loss": 4.6452, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 0.0081, |
|
"grad_norm": 0.7987971948237619, |
|
"learning_rate": 0.0024300000000000003, |
|
"loss": 4.6558, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.00811, |
|
"grad_norm": 0.866506786468452, |
|
"learning_rate": 0.0024330000000000003, |
|
"loss": 4.6332, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 0.00812, |
|
"grad_norm": 0.8940070034144846, |
|
"learning_rate": 0.0024360000000000002, |
|
"loss": 4.6162, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 0.00813, |
|
"grad_norm": 0.7601015395596301, |
|
"learning_rate": 0.0024389999999999998, |
|
"loss": 4.6418, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 0.00814, |
|
"grad_norm": 0.959371802874838, |
|
"learning_rate": 0.0024419999999999997, |
|
"loss": 4.6561, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 0.00815, |
|
"grad_norm": 1.0501943727451555, |
|
"learning_rate": 0.0024449999999999997, |
|
"loss": 4.6758, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.00816, |
|
"grad_norm": 1.0657571457398118, |
|
"learning_rate": 0.002448, |
|
"loss": 4.6479, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 0.00817, |
|
"grad_norm": 0.9466339757774411, |
|
"learning_rate": 0.002451, |
|
"loss": 4.6581, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 0.00818, |
|
"grad_norm": 1.0180120345579544, |
|
"learning_rate": 0.002454, |
|
"loss": 4.6439, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 0.00819, |
|
"grad_norm": 0.9541848583096257, |
|
"learning_rate": 0.002457, |
|
"loss": 4.6558, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 0.0082, |
|
"grad_norm": 0.8369907334148766, |
|
"learning_rate": 0.00246, |
|
"loss": 4.6334, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.00821, |
|
"grad_norm": 0.9636511535948165, |
|
"learning_rate": 0.002463, |
|
"loss": 4.6201, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 0.00822, |
|
"grad_norm": 0.9781201110522014, |
|
"learning_rate": 0.002466, |
|
"loss": 4.6234, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 0.00823, |
|
"grad_norm": 0.7544698826164155, |
|
"learning_rate": 0.002469, |
|
"loss": 4.6177, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 0.00824, |
|
"grad_norm": 0.8351046210351873, |
|
"learning_rate": 0.002472, |
|
"loss": 4.6395, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 0.00825, |
|
"grad_norm": 0.9579203449788568, |
|
"learning_rate": 0.0024749999999999998, |
|
"loss": 4.6552, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.00826, |
|
"grad_norm": 0.6855903524790949, |
|
"learning_rate": 0.0024779999999999997, |
|
"loss": 4.6015, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 0.00827, |
|
"grad_norm": 0.5831382107833026, |
|
"learning_rate": 0.002481, |
|
"loss": 4.6271, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 0.00828, |
|
"grad_norm": 0.6353397555552095, |
|
"learning_rate": 0.002484, |
|
"loss": 4.6166, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 0.00829, |
|
"grad_norm": 0.7710602439494972, |
|
"learning_rate": 0.002487, |
|
"loss": 4.619, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 0.0083, |
|
"grad_norm": 0.8708544431374072, |
|
"learning_rate": 0.00249, |
|
"loss": 4.6284, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.00831, |
|
"grad_norm": 1.0453965047267408, |
|
"learning_rate": 0.002493, |
|
"loss": 4.6196, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 0.00832, |
|
"grad_norm": 1.1447270695034462, |
|
"learning_rate": 0.002496, |
|
"loss": 4.6327, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 0.00833, |
|
"grad_norm": 1.0246810397019857, |
|
"learning_rate": 0.002499, |
|
"loss": 4.6176, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 0.00834, |
|
"grad_norm": 0.9810039542594958, |
|
"learning_rate": 0.002502, |
|
"loss": 4.6274, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 0.00835, |
|
"grad_norm": 0.8876541499902995, |
|
"learning_rate": 0.002505, |
|
"loss": 4.6197, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.00836, |
|
"grad_norm": 0.903016402602156, |
|
"learning_rate": 0.002508, |
|
"loss": 4.6457, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 0.00837, |
|
"grad_norm": 0.9089070943446178, |
|
"learning_rate": 0.0025109999999999998, |
|
"loss": 4.6001, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 0.00838, |
|
"grad_norm": 1.0056159571167094, |
|
"learning_rate": 0.0025139999999999997, |
|
"loss": 4.6276, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 0.00839, |
|
"grad_norm": 0.9338401740679543, |
|
"learning_rate": 0.002517, |
|
"loss": 4.6258, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 0.0084, |
|
"grad_norm": 0.8595710991386702, |
|
"learning_rate": 0.00252, |
|
"loss": 4.6076, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.00841, |
|
"grad_norm": 0.8408169361023179, |
|
"learning_rate": 0.002523, |
|
"loss": 4.5981, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 0.00842, |
|
"grad_norm": 0.9223461051136856, |
|
"learning_rate": 0.002526, |
|
"loss": 4.6018, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 0.00843, |
|
"grad_norm": 0.8967298269444864, |
|
"learning_rate": 0.002529, |
|
"loss": 4.5992, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 0.00844, |
|
"grad_norm": 1.0054950179651305, |
|
"learning_rate": 0.002532, |
|
"loss": 4.6384, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 0.00845, |
|
"grad_norm": 0.9421873102957943, |
|
"learning_rate": 0.002535, |
|
"loss": 4.597, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.00846, |
|
"grad_norm": 0.7813468730903476, |
|
"learning_rate": 0.002538, |
|
"loss": 4.612, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 0.00847, |
|
"grad_norm": 0.7336538825841323, |
|
"learning_rate": 0.002541, |
|
"loss": 4.5922, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 0.00848, |
|
"grad_norm": 0.7490910216536115, |
|
"learning_rate": 0.002544, |
|
"loss": 4.5881, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 0.00849, |
|
"grad_norm": 0.7281767237417216, |
|
"learning_rate": 0.002547, |
|
"loss": 4.5791, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 0.0085, |
|
"grad_norm": 0.6108432594850526, |
|
"learning_rate": 0.00255, |
|
"loss": 4.5847, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.00851, |
|
"grad_norm": 0.6024151823798927, |
|
"learning_rate": 0.002553, |
|
"loss": 4.5988, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 0.00852, |
|
"grad_norm": 0.4780185378158163, |
|
"learning_rate": 0.002556, |
|
"loss": 4.5828, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 0.00853, |
|
"grad_norm": 0.5435830639563773, |
|
"learning_rate": 0.002559, |
|
"loss": 4.5614, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 0.00854, |
|
"grad_norm": 0.6028384776402124, |
|
"learning_rate": 0.002562, |
|
"loss": 4.5542, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 0.00855, |
|
"grad_norm": 0.5885480983638159, |
|
"learning_rate": 0.002565, |
|
"loss": 4.5306, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.00856, |
|
"grad_norm": 0.6060410896547319, |
|
"learning_rate": 0.002568, |
|
"loss": 4.5634, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 0.00857, |
|
"grad_norm": 0.5609885323605243, |
|
"learning_rate": 0.002571, |
|
"loss": 4.5495, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 0.00858, |
|
"grad_norm": 0.5199675893089152, |
|
"learning_rate": 0.002574, |
|
"loss": 4.541, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 0.00859, |
|
"grad_norm": 0.5200210947429836, |
|
"learning_rate": 0.002577, |
|
"loss": 4.5318, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 0.0086, |
|
"grad_norm": 0.5623426902483657, |
|
"learning_rate": 0.00258, |
|
"loss": 4.5597, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.00861, |
|
"grad_norm": 0.6385070114593622, |
|
"learning_rate": 0.0025830000000000002, |
|
"loss": 4.5763, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 0.00862, |
|
"grad_norm": 0.7215967762933786, |
|
"learning_rate": 0.002586, |
|
"loss": 4.5292, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 0.00863, |
|
"grad_norm": 0.8778870896923412, |
|
"learning_rate": 0.002589, |
|
"loss": 4.559, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 0.00864, |
|
"grad_norm": 1.1440074056398795, |
|
"learning_rate": 0.002592, |
|
"loss": 4.547, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 0.00865, |
|
"grad_norm": 0.9219223071258862, |
|
"learning_rate": 0.002595, |
|
"loss": 4.5491, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.00866, |
|
"grad_norm": 1.065047382603863, |
|
"learning_rate": 0.002598, |
|
"loss": 4.5845, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 0.00867, |
|
"grad_norm": 0.9527212983309642, |
|
"learning_rate": 0.002601, |
|
"loss": 4.5736, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 0.00868, |
|
"grad_norm": 0.7943624191664302, |
|
"learning_rate": 0.002604, |
|
"loss": 4.5474, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 0.00869, |
|
"grad_norm": 0.671885521142916, |
|
"learning_rate": 0.002607, |
|
"loss": 4.5536, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 0.0087, |
|
"grad_norm": 0.6527559883208905, |
|
"learning_rate": 0.00261, |
|
"loss": 4.54, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.00871, |
|
"grad_norm": 0.6266922867904314, |
|
"learning_rate": 0.002613, |
|
"loss": 4.5522, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 0.00872, |
|
"grad_norm": 0.5879044949928981, |
|
"learning_rate": 0.002616, |
|
"loss": 4.5345, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 0.00873, |
|
"grad_norm": 0.5844885657438569, |
|
"learning_rate": 0.0026190000000000002, |
|
"loss": 4.5441, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 0.00874, |
|
"grad_norm": 0.5286751889514939, |
|
"learning_rate": 0.002622, |
|
"loss": 4.5573, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 0.00875, |
|
"grad_norm": 0.5494502975686567, |
|
"learning_rate": 0.002625, |
|
"loss": 4.5051, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.00876, |
|
"grad_norm": 0.6269645893334619, |
|
"learning_rate": 0.002628, |
|
"loss": 4.5356, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 0.00877, |
|
"grad_norm": 0.6901692969476347, |
|
"learning_rate": 0.002631, |
|
"loss": 4.5537, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 0.00878, |
|
"grad_norm": 0.7433571199864201, |
|
"learning_rate": 0.002634, |
|
"loss": 4.4894, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 0.00879, |
|
"grad_norm": 0.7806263902694818, |
|
"learning_rate": 0.002637, |
|
"loss": 4.5303, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 0.0088, |
|
"grad_norm": 0.7694352427135146, |
|
"learning_rate": 0.00264, |
|
"loss": 4.5353, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.00881, |
|
"grad_norm": 0.6938035372615263, |
|
"learning_rate": 0.002643, |
|
"loss": 4.5351, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 0.00882, |
|
"grad_norm": 0.672108824408367, |
|
"learning_rate": 0.002646, |
|
"loss": 4.5331, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 0.00883, |
|
"grad_norm": 0.6847246415873477, |
|
"learning_rate": 0.002649, |
|
"loss": 4.524, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 0.00884, |
|
"grad_norm": 0.8034466162851082, |
|
"learning_rate": 0.0026520000000000003, |
|
"loss": 4.5123, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 0.00885, |
|
"grad_norm": 0.8504740488359851, |
|
"learning_rate": 0.0026550000000000002, |
|
"loss": 4.5174, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.00886, |
|
"grad_norm": 0.6814828631263485, |
|
"learning_rate": 0.002658, |
|
"loss": 4.526, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 0.00887, |
|
"grad_norm": 0.7764171535679266, |
|
"learning_rate": 0.002661, |
|
"loss": 4.5254, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 0.00888, |
|
"grad_norm": 0.864178556132455, |
|
"learning_rate": 0.002664, |
|
"loss": 4.5323, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 0.00889, |
|
"grad_norm": 0.9963366133988169, |
|
"learning_rate": 0.002667, |
|
"loss": 4.4915, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 0.0089, |
|
"grad_norm": 0.9684646116139818, |
|
"learning_rate": 0.00267, |
|
"loss": 4.5454, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.00891, |
|
"grad_norm": 0.8019607803647243, |
|
"learning_rate": 0.002673, |
|
"loss": 4.523, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 0.00892, |
|
"grad_norm": 0.9082109544296421, |
|
"learning_rate": 0.002676, |
|
"loss": 4.5431, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 0.00893, |
|
"grad_norm": 0.9728985136087724, |
|
"learning_rate": 0.002679, |
|
"loss": 4.5212, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 0.00894, |
|
"grad_norm": 1.0580229080074564, |
|
"learning_rate": 0.002682, |
|
"loss": 4.5156, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 0.00895, |
|
"grad_norm": 0.8166896686472195, |
|
"learning_rate": 0.0026850000000000003, |
|
"loss": 4.5188, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.00896, |
|
"grad_norm": 0.8942454442625324, |
|
"learning_rate": 0.0026880000000000003, |
|
"loss": 4.5121, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 0.00897, |
|
"grad_norm": 0.9627689980406442, |
|
"learning_rate": 0.0026910000000000002, |
|
"loss": 4.5406, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 0.00898, |
|
"grad_norm": 0.915209672072645, |
|
"learning_rate": 0.002694, |
|
"loss": 4.5291, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 0.00899, |
|
"grad_norm": 0.8008505248098504, |
|
"learning_rate": 0.002697, |
|
"loss": 4.5313, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 0.009, |
|
"grad_norm": 0.974662986907947, |
|
"learning_rate": 0.0027, |
|
"loss": 4.5627, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.00901, |
|
"grad_norm": 1.1878109077704273, |
|
"learning_rate": 0.002703, |
|
"loss": 4.5336, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 0.00902, |
|
"grad_norm": 0.7391945290133592, |
|
"learning_rate": 0.002706, |
|
"loss": 4.5248, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 0.00903, |
|
"grad_norm": 0.7310904041760188, |
|
"learning_rate": 0.002709, |
|
"loss": 4.5195, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 0.00904, |
|
"grad_norm": 0.763540341773903, |
|
"learning_rate": 0.002712, |
|
"loss": 4.4985, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 0.00905, |
|
"grad_norm": 0.922476362328922, |
|
"learning_rate": 0.002715, |
|
"loss": 4.5209, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.00906, |
|
"grad_norm": 0.9574009618515481, |
|
"learning_rate": 0.002718, |
|
"loss": 4.5379, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 0.00907, |
|
"grad_norm": 1.035774941473643, |
|
"learning_rate": 0.0027210000000000003, |
|
"loss": 4.5538, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 0.00908, |
|
"grad_norm": 1.0191319400722418, |
|
"learning_rate": 0.0027240000000000003, |
|
"loss": 4.5279, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 0.00909, |
|
"grad_norm": 1.1103950353838743, |
|
"learning_rate": 0.0027270000000000003, |
|
"loss": 4.554, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 0.0091, |
|
"grad_norm": 0.8691030580510016, |
|
"learning_rate": 0.0027300000000000002, |
|
"loss": 4.5543, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.00911, |
|
"grad_norm": 0.7723878752960539, |
|
"learning_rate": 0.002733, |
|
"loss": 4.5035, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 0.00912, |
|
"grad_norm": 0.8180906874107354, |
|
"learning_rate": 0.002736, |
|
"loss": 4.5117, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 0.00913, |
|
"grad_norm": 0.8148069350423363, |
|
"learning_rate": 0.002739, |
|
"loss": 4.505, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 0.00914, |
|
"grad_norm": 0.7118677869163097, |
|
"learning_rate": 0.002742, |
|
"loss": 4.5326, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 0.00915, |
|
"grad_norm": 0.67221021637466, |
|
"learning_rate": 0.002745, |
|
"loss": 4.4896, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.00916, |
|
"grad_norm": 0.7521734182214744, |
|
"learning_rate": 0.002748, |
|
"loss": 4.4742, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 0.00917, |
|
"grad_norm": 0.6660072639776646, |
|
"learning_rate": 0.002751, |
|
"loss": 4.4809, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 0.00918, |
|
"grad_norm": 0.5671697873930601, |
|
"learning_rate": 0.0027540000000000004, |
|
"loss": 4.4963, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 0.00919, |
|
"grad_norm": 0.5594822914263968, |
|
"learning_rate": 0.0027570000000000003, |
|
"loss": 4.4754, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 0.0092, |
|
"grad_norm": 0.6080421327199805, |
|
"learning_rate": 0.0027600000000000003, |
|
"loss": 4.4903, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.00921, |
|
"grad_norm": 0.728784609109995, |
|
"learning_rate": 0.0027630000000000003, |
|
"loss": 4.4759, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 0.00922, |
|
"grad_norm": 0.8477951683682543, |
|
"learning_rate": 0.0027660000000000002, |
|
"loss": 4.5076, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 0.00923, |
|
"grad_norm": 0.6016579194588629, |
|
"learning_rate": 0.002769, |
|
"loss": 4.4637, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 0.00924, |
|
"grad_norm": 0.6726777284073512, |
|
"learning_rate": 0.002772, |
|
"loss": 4.4919, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 0.00925, |
|
"grad_norm": 0.7061399279754877, |
|
"learning_rate": 0.002775, |
|
"loss": 4.4751, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.00926, |
|
"grad_norm": 0.6525850527724611, |
|
"learning_rate": 0.002778, |
|
"loss": 4.4844, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 0.00927, |
|
"grad_norm": 0.6775355167555962, |
|
"learning_rate": 0.002781, |
|
"loss": 4.4493, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 0.00928, |
|
"grad_norm": 0.660488843824027, |
|
"learning_rate": 0.002784, |
|
"loss": 4.493, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 0.00929, |
|
"grad_norm": 0.6375906920183253, |
|
"learning_rate": 0.0027870000000000004, |
|
"loss": 4.4714, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 0.0093, |
|
"grad_norm": 0.7170906127689122, |
|
"learning_rate": 0.0027900000000000004, |
|
"loss": 4.4557, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.00931, |
|
"grad_norm": 0.7727209257871167, |
|
"learning_rate": 0.0027930000000000003, |
|
"loss": 4.4706, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 0.00932, |
|
"grad_norm": 0.759902395005458, |
|
"learning_rate": 0.0027960000000000003, |
|
"loss": 4.4795, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 0.00933, |
|
"grad_norm": 0.9089287828754257, |
|
"learning_rate": 0.0027990000000000003, |
|
"loss": 4.4536, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 0.00934, |
|
"grad_norm": 0.9561562356933814, |
|
"learning_rate": 0.0028020000000000002, |
|
"loss": 4.4859, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 0.00935, |
|
"grad_norm": 1.010681147518413, |
|
"learning_rate": 0.002805, |
|
"loss": 4.5044, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.00936, |
|
"grad_norm": 0.960270788501537, |
|
"learning_rate": 0.002808, |
|
"loss": 4.5076, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 0.00937, |
|
"grad_norm": 0.8170703531029062, |
|
"learning_rate": 0.002811, |
|
"loss": 4.5151, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 0.00938, |
|
"grad_norm": 0.6779931914379209, |
|
"learning_rate": 0.002814, |
|
"loss": 4.4854, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 0.00939, |
|
"grad_norm": 0.6509733323350428, |
|
"learning_rate": 0.002817, |
|
"loss": 4.4771, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 0.0094, |
|
"grad_norm": 0.6633333111873998, |
|
"learning_rate": 0.00282, |
|
"loss": 4.4796, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.00941, |
|
"grad_norm": 0.7449615790040228, |
|
"learning_rate": 0.002823, |
|
"loss": 4.4576, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 0.00942, |
|
"grad_norm": 0.7696922222349031, |
|
"learning_rate": 0.002826, |
|
"loss": 4.4823, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 0.00943, |
|
"grad_norm": 0.6087986992050594, |
|
"learning_rate": 0.002829, |
|
"loss": 4.4688, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 0.00944, |
|
"grad_norm": 0.5228736659630974, |
|
"learning_rate": 0.002832, |
|
"loss": 4.4678, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 0.00945, |
|
"grad_norm": 0.5754533269053475, |
|
"learning_rate": 0.002835, |
|
"loss": 4.4713, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.00946, |
|
"grad_norm": 0.6164648444392735, |
|
"learning_rate": 0.002838, |
|
"loss": 4.4832, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 0.00947, |
|
"grad_norm": 0.6419505093155794, |
|
"learning_rate": 0.0028409999999999998, |
|
"loss": 4.4556, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 0.00948, |
|
"grad_norm": 0.556707209906786, |
|
"learning_rate": 0.0028439999999999997, |
|
"loss": 4.4351, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 0.00949, |
|
"grad_norm": 0.5031407625923785, |
|
"learning_rate": 0.002847, |
|
"loss": 4.4737, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 0.0095, |
|
"grad_norm": 0.5881138889925357, |
|
"learning_rate": 0.00285, |
|
"loss": 4.4655, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.00951, |
|
"grad_norm": 0.6194945571715194, |
|
"learning_rate": 0.002853, |
|
"loss": 4.4418, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 0.00952, |
|
"grad_norm": 0.6965416314433459, |
|
"learning_rate": 0.002856, |
|
"loss": 4.4704, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 0.00953, |
|
"grad_norm": 0.7506696864658969, |
|
"learning_rate": 0.002859, |
|
"loss": 4.4161, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 0.00954, |
|
"grad_norm": 0.6440899731036704, |
|
"learning_rate": 0.002862, |
|
"loss": 4.4703, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 0.00955, |
|
"grad_norm": 0.7716821398038454, |
|
"learning_rate": 0.002865, |
|
"loss": 4.4601, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.00956, |
|
"grad_norm": 0.9111507025485583, |
|
"learning_rate": 0.002868, |
|
"loss": 4.4578, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 0.00957, |
|
"grad_norm": 0.8443539201487298, |
|
"learning_rate": 0.002871, |
|
"loss": 4.4727, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 0.00958, |
|
"grad_norm": 0.7989694313181581, |
|
"learning_rate": 0.002874, |
|
"loss": 4.4582, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 0.00959, |
|
"grad_norm": 0.7547037856292312, |
|
"learning_rate": 0.002877, |
|
"loss": 4.4482, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 0.0096, |
|
"grad_norm": 0.7121103430025651, |
|
"learning_rate": 0.0028799999999999997, |
|
"loss": 4.4466, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.00961, |
|
"grad_norm": 0.6758285086831927, |
|
"learning_rate": 0.002883, |
|
"loss": 4.4209, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 0.00962, |
|
"grad_norm": 0.5336350485211147, |
|
"learning_rate": 0.002886, |
|
"loss": 4.4436, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 0.00963, |
|
"grad_norm": 0.5703472269796636, |
|
"learning_rate": 0.002889, |
|
"loss": 4.4467, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 0.00964, |
|
"grad_norm": 0.6524118600131552, |
|
"learning_rate": 0.002892, |
|
"loss": 4.4355, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 0.00965, |
|
"grad_norm": 0.7342343756321769, |
|
"learning_rate": 0.002895, |
|
"loss": 4.4144, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.00966, |
|
"grad_norm": 0.9544017629329996, |
|
"learning_rate": 0.002898, |
|
"loss": 4.4408, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 0.00967, |
|
"grad_norm": 1.1065817167809169, |
|
"learning_rate": 0.002901, |
|
"loss": 4.4927, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 0.00968, |
|
"grad_norm": 0.9829826223932284, |
|
"learning_rate": 0.002904, |
|
"loss": 4.4568, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 0.00969, |
|
"grad_norm": 0.8112557218556605, |
|
"learning_rate": 0.002907, |
|
"loss": 4.4805, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 0.0097, |
|
"grad_norm": 0.983264710703698, |
|
"learning_rate": 0.00291, |
|
"loss": 4.4488, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.00971, |
|
"grad_norm": 1.010591995610007, |
|
"learning_rate": 0.002913, |
|
"loss": 4.4796, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 0.00972, |
|
"grad_norm": 0.9930743768968635, |
|
"learning_rate": 0.002916, |
|
"loss": 4.4798, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 0.00973, |
|
"grad_norm": 1.1817662624154759, |
|
"learning_rate": 0.002919, |
|
"loss": 4.4931, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 0.00974, |
|
"grad_norm": 1.1915796929226945, |
|
"learning_rate": 0.002922, |
|
"loss": 4.4839, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 0.00975, |
|
"grad_norm": 0.9880254500237188, |
|
"learning_rate": 0.002925, |
|
"loss": 4.4969, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.00976, |
|
"grad_norm": 1.0394868289507098, |
|
"learning_rate": 0.002928, |
|
"loss": 4.4906, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 0.00977, |
|
"grad_norm": 1.1242485803004214, |
|
"learning_rate": 0.002931, |
|
"loss": 4.5346, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 0.00978, |
|
"grad_norm": 1.1287291198302327, |
|
"learning_rate": 0.002934, |
|
"loss": 4.5134, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 0.00979, |
|
"grad_norm": 0.937365777135907, |
|
"learning_rate": 0.002937, |
|
"loss": 4.4945, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 0.0098, |
|
"grad_norm": 0.9999316178288561, |
|
"learning_rate": 0.00294, |
|
"loss": 4.4906, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.00981, |
|
"grad_norm": 1.0692695747943242, |
|
"learning_rate": 0.002943, |
|
"loss": 4.5166, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 0.00982, |
|
"grad_norm": 0.855671043658802, |
|
"learning_rate": 0.002946, |
|
"loss": 4.4983, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 0.00983, |
|
"grad_norm": 0.8819131773225922, |
|
"learning_rate": 0.0029490000000000002, |
|
"loss": 4.4932, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 0.00984, |
|
"grad_norm": 0.8474323035176099, |
|
"learning_rate": 0.002952, |
|
"loss": 4.4794, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 0.00985, |
|
"grad_norm": 0.8010021292921615, |
|
"learning_rate": 0.002955, |
|
"loss": 4.4955, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.00986, |
|
"grad_norm": 0.6778548937329826, |
|
"learning_rate": 0.002958, |
|
"loss": 4.4543, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 0.00987, |
|
"grad_norm": 0.6335901643582099, |
|
"learning_rate": 0.002961, |
|
"loss": 4.4618, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 0.00988, |
|
"grad_norm": 0.6898288520048222, |
|
"learning_rate": 0.002964, |
|
"loss": 4.4934, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 0.00989, |
|
"grad_norm": 0.6968481266470886, |
|
"learning_rate": 0.002967, |
|
"loss": 4.4338, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 0.0099, |
|
"grad_norm": 0.7462093807424477, |
|
"learning_rate": 0.00297, |
|
"loss": 4.4706, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.00991, |
|
"grad_norm": 0.7437971839370325, |
|
"learning_rate": 0.002973, |
|
"loss": 4.4697, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 0.00992, |
|
"grad_norm": 0.6722774287362101, |
|
"learning_rate": 0.002976, |
|
"loss": 4.4576, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 0.00993, |
|
"grad_norm": 0.6583210228081078, |
|
"learning_rate": 0.002979, |
|
"loss": 4.4522, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 0.00994, |
|
"grad_norm": 0.6827391668775007, |
|
"learning_rate": 0.002982, |
|
"loss": 4.4553, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 0.00995, |
|
"grad_norm": 0.7054299500075728, |
|
"learning_rate": 0.0029850000000000002, |
|
"loss": 4.4538, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.00996, |
|
"grad_norm": 0.7382372248626952, |
|
"learning_rate": 0.002988, |
|
"loss": 4.4592, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 0.00997, |
|
"grad_norm": 0.6621181102222602, |
|
"learning_rate": 0.002991, |
|
"loss": 4.4437, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 0.00998, |
|
"grad_norm": 0.6532026522918166, |
|
"learning_rate": 0.002994, |
|
"loss": 4.4467, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 0.00999, |
|
"grad_norm": 0.6863970198472243, |
|
"learning_rate": 0.002997, |
|
"loss": 4.4317, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"grad_norm": 0.6643394709769835, |
|
"learning_rate": 0.003, |
|
"loss": 4.4529, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.01001, |
|
"grad_norm": 0.5664327532419113, |
|
"learning_rate": 0.003, |
|
"loss": 4.4443, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 0.01002, |
|
"grad_norm": 0.5614282124245983, |
|
"learning_rate": 0.003, |
|
"loss": 4.4107, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 0.01003, |
|
"grad_norm": 0.5395347922472447, |
|
"learning_rate": 0.003, |
|
"loss": 4.4319, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 0.01004, |
|
"grad_norm": 0.5638853817459814, |
|
"learning_rate": 0.003, |
|
"loss": 4.4194, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 0.01005, |
|
"grad_norm": 0.5916080142589283, |
|
"learning_rate": 0.003, |
|
"loss": 4.3932, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.01006, |
|
"grad_norm": 0.5700688347090367, |
|
"learning_rate": 0.003, |
|
"loss": 4.3913, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 0.01007, |
|
"grad_norm": 0.5618557938751672, |
|
"learning_rate": 0.003, |
|
"loss": 4.4343, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 0.01008, |
|
"grad_norm": 0.6176294799951538, |
|
"learning_rate": 0.003, |
|
"loss": 4.4449, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 0.01009, |
|
"grad_norm": 0.6917308148051521, |
|
"learning_rate": 0.003, |
|
"loss": 4.4152, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 0.0101, |
|
"grad_norm": 0.7355219940735465, |
|
"learning_rate": 0.003, |
|
"loss": 4.4242, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.01011, |
|
"grad_norm": 0.8258393023556594, |
|
"learning_rate": 0.003, |
|
"loss": 4.437, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 0.01012, |
|
"grad_norm": 0.7883368033913232, |
|
"learning_rate": 0.003, |
|
"loss": 4.4273, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 0.01013, |
|
"grad_norm": 0.8421670780561094, |
|
"learning_rate": 0.003, |
|
"loss": 4.453, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 0.01014, |
|
"grad_norm": 0.7611364620814401, |
|
"learning_rate": 0.003, |
|
"loss": 4.3906, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 0.01015, |
|
"grad_norm": 0.8948224171004916, |
|
"learning_rate": 0.003, |
|
"loss": 4.4422, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.01016, |
|
"grad_norm": 0.9330968648547454, |
|
"learning_rate": 0.003, |
|
"loss": 4.4537, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 0.01017, |
|
"grad_norm": 0.9071988294298332, |
|
"learning_rate": 0.003, |
|
"loss": 4.4046, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 0.01018, |
|
"grad_norm": 0.9028445315923417, |
|
"learning_rate": 0.003, |
|
"loss": 4.4536, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 0.01019, |
|
"grad_norm": 0.701398424995911, |
|
"learning_rate": 0.003, |
|
"loss": 4.4297, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 0.0102, |
|
"grad_norm": 0.7422831527736846, |
|
"learning_rate": 0.003, |
|
"loss": 4.428, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.01021, |
|
"grad_norm": 0.7468650100074169, |
|
"learning_rate": 0.003, |
|
"loss": 4.4136, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 0.01022, |
|
"grad_norm": 0.9325291879496833, |
|
"learning_rate": 0.003, |
|
"loss": 4.4294, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 0.01023, |
|
"grad_norm": 0.8401676161943546, |
|
"learning_rate": 0.003, |
|
"loss": 4.4261, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 0.01024, |
|
"grad_norm": 0.8270343729935282, |
|
"learning_rate": 0.003, |
|
"loss": 4.3898, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 0.01025, |
|
"grad_norm": 0.7180913593704098, |
|
"learning_rate": 0.003, |
|
"loss": 4.4147, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.01026, |
|
"grad_norm": 0.6277034329789435, |
|
"learning_rate": 0.003, |
|
"loss": 4.4039, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 0.01027, |
|
"grad_norm": 0.5318737503801085, |
|
"learning_rate": 0.003, |
|
"loss": 4.4219, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 0.01028, |
|
"grad_norm": 0.4999785226446009, |
|
"learning_rate": 0.003, |
|
"loss": 4.4015, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 0.01029, |
|
"grad_norm": 0.48973961084120876, |
|
"learning_rate": 0.003, |
|
"loss": 4.3769, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 0.0103, |
|
"grad_norm": 0.5942733082980836, |
|
"learning_rate": 0.003, |
|
"loss": 4.4125, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.01031, |
|
"grad_norm": 0.7229752342980397, |
|
"learning_rate": 0.003, |
|
"loss": 4.4274, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 0.01032, |
|
"grad_norm": 0.7373514084562369, |
|
"learning_rate": 0.003, |
|
"loss": 4.4145, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 0.01033, |
|
"grad_norm": 0.6482800157489234, |
|
"learning_rate": 0.003, |
|
"loss": 4.4346, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 0.01034, |
|
"grad_norm": 0.6524184922218951, |
|
"learning_rate": 0.003, |
|
"loss": 4.4051, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 0.01035, |
|
"grad_norm": 0.705389079509784, |
|
"learning_rate": 0.003, |
|
"loss": 4.4073, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.01036, |
|
"grad_norm": 0.7235541204605349, |
|
"learning_rate": 0.003, |
|
"loss": 4.3939, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 0.01037, |
|
"grad_norm": 0.6796343657160102, |
|
"learning_rate": 0.003, |
|
"loss": 4.4161, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 0.01038, |
|
"grad_norm": 0.5773141410598708, |
|
"learning_rate": 0.003, |
|
"loss": 4.4121, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 0.01039, |
|
"grad_norm": 0.5565285106963757, |
|
"learning_rate": 0.003, |
|
"loss": 4.4014, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 0.0104, |
|
"grad_norm": 0.526665138631767, |
|
"learning_rate": 0.003, |
|
"loss": 4.4124, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.01041, |
|
"grad_norm": 0.5117793067919421, |
|
"learning_rate": 0.003, |
|
"loss": 4.3909, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 0.01042, |
|
"grad_norm": 0.5092553549098504, |
|
"learning_rate": 0.003, |
|
"loss": 4.4167, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 0.01043, |
|
"grad_norm": 0.5543198083705748, |
|
"learning_rate": 0.003, |
|
"loss": 4.3593, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 0.01044, |
|
"grad_norm": 0.600481111898673, |
|
"learning_rate": 0.003, |
|
"loss": 4.3987, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 0.01045, |
|
"grad_norm": 0.5965980004717368, |
|
"learning_rate": 0.003, |
|
"loss": 4.4162, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.01046, |
|
"grad_norm": 0.6740252530686676, |
|
"learning_rate": 0.003, |
|
"loss": 4.3928, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 0.01047, |
|
"grad_norm": 0.7361053030528937, |
|
"learning_rate": 0.003, |
|
"loss": 4.3997, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 0.01048, |
|
"grad_norm": 0.767513361829787, |
|
"learning_rate": 0.003, |
|
"loss": 4.3936, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 0.01049, |
|
"grad_norm": 0.6855190682446736, |
|
"learning_rate": 0.003, |
|
"loss": 4.3699, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 0.0105, |
|
"grad_norm": 0.5741691502464937, |
|
"learning_rate": 0.003, |
|
"loss": 4.4062, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.01051, |
|
"grad_norm": 0.6230464159511063, |
|
"learning_rate": 0.003, |
|
"loss": 4.3827, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 0.01052, |
|
"grad_norm": 0.6366737529257143, |
|
"learning_rate": 0.003, |
|
"loss": 4.3967, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 0.01053, |
|
"grad_norm": 0.6755427193415695, |
|
"learning_rate": 0.003, |
|
"loss": 4.4056, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 0.01054, |
|
"grad_norm": 0.6231015147472246, |
|
"learning_rate": 0.003, |
|
"loss": 4.3965, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 0.01055, |
|
"grad_norm": 0.5394645878748071, |
|
"learning_rate": 0.003, |
|
"loss": 4.3715, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.01056, |
|
"grad_norm": 0.5755128064540977, |
|
"learning_rate": 0.003, |
|
"loss": 4.3719, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 0.01057, |
|
"grad_norm": 0.5491694969265289, |
|
"learning_rate": 0.003, |
|
"loss": 4.3754, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 0.01058, |
|
"grad_norm": 0.7343919109861163, |
|
"learning_rate": 0.003, |
|
"loss": 4.375, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 0.01059, |
|
"grad_norm": 0.9361910412332192, |
|
"learning_rate": 0.003, |
|
"loss": 4.3892, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 0.0106, |
|
"grad_norm": 0.9065515100292879, |
|
"learning_rate": 0.003, |
|
"loss": 4.3901, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.01061, |
|
"grad_norm": 1.1211758916188983, |
|
"learning_rate": 0.003, |
|
"loss": 4.3972, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 0.01062, |
|
"grad_norm": 0.7834596971762835, |
|
"learning_rate": 0.003, |
|
"loss": 4.3907, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 0.01063, |
|
"grad_norm": 0.6963669031971059, |
|
"learning_rate": 0.003, |
|
"loss": 4.3832, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 0.01064, |
|
"grad_norm": 0.6238185439704477, |
|
"learning_rate": 0.003, |
|
"loss": 4.3945, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 0.01065, |
|
"grad_norm": 0.6958753605701185, |
|
"learning_rate": 0.003, |
|
"loss": 4.3993, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.01066, |
|
"grad_norm": 0.7129893176363267, |
|
"learning_rate": 0.003, |
|
"loss": 4.4025, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 0.01067, |
|
"grad_norm": 0.7862199002009355, |
|
"learning_rate": 0.003, |
|
"loss": 4.4178, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 0.01068, |
|
"grad_norm": 0.7422673244500091, |
|
"learning_rate": 0.003, |
|
"loss": 4.3984, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 0.01069, |
|
"grad_norm": 0.7997129592129516, |
|
"learning_rate": 0.003, |
|
"loss": 4.4072, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 0.0107, |
|
"grad_norm": 0.945099613944759, |
|
"learning_rate": 0.003, |
|
"loss": 4.4674, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.01071, |
|
"grad_norm": 0.8590095455876905, |
|
"learning_rate": 0.003, |
|
"loss": 4.387, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 0.01072, |
|
"grad_norm": 0.8651521698594221, |
|
"learning_rate": 0.003, |
|
"loss": 4.4227, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 0.01073, |
|
"grad_norm": 0.80387311045371, |
|
"learning_rate": 0.003, |
|
"loss": 4.4158, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 0.01074, |
|
"grad_norm": 0.8137862583554676, |
|
"learning_rate": 0.003, |
|
"loss": 4.3801, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 0.01075, |
|
"grad_norm": 0.8055768924194764, |
|
"learning_rate": 0.003, |
|
"loss": 4.4185, |
|
"step": 1075 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 100000, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 4.26169160695808e+16, |
|
"train_batch_size": 512, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|