{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9958522615050365,
"eval_steps": 16,
"global_step": 474,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006320363420896702,
"grad_norm": 1.03125,
"learning_rate": 2e-05,
"loss": 0.5558,
"step": 1
},
{
"epoch": 0.006320363420896702,
"eval_loss": 0.5296231508255005,
"eval_runtime": 52.0012,
"eval_samples_per_second": 20.5,
"eval_steps_per_second": 20.5,
"step": 1
},
{
"epoch": 0.012640726841793404,
"grad_norm": 0.96484375,
"learning_rate": 4e-05,
"loss": 0.5376,
"step": 2
},
{
"epoch": 0.018961090262690106,
"grad_norm": 0.9609375,
"learning_rate": 6e-05,
"loss": 0.5124,
"step": 3
},
{
"epoch": 0.025281453683586808,
"grad_norm": 0.71484375,
"learning_rate": 8e-05,
"loss": 0.4336,
"step": 4
},
{
"epoch": 0.03160181710448351,
"grad_norm": 0.427734375,
"learning_rate": 0.0001,
"loss": 0.332,
"step": 5
},
{
"epoch": 0.03792218052538021,
"grad_norm": 0.326171875,
"learning_rate": 0.00012,
"loss": 0.2481,
"step": 6
},
{
"epoch": 0.04424254394627691,
"grad_norm": 0.34375,
"learning_rate": 0.00014,
"loss": 0.225,
"step": 7
},
{
"epoch": 0.050562907367173615,
"grad_norm": 0.30078125,
"learning_rate": 0.00016,
"loss": 0.1996,
"step": 8
},
{
"epoch": 0.05688327078807032,
"grad_norm": 0.298828125,
"learning_rate": 0.00018,
"loss": 0.1811,
"step": 9
},
{
"epoch": 0.06320363420896702,
"grad_norm": 0.240234375,
"learning_rate": 0.0002,
"loss": 0.1933,
"step": 10
},
{
"epoch": 0.06952399762986372,
"grad_norm": 0.267578125,
"learning_rate": 0.00019999770790755575,
"loss": 0.2048,
"step": 11
},
{
"epoch": 0.07584436105076042,
"grad_norm": 0.2177734375,
"learning_rate": 0.00019999083173529673,
"loss": 0.1728,
"step": 12
},
{
"epoch": 0.08216472447165712,
"grad_norm": 0.1748046875,
"learning_rate": 0.00019997937179843937,
"loss": 0.1804,
"step": 13
},
{
"epoch": 0.08848508789255383,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001999633286223284,
"loss": 0.1894,
"step": 14
},
{
"epoch": 0.09480545131345053,
"grad_norm": 0.15234375,
"learning_rate": 0.00019994270294241266,
"loss": 0.1633,
"step": 15
},
{
"epoch": 0.10112581473434723,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019991749570421146,
"loss": 0.1574,
"step": 16
},
{
"epoch": 0.10112581473434723,
"eval_loss": 0.16320638358592987,
"eval_runtime": 56.0359,
"eval_samples_per_second": 19.024,
"eval_steps_per_second": 19.024,
"step": 16
},
{
"epoch": 0.10744617815524393,
"grad_norm": 0.1328125,
"learning_rate": 0.0001998877080632712,
"loss": 0.1589,
"step": 17
},
{
"epoch": 0.11376654157614063,
"grad_norm": 0.1650390625,
"learning_rate": 0.00019985334138511237,
"loss": 0.1718,
"step": 18
},
{
"epoch": 0.12008690499703734,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019981439724516716,
"loss": 0.156,
"step": 19
},
{
"epoch": 0.12640726841793404,
"grad_norm": 0.162109375,
"learning_rate": 0.0001997708774287068,
"loss": 0.1781,
"step": 20
},
{
"epoch": 0.13272763183883074,
"grad_norm": 0.1416015625,
"learning_rate": 0.00019972278393076023,
"loss": 0.1347,
"step": 21
},
{
"epoch": 0.13904799525972744,
"grad_norm": 0.130859375,
"learning_rate": 0.0001996701189560223,
"loss": 0.1389,
"step": 22
},
{
"epoch": 0.14536835868062414,
"grad_norm": 0.138671875,
"learning_rate": 0.00019961288491875278,
"loss": 0.1418,
"step": 23
},
{
"epoch": 0.15168872210152085,
"grad_norm": 0.11279296875,
"learning_rate": 0.00019955108444266585,
"loss": 0.1269,
"step": 24
},
{
"epoch": 0.15800908552241755,
"grad_norm": 0.1435546875,
"learning_rate": 0.00019948472036080949,
"loss": 0.1659,
"step": 25
},
{
"epoch": 0.16432944894331425,
"grad_norm": 0.13671875,
"learning_rate": 0.00019941379571543596,
"loss": 0.124,
"step": 26
},
{
"epoch": 0.17064981236421095,
"grad_norm": 0.1396484375,
"learning_rate": 0.00019933831375786216,
"loss": 0.1423,
"step": 27
},
{
"epoch": 0.17697017578510765,
"grad_norm": 0.1494140625,
"learning_rate": 0.00019925827794832056,
"loss": 0.1394,
"step": 28
},
{
"epoch": 0.18329053920600435,
"grad_norm": 0.1279296875,
"learning_rate": 0.00019917369195580063,
"loss": 0.1168,
"step": 29
},
{
"epoch": 0.18961090262690106,
"grad_norm": 0.1435546875,
"learning_rate": 0.00019908455965788067,
"loss": 0.147,
"step": 30
},
{
"epoch": 0.19593126604779776,
"grad_norm": 0.1318359375,
"learning_rate": 0.00019899088514055004,
"loss": 0.13,
"step": 31
},
{
"epoch": 0.20225162946869446,
"grad_norm": 0.11865234375,
"learning_rate": 0.00019889267269802176,
"loss": 0.1279,
"step": 32
},
{
"epoch": 0.20225162946869446,
"eval_loss": 0.137903094291687,
"eval_runtime": 55.5398,
"eval_samples_per_second": 19.193,
"eval_steps_per_second": 19.193,
"step": 32
},
{
"epoch": 0.20857199288959116,
"grad_norm": 0.125,
"learning_rate": 0.00019878992683253582,
"loss": 0.123,
"step": 33
},
{
"epoch": 0.21489235631048786,
"grad_norm": 0.125,
"learning_rate": 0.00019868265225415265,
"loss": 0.1264,
"step": 34
},
{
"epoch": 0.22121271973138457,
"grad_norm": 0.1279296875,
"learning_rate": 0.00019857085388053723,
"loss": 0.1295,
"step": 35
},
{
"epoch": 0.22753308315228127,
"grad_norm": 0.138671875,
"learning_rate": 0.00019845453683673368,
"loss": 0.1448,
"step": 36
},
{
"epoch": 0.23385344657317797,
"grad_norm": 0.126953125,
"learning_rate": 0.00019833370645493047,
"loss": 0.1293,
"step": 37
},
{
"epoch": 0.24017380999407467,
"grad_norm": 0.130859375,
"learning_rate": 0.0001982083682742156,
"loss": 0.1387,
"step": 38
},
{
"epoch": 0.24649417341497137,
"grad_norm": 0.10888671875,
"learning_rate": 0.00019807852804032305,
"loss": 0.1027,
"step": 39
},
{
"epoch": 0.2528145368358681,
"grad_norm": 0.1240234375,
"learning_rate": 0.00019794419170536916,
"loss": 0.1166,
"step": 40
},
{
"epoch": 0.2591349002567648,
"grad_norm": 0.1337890625,
"learning_rate": 0.00019780536542758,
"loss": 0.1218,
"step": 41
},
{
"epoch": 0.2654552636776615,
"grad_norm": 0.11865234375,
"learning_rate": 0.00019766205557100868,
"loss": 0.1291,
"step": 42
},
{
"epoch": 0.2717756270985582,
"grad_norm": 0.138671875,
"learning_rate": 0.00019751426870524407,
"loss": 0.1409,
"step": 43
},
{
"epoch": 0.2780959905194549,
"grad_norm": 0.12109375,
"learning_rate": 0.00019736201160510931,
"loss": 0.1275,
"step": 44
},
{
"epoch": 0.2844163539403516,
"grad_norm": 0.125,
"learning_rate": 0.0001972052912503514,
"loss": 0.1319,
"step": 45
},
{
"epoch": 0.2907367173612483,
"grad_norm": 0.12158203125,
"learning_rate": 0.00019704411482532116,
"loss": 0.134,
"step": 46
},
{
"epoch": 0.297057080782145,
"grad_norm": 0.119140625,
"learning_rate": 0.00019687848971864389,
"loss": 0.1334,
"step": 47
},
{
"epoch": 0.3033774442030417,
"grad_norm": 0.119140625,
"learning_rate": 0.0001967084235228807,
"loss": 0.1166,
"step": 48
},
{
"epoch": 0.3033774442030417,
"eval_loss": 0.12646569311618805,
"eval_runtime": 55.7378,
"eval_samples_per_second": 19.125,
"eval_steps_per_second": 19.125,
"step": 48
},
{
"epoch": 0.3096978076239384,
"grad_norm": 0.12109375,
"learning_rate": 0.00019653392403418043,
"loss": 0.1242,
"step": 49
},
{
"epoch": 0.3160181710448351,
"grad_norm": 0.1201171875,
"learning_rate": 0.0001963549992519223,
"loss": 0.1309,
"step": 50
},
{
"epoch": 0.3223385344657318,
"grad_norm": 0.1044921875,
"learning_rate": 0.00019617165737834916,
"loss": 0.1041,
"step": 51
},
{
"epoch": 0.3286588978866285,
"grad_norm": 0.1318359375,
"learning_rate": 0.0001959839068181914,
"loss": 0.1219,
"step": 52
},
{
"epoch": 0.3349792613075252,
"grad_norm": 0.12109375,
"learning_rate": 0.00019579175617828187,
"loss": 0.1421,
"step": 53
},
{
"epoch": 0.3412996247284219,
"grad_norm": 0.11767578125,
"learning_rate": 0.00019559521426716118,
"loss": 0.1176,
"step": 54
},
{
"epoch": 0.3476199881493186,
"grad_norm": 0.119140625,
"learning_rate": 0.0001953942900946739,
"loss": 0.1206,
"step": 55
},
{
"epoch": 0.3539403515702153,
"grad_norm": 0.12158203125,
"learning_rate": 0.00019518899287155556,
"loss": 0.1174,
"step": 56
},
{
"epoch": 0.360260714991112,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001949793320090105,
"loss": 0.1228,
"step": 57
},
{
"epoch": 0.3665810784120087,
"grad_norm": 0.13671875,
"learning_rate": 0.00019476531711828027,
"loss": 0.125,
"step": 58
},
{
"epoch": 0.3729014418329054,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001945469580102031,
"loss": 0.1094,
"step": 59
},
{
"epoch": 0.3792218052538021,
"grad_norm": 0.10498046875,
"learning_rate": 0.0001943242646947643,
"loss": 0.0983,
"step": 60
},
{
"epoch": 0.3855421686746988,
"grad_norm": 0.1220703125,
"learning_rate": 0.00019409724738063714,
"loss": 0.1164,
"step": 61
},
{
"epoch": 0.3918625320955955,
"grad_norm": 0.11767578125,
"learning_rate": 0.00019386591647471506,
"loss": 0.1124,
"step": 62
},
{
"epoch": 0.3981828955164922,
"grad_norm": 0.11669921875,
"learning_rate": 0.00019363028258163447,
"loss": 0.1188,
"step": 63
},
{
"epoch": 0.4045032589373889,
"grad_norm": 0.12255859375,
"learning_rate": 0.00019339035650328869,
"loss": 0.1335,
"step": 64
},
{
"epoch": 0.4045032589373889,
"eval_loss": 0.11877521872520447,
"eval_runtime": 55.4677,
"eval_samples_per_second": 19.218,
"eval_steps_per_second": 19.218,
"step": 64
},
{
"epoch": 0.4108236223582856,
"grad_norm": 0.1279296875,
"learning_rate": 0.0001931461492383327,
"loss": 0.1447,
"step": 65
},
{
"epoch": 0.4171439857791823,
"grad_norm": 0.1044921875,
"learning_rate": 0.00019289767198167916,
"loss": 0.1004,
"step": 66
},
{
"epoch": 0.423464349200079,
"grad_norm": 0.1171875,
"learning_rate": 0.00019264493612398481,
"loss": 0.1228,
"step": 67
},
{
"epoch": 0.42978471262097573,
"grad_norm": 0.099609375,
"learning_rate": 0.0001923879532511287,
"loss": 0.0963,
"step": 68
},
{
"epoch": 0.43610507604187243,
"grad_norm": 0.1015625,
"learning_rate": 0.0001921267351436808,
"loss": 0.1025,
"step": 69
},
{
"epoch": 0.44242543946276913,
"grad_norm": 0.123046875,
"learning_rate": 0.0001918612937763622,
"loss": 0.124,
"step": 70
},
{
"epoch": 0.44874580288366583,
"grad_norm": 0.109375,
"learning_rate": 0.00019159164131749587,
"loss": 0.1203,
"step": 71
},
{
"epoch": 0.45506616630456254,
"grad_norm": 0.109375,
"learning_rate": 0.00019131779012844912,
"loss": 0.1052,
"step": 72
},
{
"epoch": 0.46138652972545924,
"grad_norm": 0.1162109375,
"learning_rate": 0.00019103975276306678,
"loss": 0.1181,
"step": 73
},
{
"epoch": 0.46770689314635594,
"grad_norm": 0.11181640625,
"learning_rate": 0.00019075754196709572,
"loss": 0.1065,
"step": 74
},
{
"epoch": 0.47402725656725264,
"grad_norm": 0.109375,
"learning_rate": 0.0001904711706776006,
"loss": 0.1075,
"step": 75
},
{
"epoch": 0.48034761998814934,
"grad_norm": 0.11865234375,
"learning_rate": 0.00019018065202237083,
"loss": 0.1182,
"step": 76
},
{
"epoch": 0.48666798340904605,
"grad_norm": 0.09814453125,
"learning_rate": 0.00018988599931931866,
"loss": 0.0913,
"step": 77
},
{
"epoch": 0.49298834682994275,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001895872260758688,
"loss": 0.103,
"step": 78
},
{
"epoch": 0.49930871025083945,
"grad_norm": 0.111328125,
"learning_rate": 0.00018928434598833912,
"loss": 0.1106,
"step": 79
},
{
"epoch": 0.5056290736717362,
"grad_norm": 0.12451171875,
"learning_rate": 0.00018897737294131284,
"loss": 0.1145,
"step": 80
},
{
"epoch": 0.5056290736717362,
"eval_loss": 0.11342703551054001,
"eval_runtime": 55.8581,
"eval_samples_per_second": 19.084,
"eval_steps_per_second": 19.084,
"step": 80
},
{
"epoch": 0.5119494370926329,
"grad_norm": 0.109375,
"learning_rate": 0.00018866632100700197,
"loss": 0.0943,
"step": 81
},
{
"epoch": 0.5182698005135296,
"grad_norm": 0.130859375,
"learning_rate": 0.0001883512044446023,
"loss": 0.1409,
"step": 82
},
{
"epoch": 0.5245901639344263,
"grad_norm": 0.1259765625,
"learning_rate": 0.00018803203769963967,
"loss": 0.1364,
"step": 83
},
{
"epoch": 0.530910527355323,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001877088354033077,
"loss": 0.0904,
"step": 84
},
{
"epoch": 0.5372308907762197,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001873816123717973,
"loss": 0.0942,
"step": 85
},
{
"epoch": 0.5435512541971164,
"grad_norm": 0.11767578125,
"learning_rate": 0.0001870503836056172,
"loss": 0.095,
"step": 86
},
{
"epoch": 0.5498716176180131,
"grad_norm": 0.10986328125,
"learning_rate": 0.00018671516428890648,
"loss": 0.1061,
"step": 87
},
{
"epoch": 0.5561919810389098,
"grad_norm": 0.11962890625,
"learning_rate": 0.00018637596978873835,
"loss": 0.1209,
"step": 88
},
{
"epoch": 0.5625123444598065,
"grad_norm": 0.12451171875,
"learning_rate": 0.00018603281565441585,
"loss": 0.1268,
"step": 89
},
{
"epoch": 0.5688327078807032,
"grad_norm": 0.11376953125,
"learning_rate": 0.00018568571761675893,
"loss": 0.1034,
"step": 90
},
{
"epoch": 0.5751530713015999,
"grad_norm": 0.12060546875,
"learning_rate": 0.00018533469158738344,
"loss": 0.1265,
"step": 91
},
{
"epoch": 0.5814734347224966,
"grad_norm": 0.11328125,
"learning_rate": 0.0001849797536579715,
"loss": 0.1123,
"step": 92
},
{
"epoch": 0.5877937981433933,
"grad_norm": 0.10498046875,
"learning_rate": 0.00018462092009953408,
"loss": 0.1041,
"step": 93
},
{
"epoch": 0.59411416156429,
"grad_norm": 0.1171875,
"learning_rate": 0.0001842582073616649,
"loss": 0.1197,
"step": 94
},
{
"epoch": 0.6004345249851867,
"grad_norm": 0.0986328125,
"learning_rate": 0.00018389163207178656,
"loss": 0.0961,
"step": 95
},
{
"epoch": 0.6067548884060834,
"grad_norm": 0.1025390625,
"learning_rate": 0.000183521211034388,
"loss": 0.1036,
"step": 96
},
{
"epoch": 0.6067548884060834,
"eval_loss": 0.10819214582443237,
"eval_runtime": 56.0566,
"eval_samples_per_second": 19.017,
"eval_steps_per_second": 19.017,
"step": 96
},
{
"epoch": 0.6130752518269801,
"grad_norm": 0.1015625,
"learning_rate": 0.00018314696123025454,
"loss": 0.0953,
"step": 97
},
{
"epoch": 0.6193956152478768,
"grad_norm": 0.10791015625,
"learning_rate": 0.00018276889981568906,
"loss": 0.091,
"step": 98
},
{
"epoch": 0.6257159786687735,
"grad_norm": 0.1103515625,
"learning_rate": 0.00018238704412172586,
"loss": 0.0936,
"step": 99
},
{
"epoch": 0.6320363420896702,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001820014116533359,
"loss": 0.111,
"step": 100
},
{
"epoch": 0.6383567055105669,
"grad_norm": 0.1259765625,
"learning_rate": 0.00018161202008862458,
"loss": 0.1232,
"step": 101
},
{
"epoch": 0.6446770689314636,
"grad_norm": 0.11474609375,
"learning_rate": 0.00018121888727802113,
"loss": 0.1072,
"step": 102
},
{
"epoch": 0.6509974323523603,
"grad_norm": 0.1103515625,
"learning_rate": 0.00018082203124346045,
"loss": 0.1081,
"step": 103
},
{
"epoch": 0.657317795773257,
"grad_norm": 0.125,
"learning_rate": 0.0001804214701775569,
"loss": 0.1155,
"step": 104
},
{
"epoch": 0.6636381591941537,
"grad_norm": 0.11669921875,
"learning_rate": 0.00018001722244277035,
"loss": 0.104,
"step": 105
},
{
"epoch": 0.6699585226150504,
"grad_norm": 0.10400390625,
"learning_rate": 0.00017960930657056438,
"loss": 0.0984,
"step": 106
},
{
"epoch": 0.6762788860359471,
"grad_norm": 0.10205078125,
"learning_rate": 0.00017919774126055673,
"loss": 0.0931,
"step": 107
},
{
"epoch": 0.6825992494568438,
"grad_norm": 0.1044921875,
"learning_rate": 0.00017878254537966216,
"loss": 0.1035,
"step": 108
},
{
"epoch": 0.6889196128777405,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001783637379612275,
"loss": 0.1046,
"step": 109
},
{
"epoch": 0.6952399762986372,
"grad_norm": 0.11328125,
"learning_rate": 0.00017794133820415916,
"loss": 0.1166,
"step": 110
},
{
"epoch": 0.7015603397195339,
"grad_norm": 0.10302734375,
"learning_rate": 0.00017751536547204295,
"loss": 0.0937,
"step": 111
},
{
"epoch": 0.7078807031404306,
"grad_norm": 0.10302734375,
"learning_rate": 0.0001770858392922565,
"loss": 0.0937,
"step": 112
},
{
"epoch": 0.7078807031404306,
"eval_loss": 0.10633409768342972,
"eval_runtime": 54.3141,
"eval_samples_per_second": 19.627,
"eval_steps_per_second": 19.627,
"step": 112
},
{
"epoch": 0.7142010665613273,
"grad_norm": 0.107421875,
"learning_rate": 0.00017665277935507398,
"loss": 0.0908,
"step": 113
},
{
"epoch": 0.720521429982224,
"grad_norm": 0.1025390625,
"learning_rate": 0.00017621620551276366,
"loss": 0.0896,
"step": 114
},
{
"epoch": 0.7268417934031207,
"grad_norm": 0.0986328125,
"learning_rate": 0.00017577613777867762,
"loss": 0.0809,
"step": 115
},
{
"epoch": 0.7331621568240174,
"grad_norm": 0.10400390625,
"learning_rate": 0.00017533259632633442,
"loss": 0.0895,
"step": 116
},
{
"epoch": 0.7394825202449141,
"grad_norm": 0.1298828125,
"learning_rate": 0.00017488560148849427,
"loss": 0.1193,
"step": 117
},
{
"epoch": 0.7458028836658108,
"grad_norm": 0.10791015625,
"learning_rate": 0.00017443517375622704,
"loss": 0.1217,
"step": 118
},
{
"epoch": 0.7521232470867075,
"grad_norm": 0.115234375,
"learning_rate": 0.0001739813337779727,
"loss": 0.1096,
"step": 119
},
{
"epoch": 0.7584436105076042,
"grad_norm": 0.10302734375,
"learning_rate": 0.00017352410235859503,
"loss": 0.0907,
"step": 120
},
{
"epoch": 0.7647639739285009,
"grad_norm": 0.11328125,
"learning_rate": 0.0001730635004584276,
"loss": 0.1081,
"step": 121
},
{
"epoch": 0.7710843373493976,
"grad_norm": 0.09765625,
"learning_rate": 0.0001725995491923131,
"loss": 0.0847,
"step": 122
},
{
"epoch": 0.7774047007702943,
"grad_norm": 0.1259765625,
"learning_rate": 0.0001721322698286354,
"loss": 0.1134,
"step": 123
},
{
"epoch": 0.783725064191191,
"grad_norm": 0.115234375,
"learning_rate": 0.00017166168378834448,
"loss": 0.1065,
"step": 124
},
{
"epoch": 0.7900454276120877,
"grad_norm": 0.11279296875,
"learning_rate": 0.00017118781264397446,
"loss": 0.1078,
"step": 125
},
{
"epoch": 0.7963657910329844,
"grad_norm": 0.11767578125,
"learning_rate": 0.00017071067811865476,
"loss": 0.1064,
"step": 126
},
{
"epoch": 0.8026861544538811,
"grad_norm": 0.1064453125,
"learning_rate": 0.0001702303020851142,
"loss": 0.1031,
"step": 127
},
{
"epoch": 0.8090065178747778,
"grad_norm": 0.1123046875,
"learning_rate": 0.00016974670656467824,
"loss": 0.0934,
"step": 128
},
{
"epoch": 0.8090065178747778,
"eval_loss": 0.1029018759727478,
"eval_runtime": 54.0096,
"eval_samples_per_second": 19.737,
"eval_steps_per_second": 19.737,
"step": 128
},
{
"epoch": 0.8153268812956745,
"grad_norm": 0.11181640625,
"learning_rate": 0.0001692599137262597,
"loss": 0.1022,
"step": 129
},
{
"epoch": 0.8216472447165712,
"grad_norm": 0.1025390625,
"learning_rate": 0.00016876994588534234,
"loss": 0.0809,
"step": 130
},
{
"epoch": 0.827967608137468,
"grad_norm": 0.12353515625,
"learning_rate": 0.00016827682550295785,
"loss": 0.0976,
"step": 131
},
{
"epoch": 0.8342879715583646,
"grad_norm": 0.11474609375,
"learning_rate": 0.0001677805751846563,
"loss": 0.1027,
"step": 132
},
{
"epoch": 0.8406083349792614,
"grad_norm": 0.1142578125,
"learning_rate": 0.00016728121767946977,
"loss": 0.0912,
"step": 133
},
{
"epoch": 0.846928698400158,
"grad_norm": 0.10498046875,
"learning_rate": 0.00016677877587886956,
"loss": 0.0913,
"step": 134
},
{
"epoch": 0.8532490618210548,
"grad_norm": 0.11474609375,
"learning_rate": 0.00016627327281571678,
"loss": 0.1078,
"step": 135
},
{
"epoch": 0.8595694252419515,
"grad_norm": 0.1435546875,
"learning_rate": 0.00016576473166320644,
"loss": 0.1356,
"step": 136
},
{
"epoch": 0.8658897886628482,
"grad_norm": 0.10791015625,
"learning_rate": 0.00016525317573380525,
"loss": 0.1007,
"step": 137
},
{
"epoch": 0.8722101520837449,
"grad_norm": 0.1044921875,
"learning_rate": 0.00016473862847818277,
"loss": 0.1005,
"step": 138
},
{
"epoch": 0.8785305155046416,
"grad_norm": 0.10546875,
"learning_rate": 0.00016422111348413657,
"loss": 0.1016,
"step": 139
},
{
"epoch": 0.8848508789255383,
"grad_norm": 0.1025390625,
"learning_rate": 0.00016370065447551078,
"loss": 0.0898,
"step": 140
},
{
"epoch": 0.891171242346435,
"grad_norm": 0.11181640625,
"learning_rate": 0.0001631772753111086,
"loss": 0.0933,
"step": 141
},
{
"epoch": 0.8974916057673317,
"grad_norm": 0.11376953125,
"learning_rate": 0.00016265099998359866,
"loss": 0.0991,
"step": 142
},
{
"epoch": 0.9038119691882284,
"grad_norm": 0.10595703125,
"learning_rate": 0.00016212185261841499,
"loss": 0.0904,
"step": 143
},
{
"epoch": 0.9101323326091251,
"grad_norm": 0.10693359375,
"learning_rate": 0.00016158985747265108,
"loss": 0.0975,
"step": 144
},
{
"epoch": 0.9101323326091251,
"eval_loss": 0.10078423470258713,
"eval_runtime": 52.712,
"eval_samples_per_second": 20.223,
"eval_steps_per_second": 20.223,
"step": 144
},
{
"epoch": 0.9164526960300218,
"grad_norm": 0.10009765625,
"learning_rate": 0.00016105503893394806,
"loss": 0.0921,
"step": 145
},
{
"epoch": 0.9227730594509185,
"grad_norm": 0.1025390625,
"learning_rate": 0.00016051742151937655,
"loss": 0.0927,
"step": 146
},
{
"epoch": 0.9290934228718152,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001599770298743128,
"loss": 0.0915,
"step": 147
},
{
"epoch": 0.9354137862927119,
"grad_norm": 0.11279296875,
"learning_rate": 0.000159433888771309,
"loss": 0.1038,
"step": 148
},
{
"epoch": 0.9417341497136086,
"grad_norm": 0.11962890625,
"learning_rate": 0.00015888802310895742,
"loss": 0.1063,
"step": 149
},
{
"epoch": 0.9480545131345053,
"grad_norm": 0.10205078125,
"learning_rate": 0.00015833945791074943,
"loss": 0.0796,
"step": 150
},
{
"epoch": 0.954374876555402,
"grad_norm": 0.10986328125,
"learning_rate": 0.00015778821832392777,
"loss": 0.0984,
"step": 151
},
{
"epoch": 0.9606952399762987,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001572343296183344,
"loss": 0.1029,
"step": 152
},
{
"epoch": 0.9670156033971954,
"grad_norm": 0.10546875,
"learning_rate": 0.00015667781718525157,
"loss": 0.0903,
"step": 153
},
{
"epoch": 0.9733359668180921,
"grad_norm": 0.10498046875,
"learning_rate": 0.00015611870653623825,
"loss": 0.0961,
"step": 154
},
{
"epoch": 0.9796563302389888,
"grad_norm": 0.09423828125,
"learning_rate": 0.00015555702330196023,
"loss": 0.0752,
"step": 155
},
{
"epoch": 0.9859766936598855,
"grad_norm": 0.111328125,
"learning_rate": 0.0001549927932310155,
"loss": 0.1046,
"step": 156
},
{
"epoch": 0.9922970570807822,
"grad_norm": 0.11083984375,
"learning_rate": 0.0001544260421887537,
"loss": 0.0966,
"step": 157
},
{
"epoch": 0.9986174205016789,
"grad_norm": 0.11474609375,
"learning_rate": 0.00015385679615609042,
"loss": 0.1116,
"step": 158
},
{
"epoch": 1.0049377839225755,
"grad_norm": 0.1005859375,
"learning_rate": 0.00015328508122831636,
"loss": 0.0839,
"step": 159
},
{
"epoch": 1.0112581473434723,
"grad_norm": 0.09912109375,
"learning_rate": 0.00015271092361390077,
"loss": 0.0657,
"step": 160
},
{
"epoch": 1.0112581473434723,
"eval_loss": 0.0979941338300705,
"eval_runtime": 52.8145,
"eval_samples_per_second": 20.184,
"eval_steps_per_second": 20.184,
"step": 160
},
{
"epoch": 1.017578510764369,
"grad_norm": 0.10107421875,
"learning_rate": 0.0001521343496332903,
"loss": 0.0778,
"step": 161
},
{
"epoch": 1.0238988741852657,
"grad_norm": 0.0869140625,
"learning_rate": 0.00015155538571770218,
"loss": 0.0658,
"step": 162
},
{
"epoch": 1.0302192376061623,
"grad_norm": 0.099609375,
"learning_rate": 0.00015097405840791276,
"loss": 0.0716,
"step": 163
},
{
"epoch": 1.036539601027059,
"grad_norm": 0.09228515625,
"learning_rate": 0.00015039039435304078,
"loss": 0.064,
"step": 164
},
{
"epoch": 1.0428599644479557,
"grad_norm": 0.12255859375,
"learning_rate": 0.00014980442030932558,
"loss": 0.0944,
"step": 165
},
{
"epoch": 1.0491803278688525,
"grad_norm": 0.1064453125,
"learning_rate": 0.00014921616313890072,
"loss": 0.0731,
"step": 166
},
{
"epoch": 1.055500691289749,
"grad_norm": 0.1005859375,
"learning_rate": 0.00014862564980856258,
"loss": 0.0557,
"step": 167
},
{
"epoch": 1.061821054710646,
"grad_norm": 0.13671875,
"learning_rate": 0.00014803290738853395,
"loss": 0.0874,
"step": 168
},
{
"epoch": 1.0681414181315425,
"grad_norm": 0.11328125,
"learning_rate": 0.00014743796305122331,
"loss": 0.0696,
"step": 169
},
{
"epoch": 1.0744617815524393,
"grad_norm": 0.111328125,
"learning_rate": 0.00014684084406997903,
"loss": 0.0681,
"step": 170
},
{
"epoch": 1.080782144973336,
"grad_norm": 0.107421875,
"learning_rate": 0.00014624157781783926,
"loss": 0.0692,
"step": 171
},
{
"epoch": 1.0871025083942327,
"grad_norm": 0.107421875,
"learning_rate": 0.0001456401917662769,
"loss": 0.0675,
"step": 172
},
{
"epoch": 1.0934228718151293,
"grad_norm": 0.115234375,
"learning_rate": 0.00014503671348394057,
"loss": 0.0689,
"step": 173
},
{
"epoch": 1.0997432352360261,
"grad_norm": 0.11572265625,
"learning_rate": 0.00014443117063539038,
"loss": 0.0824,
"step": 174
},
{
"epoch": 1.1060635986569227,
"grad_norm": 0.11279296875,
"learning_rate": 0.00014382359097983013,
"loss": 0.0708,
"step": 175
},
{
"epoch": 1.1123839620778195,
"grad_norm": 0.1064453125,
"learning_rate": 0.00014321400236983457,
"loss": 0.0671,
"step": 176
},
{
"epoch": 1.1123839620778195,
"eval_loss": 0.09898500144481659,
"eval_runtime": 52.0221,
"eval_samples_per_second": 20.491,
"eval_steps_per_second": 20.491,
"step": 176
},
{
"epoch": 1.1187043254987161,
"grad_norm": 0.10400390625,
"learning_rate": 0.00014260243275007265,
"loss": 0.0725,
"step": 177
},
{
"epoch": 1.125024688919613,
"grad_norm": 0.10888671875,
"learning_rate": 0.00014198891015602646,
"loss": 0.0704,
"step": 178
},
{
"epoch": 1.1313450523405095,
"grad_norm": 0.10009765625,
"learning_rate": 0.00014137346271270604,
"loss": 0.0675,
"step": 179
},
{
"epoch": 1.1376654157614063,
"grad_norm": 0.08984375,
"learning_rate": 0.0001407561186333601,
"loss": 0.0638,
"step": 180
},
{
"epoch": 1.143985779182303,
"grad_norm": 0.1171875,
"learning_rate": 0.00014013690621818262,
"loss": 0.0792,
"step": 181
},
{
"epoch": 1.1503061426031997,
"grad_norm": 0.09912109375,
"learning_rate": 0.00013951585385301555,
"loss": 0.0748,
"step": 182
},
{
"epoch": 1.1566265060240963,
"grad_norm": 0.10986328125,
"learning_rate": 0.0001388929900080476,
"loss": 0.0641,
"step": 183
},
{
"epoch": 1.1629468694449931,
"grad_norm": 0.1044921875,
"learning_rate": 0.000138268343236509,
"loss": 0.0655,
"step": 184
},
{
"epoch": 1.1692672328658897,
"grad_norm": 0.109375,
"learning_rate": 0.00013764194217336264,
"loss": 0.0696,
"step": 185
},
{
"epoch": 1.1755875962867866,
"grad_norm": 0.1103515625,
"learning_rate": 0.00013701381553399145,
"loss": 0.0648,
"step": 186
},
{
"epoch": 1.1819079597076831,
"grad_norm": 0.107421875,
"learning_rate": 0.00013638399211288188,
"loss": 0.0705,
"step": 187
},
{
"epoch": 1.18822832312858,
"grad_norm": 0.1201171875,
"learning_rate": 0.000135752500782304,
"loss": 0.074,
"step": 188
},
{
"epoch": 1.1945486865494765,
"grad_norm": 0.10595703125,
"learning_rate": 0.00013511937049098805,
"loss": 0.0616,
"step": 189
},
{
"epoch": 1.2008690499703734,
"grad_norm": 0.1025390625,
"learning_rate": 0.00013448463026279704,
"loss": 0.0688,
"step": 190
},
{
"epoch": 1.20718941339127,
"grad_norm": 0.091796875,
"learning_rate": 0.0001338483091953967,
"loss": 0.0506,
"step": 191
},
{
"epoch": 1.2135097768121668,
"grad_norm": 0.107421875,
"learning_rate": 0.0001332104364589212,
"loss": 0.0664,
"step": 192
},
{
"epoch": 1.2135097768121668,
"eval_loss": 0.09864839166402817,
"eval_runtime": 52.1317,
"eval_samples_per_second": 20.448,
"eval_steps_per_second": 20.448,
"step": 192
},
{
"epoch": 1.2198301402330634,
"grad_norm": 0.11181640625,
"learning_rate": 0.00013257104129463614,
"loss": 0.0759,
"step": 193
},
{
"epoch": 1.2261505036539602,
"grad_norm": 0.1083984375,
"learning_rate": 0.000131930153013598,
"loss": 0.0667,
"step": 194
},
{
"epoch": 1.2324708670748568,
"grad_norm": 0.10498046875,
"learning_rate": 0.00013128780099531056,
"loss": 0.0751,
"step": 195
},
{
"epoch": 1.2387912304957536,
"grad_norm": 0.10546875,
"learning_rate": 0.00013064401468637792,
"loss": 0.0703,
"step": 196
},
{
"epoch": 1.2451115939166502,
"grad_norm": 0.1064453125,
"learning_rate": 0.0001299988235991548,
"loss": 0.0687,
"step": 197
},
{
"epoch": 1.251431957337547,
"grad_norm": 0.11083984375,
"learning_rate": 0.00012935225731039348,
"loss": 0.073,
"step": 198
},
{
"epoch": 1.2577523207584436,
"grad_norm": 0.11279296875,
"learning_rate": 0.00012870434545988812,
"loss": 0.0754,
"step": 199
},
{
"epoch": 1.2640726841793404,
"grad_norm": 0.09375,
"learning_rate": 0.00012805511774911584,
"loss": 0.059,
"step": 200
},
{
"epoch": 1.270393047600237,
"grad_norm": 0.0927734375,
"learning_rate": 0.00012740460393987526,
"loss": 0.0574,
"step": 201
},
{
"epoch": 1.2767134110211338,
"grad_norm": 0.11572265625,
"learning_rate": 0.00012675283385292212,
"loss": 0.077,
"step": 202
},
{
"epoch": 1.2830337744420304,
"grad_norm": 0.10205078125,
"learning_rate": 0.0001260998373666022,
"loss": 0.0632,
"step": 203
},
{
"epoch": 1.2893541378629272,
"grad_norm": 0.10302734375,
"learning_rate": 0.00012544564441548182,
"loss": 0.0665,
"step": 204
},
{
"epoch": 1.2956745012838238,
"grad_norm": 0.09326171875,
"learning_rate": 0.00012479028498897535,
"loss": 0.058,
"step": 205
},
{
"epoch": 1.3019948647047206,
"grad_norm": 0.115234375,
"learning_rate": 0.00012413378912997058,
"loss": 0.0652,
"step": 206
},
{
"epoch": 1.3083152281256172,
"grad_norm": 0.08544921875,
"learning_rate": 0.0001234761869334515,
"loss": 0.0526,
"step": 207
},
{
"epoch": 1.314635591546514,
"grad_norm": 0.10595703125,
"learning_rate": 0.0001228175085451186,
"loss": 0.0735,
"step": 208
},
{
"epoch": 1.314635591546514,
"eval_loss": 0.09652113914489746,
"eval_runtime": 51.6293,
"eval_samples_per_second": 20.647,
"eval_steps_per_second": 20.647,
"step": 208
},
{
"epoch": 1.3209559549674106,
"grad_norm": 0.1259765625,
"learning_rate": 0.00012215778416000707,
"loss": 0.0833,
"step": 209
},
{
"epoch": 1.3272763183883074,
"grad_norm": 0.1103515625,
"learning_rate": 0.00012149704402110243,
"loss": 0.068,
"step": 210
},
{
"epoch": 1.333596681809204,
"grad_norm": 0.10107421875,
"learning_rate": 0.00012083531841795425,
"loss": 0.0639,
"step": 211
},
{
"epoch": 1.3399170452301008,
"grad_norm": 0.10302734375,
"learning_rate": 0.00012017263768528775,
"loss": 0.0671,
"step": 212
},
{
"epoch": 1.3462374086509974,
"grad_norm": 0.10595703125,
"learning_rate": 0.00011950903220161285,
"loss": 0.0655,
"step": 213
},
{
"epoch": 1.3525577720718942,
"grad_norm": 0.10498046875,
"learning_rate": 0.00011884453238783185,
"loss": 0.0741,
"step": 214
},
{
"epoch": 1.3588781354927908,
"grad_norm": 0.09619140625,
"learning_rate": 0.00011817916870584482,
"loss": 0.0609,
"step": 215
},
{
"epoch": 1.3651984989136876,
"grad_norm": 0.10546875,
"learning_rate": 0.00011751297165715309,
"loss": 0.074,
"step": 216
},
{
"epoch": 1.3715188623345842,
"grad_norm": 0.12158203125,
"learning_rate": 0.00011684597178146115,
"loss": 0.0724,
"step": 217
},
{
"epoch": 1.377839225755481,
"grad_norm": 0.1044921875,
"learning_rate": 0.0001161781996552765,
"loss": 0.0649,
"step": 218
},
{
"epoch": 1.3841595891763776,
"grad_norm": 0.1376953125,
"learning_rate": 0.00011550968589050799,
"loss": 0.0729,
"step": 219
},
{
"epoch": 1.3904799525972744,
"grad_norm": 0.1005859375,
"learning_rate": 0.00011484046113306262,
"loss": 0.0555,
"step": 220
},
{
"epoch": 1.396800316018171,
"grad_norm": 0.12060546875,
"learning_rate": 0.0001141705560614406,
"loss": 0.0804,
"step": 221
},
{
"epoch": 1.4031206794390678,
"grad_norm": 0.11181640625,
"learning_rate": 0.00011350000138532902,
"loss": 0.0694,
"step": 222
},
{
"epoch": 1.4094410428599644,
"grad_norm": 0.1162109375,
"learning_rate": 0.00011282882784419398,
"loss": 0.0784,
"step": 223
},
{
"epoch": 1.4157614062808612,
"grad_norm": 0.11181640625,
"learning_rate": 0.00011215706620587149,
"loss": 0.0694,
"step": 224
},
{
"epoch": 1.4157614062808612,
"eval_loss": 0.09438645094633102,
"eval_runtime": 52.0469,
"eval_samples_per_second": 20.482,
"eval_steps_per_second": 20.482,
"step": 224
},
{
"epoch": 1.4220817697017578,
"grad_norm": 0.10888671875,
"learning_rate": 0.00011148474726515716,
"loss": 0.0687,
"step": 225
},
{
"epoch": 1.4284021331226546,
"grad_norm": 0.1181640625,
"learning_rate": 0.00011081190184239419,
"loss": 0.0644,
"step": 226
},
{
"epoch": 1.4347224965435512,
"grad_norm": 0.11474609375,
"learning_rate": 0.0001101385607820608,
"loss": 0.0791,
"step": 227
},
{
"epoch": 1.441042859964448,
"grad_norm": 0.103515625,
"learning_rate": 0.0001094647549513561,
"loss": 0.0667,
"step": 228
},
{
"epoch": 1.4473632233853446,
"grad_norm": 0.109375,
"learning_rate": 0.00010879051523878522,
"loss": 0.0662,
"step": 229
},
{
"epoch": 1.4536835868062414,
"grad_norm": 0.11474609375,
"learning_rate": 0.00010811587255274313,
"loss": 0.0726,
"step": 230
},
{
"epoch": 1.460003950227138,
"grad_norm": 0.10400390625,
"learning_rate": 0.00010744085782009792,
"loss": 0.0639,
"step": 231
},
{
"epoch": 1.4663243136480348,
"grad_norm": 0.1181640625,
"learning_rate": 0.00010676550198477293,
"loss": 0.0728,
"step": 232
},
{
"epoch": 1.4726446770689314,
"grad_norm": 0.11669921875,
"learning_rate": 0.00010608983600632831,
"loss": 0.0784,
"step": 233
},
{
"epoch": 1.4789650404898282,
"grad_norm": 0.103515625,
"learning_rate": 0.00010541389085854176,
"loss": 0.0635,
"step": 234
},
{
"epoch": 1.4852854039107248,
"grad_norm": 0.10302734375,
"learning_rate": 0.00010473769752798859,
"loss": 0.0613,
"step": 235
},
{
"epoch": 1.4916057673316216,
"grad_norm": 0.107421875,
"learning_rate": 0.00010406128701262128,
"loss": 0.0712,
"step": 236
},
{
"epoch": 1.4979261307525182,
"grad_norm": 0.111328125,
"learning_rate": 0.00010338469032034845,
"loss": 0.069,
"step": 237
},
{
"epoch": 1.5042464941734148,
"grad_norm": 0.10400390625,
"learning_rate": 0.00010270793846761347,
"loss": 0.0643,
"step": 238
},
{
"epoch": 1.5105668575943116,
"grad_norm": 0.11865234375,
"learning_rate": 0.00010203106247797243,
"loss": 0.0768,
"step": 239
},
{
"epoch": 1.5168872210152085,
"grad_norm": 0.09521484375,
"learning_rate": 0.00010135409338067219,
"loss": 0.0555,
"step": 240
},
{
"epoch": 1.5168872210152085,
"eval_loss": 0.0922728106379509,
"eval_runtime": 52.2924,
"eval_samples_per_second": 20.385,
"eval_steps_per_second": 20.385,
"step": 240
},
{
"epoch": 1.523207584436105,
"grad_norm": 0.1044921875,
"learning_rate": 0.00010067706220922785,
"loss": 0.0578,
"step": 241
},
{
"epoch": 1.5295279478570016,
"grad_norm": 0.1103515625,
"learning_rate": 0.0001,
"loss": 0.0706,
"step": 242
},
{
"epoch": 1.5358483112778984,
"grad_norm": 0.09423828125,
"learning_rate": 9.932293779077216e-05,
"loss": 0.0602,
"step": 243
},
{
"epoch": 1.5421686746987953,
"grad_norm": 0.10791015625,
"learning_rate": 9.864590661932783e-05,
"loss": 0.0632,
"step": 244
},
{
"epoch": 1.5484890381196919,
"grad_norm": 0.1044921875,
"learning_rate": 9.796893752202758e-05,
"loss": 0.0628,
"step": 245
},
{
"epoch": 1.5548094015405884,
"grad_norm": 0.10009765625,
"learning_rate": 9.729206153238657e-05,
"loss": 0.0668,
"step": 246
},
{
"epoch": 1.5611297649614853,
"grad_norm": 0.11767578125,
"learning_rate": 9.661530967965156e-05,
"loss": 0.0734,
"step": 247
},
{
"epoch": 1.567450128382382,
"grad_norm": 0.09375,
"learning_rate": 9.59387129873787e-05,
"loss": 0.0584,
"step": 248
},
{
"epoch": 1.5737704918032787,
"grad_norm": 0.10595703125,
"learning_rate": 9.526230247201142e-05,
"loss": 0.0601,
"step": 249
},
{
"epoch": 1.5800908552241753,
"grad_norm": 0.08203125,
"learning_rate": 9.458610914145826e-05,
"loss": 0.046,
"step": 250
},
{
"epoch": 1.586411218645072,
"grad_norm": 0.1083984375,
"learning_rate": 9.391016399367172e-05,
"loss": 0.0624,
"step": 251
},
{
"epoch": 1.5927315820659689,
"grad_norm": 0.1171875,
"learning_rate": 9.323449801522709e-05,
"loss": 0.0755,
"step": 252
},
{
"epoch": 1.5990519454868655,
"grad_norm": 0.12158203125,
"learning_rate": 9.255914217990211e-05,
"loss": 0.0758,
"step": 253
},
{
"epoch": 1.605372308907762,
"grad_norm": 0.10107421875,
"learning_rate": 9.18841274472569e-05,
"loss": 0.0637,
"step": 254
},
{
"epoch": 1.6116926723286589,
"grad_norm": 0.10986328125,
"learning_rate": 9.120948476121479e-05,
"loss": 0.0654,
"step": 255
},
{
"epoch": 1.6180130357495557,
"grad_norm": 0.1123046875,
"learning_rate": 9.05352450486439e-05,
"loss": 0.0719,
"step": 256
},
{
"epoch": 1.6180130357495557,
"eval_loss": 0.09138685464859009,
"eval_runtime": 54.5034,
"eval_samples_per_second": 19.558,
"eval_steps_per_second": 19.558,
"step": 256
},
{
"epoch": 1.6243333991704523,
"grad_norm": 0.1044921875,
"learning_rate": 8.986143921793923e-05,
"loss": 0.0661,
"step": 257
},
{
"epoch": 1.6306537625913489,
"grad_norm": 0.11376953125,
"learning_rate": 8.918809815760585e-05,
"loss": 0.0764,
"step": 258
},
{
"epoch": 1.6369741260122457,
"grad_norm": 0.1025390625,
"learning_rate": 8.851525273484286e-05,
"loss": 0.0648,
"step": 259
},
{
"epoch": 1.6432944894331425,
"grad_norm": 0.11474609375,
"learning_rate": 8.78429337941285e-05,
"loss": 0.081,
"step": 260
},
{
"epoch": 1.649614852854039,
"grad_norm": 0.1201171875,
"learning_rate": 8.717117215580606e-05,
"loss": 0.0738,
"step": 261
},
{
"epoch": 1.6559352162749357,
"grad_norm": 0.10400390625,
"learning_rate": 8.649999861467099e-05,
"loss": 0.0688,
"step": 262
},
{
"epoch": 1.6622555796958325,
"grad_norm": 0.0908203125,
"learning_rate": 8.582944393855941e-05,
"loss": 0.0541,
"step": 263
},
{
"epoch": 1.6685759431167293,
"grad_norm": 0.10400390625,
"learning_rate": 8.515953886693739e-05,
"loss": 0.065,
"step": 264
},
{
"epoch": 1.674896306537626,
"grad_norm": 0.10205078125,
"learning_rate": 8.449031410949206e-05,
"loss": 0.0665,
"step": 265
},
{
"epoch": 1.6812166699585225,
"grad_norm": 0.1015625,
"learning_rate": 8.382180034472353e-05,
"loss": 0.0592,
"step": 266
},
{
"epoch": 1.6875370333794193,
"grad_norm": 0.10302734375,
"learning_rate": 8.315402821853886e-05,
"loss": 0.0592,
"step": 267
},
{
"epoch": 1.693857396800316,
"grad_norm": 0.103515625,
"learning_rate": 8.248702834284693e-05,
"loss": 0.0638,
"step": 268
},
{
"epoch": 1.7001777602212127,
"grad_norm": 0.103515625,
"learning_rate": 8.18208312941552e-05,
"loss": 0.0657,
"step": 269
},
{
"epoch": 1.7064981236421093,
"grad_norm": 0.12060546875,
"learning_rate": 8.115546761216822e-05,
"loss": 0.0802,
"step": 270
},
{
"epoch": 1.712818487063006,
"grad_norm": 0.111328125,
"learning_rate": 8.049096779838719e-05,
"loss": 0.0711,
"step": 271
},
{
"epoch": 1.719138850483903,
"grad_norm": 0.10986328125,
"learning_rate": 7.982736231471224e-05,
"loss": 0.071,
"step": 272
},
{
"epoch": 1.719138850483903,
"eval_loss": 0.08944293856620789,
"eval_runtime": 54.5995,
"eval_samples_per_second": 19.524,
"eval_steps_per_second": 19.524,
"step": 272
},
{
"epoch": 1.7254592139047995,
"grad_norm": 0.11279296875,
"learning_rate": 7.916468158204576e-05,
"loss": 0.0717,
"step": 273
},
{
"epoch": 1.731779577325696,
"grad_norm": 0.1064453125,
"learning_rate": 7.85029559788976e-05,
"loss": 0.0719,
"step": 274
},
{
"epoch": 1.738099940746593,
"grad_norm": 0.10302734375,
"learning_rate": 7.784221583999298e-05,
"loss": 0.064,
"step": 275
},
{
"epoch": 1.7444203041674897,
"grad_norm": 0.09765625,
"learning_rate": 7.718249145488142e-05,
"loss": 0.0589,
"step": 276
},
{
"epoch": 1.7507406675883863,
"grad_norm": 0.1064453125,
"learning_rate": 7.652381306654851e-05,
"loss": 0.0717,
"step": 277
},
{
"epoch": 1.757061031009283,
"grad_norm": 0.0908203125,
"learning_rate": 7.586621087002945e-05,
"loss": 0.052,
"step": 278
},
{
"epoch": 1.7633813944301797,
"grad_norm": 0.0947265625,
"learning_rate": 7.520971501102469e-05,
"loss": 0.0553,
"step": 279
},
{
"epoch": 1.7697017578510765,
"grad_norm": 0.11181640625,
"learning_rate": 7.455435558451823e-05,
"loss": 0.0703,
"step": 280
},
{
"epoch": 1.7760221212719731,
"grad_norm": 0.10546875,
"learning_rate": 7.390016263339782e-05,
"loss": 0.0644,
"step": 281
},
{
"epoch": 1.7823424846928697,
"grad_norm": 0.103515625,
"learning_rate": 7.324716614707793e-05,
"loss": 0.0646,
"step": 282
},
{
"epoch": 1.7886628481137665,
"grad_norm": 0.09912109375,
"learning_rate": 7.259539606012478e-05,
"loss": 0.0549,
"step": 283
},
{
"epoch": 1.7949832115346633,
"grad_norm": 0.11474609375,
"learning_rate": 7.194488225088417e-05,
"loss": 0.0675,
"step": 284
},
{
"epoch": 1.80130357495556,
"grad_norm": 0.12451171875,
"learning_rate": 7.129565454011189e-05,
"loss": 0.0788,
"step": 285
},
{
"epoch": 1.8076239383764565,
"grad_norm": 0.119140625,
"learning_rate": 7.064774268960653e-05,
"loss": 0.0845,
"step": 286
},
{
"epoch": 1.8139443017973533,
"grad_norm": 0.10693359375,
"learning_rate": 7.000117640084526e-05,
"loss": 0.064,
"step": 287
},
{
"epoch": 1.8202646652182501,
"grad_norm": 0.115234375,
"learning_rate": 6.93559853136221e-05,
"loss": 0.073,
"step": 288
},
{
"epoch": 1.8202646652182501,
"eval_loss": 0.0876438096165657,
"eval_runtime": 54.5873,
"eval_samples_per_second": 19.528,
"eval_steps_per_second": 19.528,
"step": 288
},
{
"epoch": 1.8265850286391467,
"grad_norm": 0.1044921875,
"learning_rate": 6.871219900468947e-05,
"loss": 0.0623,
"step": 289
},
{
"epoch": 1.8329053920600433,
"grad_norm": 0.10205078125,
"learning_rate": 6.806984698640202e-05,
"loss": 0.0584,
"step": 290
},
{
"epoch": 1.8392257554809401,
"grad_norm": 0.10400390625,
"learning_rate": 6.742895870536388e-05,
"loss": 0.0632,
"step": 291
},
{
"epoch": 1.845546118901837,
"grad_norm": 0.09912109375,
"learning_rate": 6.678956354107882e-05,
"loss": 0.0611,
"step": 292
},
{
"epoch": 1.8518664823227335,
"grad_norm": 0.10791015625,
"learning_rate": 6.615169080460331e-05,
"loss": 0.0623,
"step": 293
},
{
"epoch": 1.8581868457436301,
"grad_norm": 0.09130859375,
"learning_rate": 6.551536973720298e-05,
"loss": 0.0558,
"step": 294
},
{
"epoch": 1.864507209164527,
"grad_norm": 0.09814453125,
"learning_rate": 6.488062950901198e-05,
"loss": 0.0548,
"step": 295
},
{
"epoch": 1.8708275725854238,
"grad_norm": 0.1064453125,
"learning_rate": 6.4247499217696e-05,
"loss": 0.07,
"step": 296
},
{
"epoch": 1.8771479360063204,
"grad_norm": 0.09716796875,
"learning_rate": 6.361600788711816e-05,
"loss": 0.0584,
"step": 297
},
{
"epoch": 1.883468299427217,
"grad_norm": 0.10400390625,
"learning_rate": 6.298618446600856e-05,
"loss": 0.0602,
"step": 298
},
{
"epoch": 1.8897886628481138,
"grad_norm": 0.09716796875,
"learning_rate": 6.23580578266374e-05,
"loss": 0.0554,
"step": 299
},
{
"epoch": 1.8961090262690106,
"grad_norm": 0.10546875,
"learning_rate": 6.173165676349103e-05,
"loss": 0.0649,
"step": 300
},
{
"epoch": 1.9024293896899072,
"grad_norm": 0.11181640625,
"learning_rate": 6.11070099919524e-05,
"loss": 0.0689,
"step": 301
},
{
"epoch": 1.9087497531108037,
"grad_norm": 0.11328125,
"learning_rate": 6.048414614698448e-05,
"loss": 0.079,
"step": 302
},
{
"epoch": 1.9150701165317006,
"grad_norm": 0.1162109375,
"learning_rate": 5.9863093781817394e-05,
"loss": 0.0692,
"step": 303
},
{
"epoch": 1.9213904799525974,
"grad_norm": 0.09228515625,
"learning_rate": 5.924388136663992e-05,
"loss": 0.0543,
"step": 304
},
{
"epoch": 1.9213904799525974,
"eval_loss": 0.08687648177146912,
"eval_runtime": 54.6209,
"eval_samples_per_second": 19.516,
"eval_steps_per_second": 19.516,
"step": 304
},
{
"epoch": 1.927710843373494,
"grad_norm": 0.10107421875,
"learning_rate": 5.862653728729397e-05,
"loss": 0.0548,
"step": 305
},
{
"epoch": 1.9340312067943906,
"grad_norm": 0.09619140625,
"learning_rate": 5.801108984397354e-05,
"loss": 0.0491,
"step": 306
},
{
"epoch": 1.9403515702152874,
"grad_norm": 0.095703125,
"learning_rate": 5.739756724992736e-05,
"loss": 0.052,
"step": 307
},
{
"epoch": 1.9466719336361842,
"grad_norm": 0.10400390625,
"learning_rate": 5.6785997630165435e-05,
"loss": 0.0704,
"step": 308
},
{
"epoch": 1.9529922970570808,
"grad_norm": 0.10595703125,
"learning_rate": 5.61764090201699e-05,
"loss": 0.0694,
"step": 309
},
{
"epoch": 1.9593126604779774,
"grad_norm": 0.115234375,
"learning_rate": 5.5568829364609664e-05,
"loss": 0.0747,
"step": 310
},
{
"epoch": 1.9656330238988742,
"grad_norm": 0.1005859375,
"learning_rate": 5.4963286516059496e-05,
"loss": 0.0579,
"step": 311
},
{
"epoch": 1.971953387319771,
"grad_norm": 0.09814453125,
"learning_rate": 5.435980823372311e-05,
"loss": 0.057,
"step": 312
},
{
"epoch": 1.9782737507406676,
"grad_norm": 0.10205078125,
"learning_rate": 5.375842218216076e-05,
"loss": 0.0635,
"step": 313
},
{
"epoch": 1.9845941141615642,
"grad_norm": 0.10205078125,
"learning_rate": 5.3159155930021e-05,
"loss": 0.0592,
"step": 314
},
{
"epoch": 1.990914477582461,
"grad_norm": 0.09326171875,
"learning_rate": 5.25620369487767e-05,
"loss": 0.0571,
"step": 315
},
{
"epoch": 1.9972348410033578,
"grad_norm": 0.09228515625,
"learning_rate": 5.196709261146606e-05,
"loss": 0.0499,
"step": 316
},
{
"epoch": 2.0035552044242544,
"grad_norm": 0.0859375,
"learning_rate": 5.1374350191437446e-05,
"loss": 0.0486,
"step": 317
},
{
"epoch": 2.009875567845151,
"grad_norm": 0.078125,
"learning_rate": 5.078383686109926e-05,
"loss": 0.0442,
"step": 318
},
{
"epoch": 2.016195931266048,
"grad_norm": 0.08154296875,
"learning_rate": 5.0195579690674447e-05,
"loss": 0.0387,
"step": 319
},
{
"epoch": 2.0225162946869446,
"grad_norm": 0.08447265625,
"learning_rate": 4.9609605646959226e-05,
"loss": 0.043,
"step": 320
},
{
"epoch": 2.0225162946869446,
"eval_loss": 0.08655604720115662,
"eval_runtime": 54.8335,
"eval_samples_per_second": 19.441,
"eval_steps_per_second": 19.441,
"step": 320
},
{
"epoch": 2.028836658107841,
"grad_norm": 0.07958984375,
"learning_rate": 4.902594159208723e-05,
"loss": 0.0343,
"step": 321
},
{
"epoch": 2.035157021528738,
"grad_norm": 0.07958984375,
"learning_rate": 4.844461428229782e-05,
"loss": 0.0404,
"step": 322
},
{
"epoch": 2.041477384949635,
"grad_norm": 0.080078125,
"learning_rate": 4.786565036670972e-05,
"loss": 0.0369,
"step": 323
},
{
"epoch": 2.0477977483705314,
"grad_norm": 0.0908203125,
"learning_rate": 4.728907638609925e-05,
"loss": 0.0411,
"step": 324
},
{
"epoch": 2.054118111791428,
"grad_norm": 0.08349609375,
"learning_rate": 4.6714918771683646e-05,
"loss": 0.0356,
"step": 325
},
{
"epoch": 2.0604384752123246,
"grad_norm": 0.0908203125,
"learning_rate": 4.614320384390959e-05,
"loss": 0.0458,
"step": 326
},
{
"epoch": 2.0667588386332216,
"grad_norm": 0.095703125,
"learning_rate": 4.557395781124632e-05,
"loss": 0.0394,
"step": 327
},
{
"epoch": 2.073079202054118,
"grad_norm": 0.09423828125,
"learning_rate": 4.500720676898452e-05,
"loss": 0.037,
"step": 328
},
{
"epoch": 2.079399565475015,
"grad_norm": 0.0927734375,
"learning_rate": 4.444297669803981e-05,
"loss": 0.0416,
"step": 329
},
{
"epoch": 2.0857199288959114,
"grad_norm": 0.10302734375,
"learning_rate": 4.388129346376178e-05,
"loss": 0.0497,
"step": 330
},
{
"epoch": 2.092040292316808,
"grad_norm": 0.0849609375,
"learning_rate": 4.3322182814748436e-05,
"loss": 0.0294,
"step": 331
},
{
"epoch": 2.098360655737705,
"grad_norm": 0.0927734375,
"learning_rate": 4.276567038166563e-05,
"loss": 0.0325,
"step": 332
},
{
"epoch": 2.1046810191586016,
"grad_norm": 0.10302734375,
"learning_rate": 4.221178167607226e-05,
"loss": 0.036,
"step": 333
},
{
"epoch": 2.111001382579498,
"grad_norm": 0.0869140625,
"learning_rate": 4.16605420892506e-05,
"loss": 0.0337,
"step": 334
},
{
"epoch": 2.1173217460003952,
"grad_norm": 0.1142578125,
"learning_rate": 4.111197689104258e-05,
"loss": 0.0439,
"step": 335
},
{
"epoch": 2.123642109421292,
"grad_norm": 0.09619140625,
"learning_rate": 4.0566111228691064e-05,
"loss": 0.0333,
"step": 336
},
{
"epoch": 2.123642109421292,
"eval_loss": 0.09336408227682114,
"eval_runtime": 54.6766,
"eval_samples_per_second": 19.496,
"eval_steps_per_second": 19.496,
"step": 336
},
{
"epoch": 2.1299624728421884,
"grad_norm": 0.10498046875,
"learning_rate": 4.002297012568722e-05,
"loss": 0.043,
"step": 337
},
{
"epoch": 2.136282836263085,
"grad_norm": 0.1025390625,
"learning_rate": 3.948257848062351e-05,
"loss": 0.0376,
"step": 338
},
{
"epoch": 2.1426031996839816,
"grad_norm": 0.095703125,
"learning_rate": 3.894496106605197e-05,
"loss": 0.0318,
"step": 339
},
{
"epoch": 2.1489235631048786,
"grad_norm": 0.10595703125,
"learning_rate": 3.841014252734896e-05,
"loss": 0.0459,
"step": 340
},
{
"epoch": 2.1552439265257752,
"grad_norm": 0.09814453125,
"learning_rate": 3.787814738158504e-05,
"loss": 0.0384,
"step": 341
},
{
"epoch": 2.161564289946672,
"grad_norm": 0.099609375,
"learning_rate": 3.734900001640135e-05,
"loss": 0.0324,
"step": 342
},
{
"epoch": 2.167884653367569,
"grad_norm": 0.11328125,
"learning_rate": 3.6822724688891416e-05,
"loss": 0.0453,
"step": 343
},
{
"epoch": 2.1742050167884654,
"grad_norm": 0.10498046875,
"learning_rate": 3.629934552448925e-05,
"loss": 0.0399,
"step": 344
},
{
"epoch": 2.180525380209362,
"grad_norm": 0.1025390625,
"learning_rate": 3.5778886515863474e-05,
"loss": 0.0383,
"step": 345
},
{
"epoch": 2.1868457436302586,
"grad_norm": 0.09619140625,
"learning_rate": 3.5261371521817244e-05,
"loss": 0.0354,
"step": 346
},
{
"epoch": 2.1931661070511552,
"grad_norm": 0.099609375,
"learning_rate": 3.4746824266194744e-05,
"loss": 0.0379,
"step": 347
},
{
"epoch": 2.1994864704720523,
"grad_norm": 0.09912109375,
"learning_rate": 3.423526833679355e-05,
"loss": 0.0356,
"step": 348
},
{
"epoch": 2.205806833892949,
"grad_norm": 0.11376953125,
"learning_rate": 3.3726727184283236e-05,
"loss": 0.0467,
"step": 349
},
{
"epoch": 2.2121271973138454,
"grad_norm": 0.10107421875,
"learning_rate": 3.322122412113047e-05,
"loss": 0.0372,
"step": 350
},
{
"epoch": 2.2184475607347425,
"grad_norm": 0.10693359375,
"learning_rate": 3.271878232053025e-05,
"loss": 0.0484,
"step": 351
},
{
"epoch": 2.224767924155639,
"grad_norm": 0.109375,
"learning_rate": 3.2219424815343735e-05,
"loss": 0.0392,
"step": 352
},
{
"epoch": 2.224767924155639,
"eval_loss": 0.09237346053123474,
"eval_runtime": 52.5928,
"eval_samples_per_second": 20.269,
"eval_steps_per_second": 20.269,
"step": 352
},
{
"epoch": 2.2310882875765357,
"grad_norm": 0.10546875,
"learning_rate": 3.172317449704216e-05,
"loss": 0.0391,
"step": 353
},
{
"epoch": 2.2374086509974322,
"grad_norm": 0.1025390625,
"learning_rate": 3.123005411465766e-05,
"loss": 0.0366,
"step": 354
},
{
"epoch": 2.243729014418329,
"grad_norm": 0.10888671875,
"learning_rate": 3.0740086273740295e-05,
"loss": 0.0369,
"step": 355
},
{
"epoch": 2.250049377839226,
"grad_norm": 0.10546875,
"learning_rate": 3.0253293435321793e-05,
"loss": 0.0385,
"step": 356
},
{
"epoch": 2.2563697412601225,
"grad_norm": 0.1044921875,
"learning_rate": 2.9769697914885862e-05,
"loss": 0.036,
"step": 357
},
{
"epoch": 2.262690104681019,
"grad_norm": 0.115234375,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.0437,
"step": 358
},
{
"epoch": 2.269010468101916,
"grad_norm": 0.10498046875,
"learning_rate": 2.881218735602553e-05,
"loss": 0.0392,
"step": 359
},
{
"epoch": 2.2753308315228127,
"grad_norm": 0.10400390625,
"learning_rate": 2.8338316211655536e-05,
"loss": 0.0396,
"step": 360
},
{
"epoch": 2.2816511949437093,
"grad_norm": 0.09814453125,
"learning_rate": 2.7867730171364624e-05,
"loss": 0.0391,
"step": 361
},
{
"epoch": 2.287971558364606,
"grad_norm": 0.10498046875,
"learning_rate": 2.7400450807686938e-05,
"loss": 0.0335,
"step": 362
},
{
"epoch": 2.2942919217855025,
"grad_norm": 0.103515625,
"learning_rate": 2.6936499541572445e-05,
"loss": 0.0351,
"step": 363
},
{
"epoch": 2.3006122852063995,
"grad_norm": 0.103515625,
"learning_rate": 2.647589764140499e-05,
"loss": 0.0373,
"step": 364
},
{
"epoch": 2.306932648627296,
"grad_norm": 0.1044921875,
"learning_rate": 2.60186662220273e-05,
"loss": 0.0411,
"step": 365
},
{
"epoch": 2.3132530120481927,
"grad_norm": 0.09423828125,
"learning_rate": 2.5564826243772966e-05,
"loss": 0.0343,
"step": 366
},
{
"epoch": 2.3195733754690897,
"grad_norm": 0.11376953125,
"learning_rate": 2.5114398511505734e-05,
"loss": 0.0447,
"step": 367
},
{
"epoch": 2.3258937388899863,
"grad_norm": 0.10693359375,
"learning_rate": 2.4667403673665623e-05,
"loss": 0.0453,
"step": 368
},
{
"epoch": 2.3258937388899863,
"eval_loss": 0.09191481024026871,
"eval_runtime": 52.7915,
"eval_samples_per_second": 20.193,
"eval_steps_per_second": 20.193,
"step": 368
},
{
"epoch": 2.332214102310883,
"grad_norm": 0.10595703125,
"learning_rate": 2.4223862221322424e-05,
"loss": 0.0316,
"step": 369
},
{
"epoch": 2.3385344657317795,
"grad_norm": 0.103515625,
"learning_rate": 2.3783794487236365e-05,
"loss": 0.0364,
"step": 370
},
{
"epoch": 2.344854829152676,
"grad_norm": 0.107421875,
"learning_rate": 2.3347220644926028e-05,
"loss": 0.0406,
"step": 371
},
{
"epoch": 2.351175192573573,
"grad_norm": 0.1064453125,
"learning_rate": 2.2914160707743538e-05,
"loss": 0.0428,
"step": 372
},
{
"epoch": 2.3574955559944697,
"grad_norm": 0.0927734375,
"learning_rate": 2.248463452795705e-05,
"loss": 0.0295,
"step": 373
},
{
"epoch": 2.3638159194153663,
"grad_norm": 0.10791015625,
"learning_rate": 2.205866179584084e-05,
"loss": 0.0383,
"step": 374
},
{
"epoch": 2.3701362828362633,
"grad_norm": 0.1064453125,
"learning_rate": 2.1636262038772504e-05,
"loss": 0.0377,
"step": 375
},
{
"epoch": 2.37645664625716,
"grad_norm": 0.1005859375,
"learning_rate": 2.121745462033784e-05,
"loss": 0.0432,
"step": 376
},
{
"epoch": 2.3827770096780565,
"grad_norm": 0.099609375,
"learning_rate": 2.080225873944328e-05,
"loss": 0.0363,
"step": 377
},
{
"epoch": 2.389097373098953,
"grad_norm": 0.11328125,
"learning_rate": 2.0390693429435627e-05,
"loss": 0.0406,
"step": 378
},
{
"epoch": 2.3954177365198497,
"grad_norm": 0.09912109375,
"learning_rate": 1.998277755722965e-05,
"loss": 0.0323,
"step": 379
},
{
"epoch": 2.4017380999407467,
"grad_norm": 0.10009765625,
"learning_rate": 1.957852982244309e-05,
"loss": 0.0359,
"step": 380
},
{
"epoch": 2.4080584633616433,
"grad_norm": 0.10693359375,
"learning_rate": 1.9177968756539567e-05,
"loss": 0.0369,
"step": 381
},
{
"epoch": 2.41437882678254,
"grad_norm": 0.119140625,
"learning_rate": 1.87811127219789e-05,
"loss": 0.044,
"step": 382
},
{
"epoch": 2.420699190203437,
"grad_norm": 0.11181640625,
"learning_rate": 1.838797991137543e-05,
"loss": 0.0406,
"step": 383
},
{
"epoch": 2.4270195536243335,
"grad_norm": 0.119140625,
"learning_rate": 1.7998588346664115e-05,
"loss": 0.0488,
"step": 384
},
{
"epoch": 2.4270195536243335,
"eval_loss": 0.09201209992170334,
"eval_runtime": 52.2314,
"eval_samples_per_second": 20.409,
"eval_steps_per_second": 20.409,
"step": 384
},
{
"epoch": 2.43333991704523,
"grad_norm": 0.1044921875,
"learning_rate": 1.761295587827416e-05,
"loss": 0.0399,
"step": 385
},
{
"epoch": 2.4396602804661267,
"grad_norm": 0.10986328125,
"learning_rate": 1.7231100184310956e-05,
"loss": 0.0451,
"step": 386
},
{
"epoch": 2.4459806438870233,
"grad_norm": 0.10888671875,
"learning_rate": 1.6853038769745467e-05,
"loss": 0.0423,
"step": 387
},
{
"epoch": 2.4523010073079203,
"grad_norm": 0.1083984375,
"learning_rate": 1.6478788965611993e-05,
"loss": 0.0339,
"step": 388
},
{
"epoch": 2.458621370728817,
"grad_norm": 0.09375,
"learning_rate": 1.6108367928213476e-05,
"loss": 0.0346,
"step": 389
},
{
"epoch": 2.4649417341497135,
"grad_norm": 0.09423828125,
"learning_rate": 1.5741792638335095e-05,
"loss": 0.0339,
"step": 390
},
{
"epoch": 2.4712620975706105,
"grad_norm": 0.1103515625,
"learning_rate": 1.5379079900465953e-05,
"loss": 0.0351,
"step": 391
},
{
"epoch": 2.477582460991507,
"grad_norm": 0.1083984375,
"learning_rate": 1.502024634202851e-05,
"loss": 0.0364,
"step": 392
},
{
"epoch": 2.4839028244124037,
"grad_norm": 0.0966796875,
"learning_rate": 1.4665308412616596e-05,
"loss": 0.0367,
"step": 393
},
{
"epoch": 2.4902231878333003,
"grad_norm": 0.107421875,
"learning_rate": 1.4314282383241096e-05,
"loss": 0.0376,
"step": 394
},
{
"epoch": 2.496543551254197,
"grad_norm": 0.1181640625,
"learning_rate": 1.3967184345584173e-05,
"loss": 0.0399,
"step": 395
},
{
"epoch": 2.502863914675094,
"grad_norm": 0.09912109375,
"learning_rate": 1.3624030211261685e-05,
"loss": 0.034,
"step": 396
},
{
"epoch": 2.5091842780959905,
"grad_norm": 0.0908203125,
"learning_rate": 1.3284835711093535e-05,
"loss": 0.0284,
"step": 397
},
{
"epoch": 2.515504641516887,
"grad_norm": 0.0869140625,
"learning_rate": 1.2949616394382802e-05,
"loss": 0.0286,
"step": 398
},
{
"epoch": 2.521825004937784,
"grad_norm": 0.095703125,
"learning_rate": 1.2618387628202699e-05,
"loss": 0.0349,
"step": 399
},
{
"epoch": 2.5281453683586808,
"grad_norm": 0.10107421875,
"learning_rate": 1.2291164596692305e-05,
"loss": 0.0361,
"step": 400
},
{
"epoch": 2.5281453683586808,
"eval_loss": 0.09150896966457367,
"eval_runtime": 53.9759,
"eval_samples_per_second": 19.75,
"eval_steps_per_second": 19.75,
"step": 400
},
{
"epoch": 2.5344657317795773,
"grad_norm": 0.09912109375,
"learning_rate": 1.196796230036038e-05,
"loss": 0.0372,
"step": 401
},
{
"epoch": 2.540786095200474,
"grad_norm": 0.1083984375,
"learning_rate": 1.1648795555397719e-05,
"loss": 0.0364,
"step": 402
},
{
"epoch": 2.5471064586213705,
"grad_norm": 0.10205078125,
"learning_rate": 1.1333678992998043e-05,
"loss": 0.0391,
"step": 403
},
{
"epoch": 2.5534268220422676,
"grad_norm": 0.10693359375,
"learning_rate": 1.1022627058687163e-05,
"loss": 0.0405,
"step": 404
},
{
"epoch": 2.559747185463164,
"grad_norm": 0.1083984375,
"learning_rate": 1.071565401166087e-05,
"loss": 0.0387,
"step": 405
},
{
"epoch": 2.5660675488840607,
"grad_norm": 0.111328125,
"learning_rate": 1.0412773924131203e-05,
"loss": 0.0407,
"step": 406
},
{
"epoch": 2.572387912304958,
"grad_norm": 0.10595703125,
"learning_rate": 1.0114000680681357e-05,
"loss": 0.0392,
"step": 407
},
{
"epoch": 2.5787082757258544,
"grad_norm": 0.10400390625,
"learning_rate": 9.819347977629202e-06,
"loss": 0.0392,
"step": 408
},
{
"epoch": 2.585028639146751,
"grad_norm": 0.115234375,
"learning_rate": 9.528829322399414e-06,
"loss": 0.0344,
"step": 409
},
{
"epoch": 2.5913490025676476,
"grad_norm": 0.09912109375,
"learning_rate": 9.242458032904311e-06,
"loss": 0.0334,
"step": 410
},
{
"epoch": 2.597669365988544,
"grad_norm": 0.103515625,
"learning_rate": 8.96024723693324e-06,
"loss": 0.034,
"step": 411
},
{
"epoch": 2.603989729409441,
"grad_norm": 0.09716796875,
"learning_rate": 8.682209871550884e-06,
"loss": 0.0339,
"step": 412
},
{
"epoch": 2.6103100928303378,
"grad_norm": 0.0888671875,
"learning_rate": 8.408358682504147e-06,
"loss": 0.032,
"step": 413
},
{
"epoch": 2.6166304562512344,
"grad_norm": 0.10888671875,
"learning_rate": 8.138706223637827e-06,
"loss": 0.0387,
"step": 414
},
{
"epoch": 2.6229508196721314,
"grad_norm": 0.09814453125,
"learning_rate": 7.873264856319218e-06,
"loss": 0.0376,
"step": 415
},
{
"epoch": 2.629271183093028,
"grad_norm": 0.10986328125,
"learning_rate": 7.612046748871327e-06,
"loss": 0.0357,
"step": 416
},
{
"epoch": 2.629271183093028,
"eval_loss": 0.09121152013540268,
"eval_runtime": 54.7566,
"eval_samples_per_second": 19.468,
"eval_steps_per_second": 19.468,
"step": 416
},
{
"epoch": 2.6355915465139246,
"grad_norm": 0.10595703125,
"learning_rate": 7.3550638760152e-06,
"loss": 0.0411,
"step": 417
},
{
"epoch": 2.641911909934821,
"grad_norm": 0.10888671875,
"learning_rate": 7.102328018320858e-06,
"loss": 0.037,
"step": 418
},
{
"epoch": 2.6482322733557178,
"grad_norm": 0.09912109375,
"learning_rate": 6.853850761667291e-06,
"loss": 0.0343,
"step": 419
},
{
"epoch": 2.654552636776615,
"grad_norm": 0.107421875,
"learning_rate": 6.609643496711349e-06,
"loss": 0.037,
"step": 420
},
{
"epoch": 2.6608730001975114,
"grad_norm": 0.10205078125,
"learning_rate": 6.36971741836555e-06,
"loss": 0.0407,
"step": 421
},
{
"epoch": 2.667193363618408,
"grad_norm": 0.1005859375,
"learning_rate": 6.13408352528495e-06,
"loss": 0.0346,
"step": 422
},
{
"epoch": 2.673513727039305,
"grad_norm": 0.09716796875,
"learning_rate": 5.902752619362861e-06,
"loss": 0.0376,
"step": 423
},
{
"epoch": 2.6798340904602016,
"grad_norm": 0.10546875,
"learning_rate": 5.6757353052356964e-06,
"loss": 0.0344,
"step": 424
},
{
"epoch": 2.686154453881098,
"grad_norm": 0.10498046875,
"learning_rate": 5.453041989796903e-06,
"loss": 0.0392,
"step": 425
},
{
"epoch": 2.692474817301995,
"grad_norm": 0.1181640625,
"learning_rate": 5.2346828817197655e-06,
"loss": 0.0408,
"step": 426
},
{
"epoch": 2.6987951807228914,
"grad_norm": 0.0966796875,
"learning_rate": 5.020667990989525e-06,
"loss": 0.0343,
"step": 427
},
{
"epoch": 2.7051155441437884,
"grad_norm": 0.099609375,
"learning_rate": 4.811007128444445e-06,
"loss": 0.0412,
"step": 428
},
{
"epoch": 2.711435907564685,
"grad_norm": 0.09521484375,
"learning_rate": 4.605709905326117e-06,
"loss": 0.0317,
"step": 429
},
{
"epoch": 2.7177562709855816,
"grad_norm": 0.10205078125,
"learning_rate": 4.404785732838846e-06,
"loss": 0.0368,
"step": 430
},
{
"epoch": 2.7240766344064786,
"grad_norm": 0.1015625,
"learning_rate": 4.2082438217181385e-06,
"loss": 0.0333,
"step": 431
},
{
"epoch": 2.730396997827375,
"grad_norm": 0.09130859375,
"learning_rate": 4.016093181808623e-06,
"loss": 0.0364,
"step": 432
},
{
"epoch": 2.730396997827375,
"eval_loss": 0.09115952998399734,
"eval_runtime": 55.8478,
"eval_samples_per_second": 19.088,
"eval_steps_per_second": 19.088,
"step": 432
},
{
"epoch": 2.736717361248272,
"grad_norm": 0.0849609375,
"learning_rate": 3.828342621650882e-06,
"loss": 0.0275,
"step": 433
},
{
"epoch": 2.7430377246691684,
"grad_norm": 0.111328125,
"learning_rate": 3.6450007480777093e-06,
"loss": 0.0382,
"step": 434
},
{
"epoch": 2.749358088090065,
"grad_norm": 0.1171875,
"learning_rate": 3.466075965819582e-06,
"loss": 0.0385,
"step": 435
},
{
"epoch": 2.755678451510962,
"grad_norm": 0.0947265625,
"learning_rate": 3.2915764771193292e-06,
"loss": 0.0375,
"step": 436
},
{
"epoch": 2.7619988149318586,
"grad_norm": 0.10986328125,
"learning_rate": 3.1215102813561435e-06,
"loss": 0.0384,
"step": 437
},
{
"epoch": 2.768319178352755,
"grad_norm": 0.1044921875,
"learning_rate": 2.9558851746788517e-06,
"loss": 0.0409,
"step": 438
},
{
"epoch": 2.7746395417736522,
"grad_norm": 0.1015625,
"learning_rate": 2.7947087496486e-06,
"loss": 0.0369,
"step": 439
},
{
"epoch": 2.780959905194549,
"grad_norm": 0.1044921875,
"learning_rate": 2.6379883948907e-06,
"loss": 0.0381,
"step": 440
},
{
"epoch": 2.7872802686154454,
"grad_norm": 0.095703125,
"learning_rate": 2.4857312947559443e-06,
"loss": 0.0351,
"step": 441
},
{
"epoch": 2.793600632036342,
"grad_norm": 0.10546875,
"learning_rate": 2.3379444289913342e-06,
"loss": 0.0404,
"step": 442
},
{
"epoch": 2.7999209954572386,
"grad_norm": 0.09130859375,
"learning_rate": 2.194634572420029e-06,
"loss": 0.0329,
"step": 443
},
{
"epoch": 2.8062413588781356,
"grad_norm": 0.10791015625,
"learning_rate": 2.0558082946308232e-06,
"loss": 0.0372,
"step": 444
},
{
"epoch": 2.8125617222990322,
"grad_norm": 0.0986328125,
"learning_rate": 1.921471959676957e-06,
"loss": 0.0335,
"step": 445
},
{
"epoch": 2.818882085719929,
"grad_norm": 0.091796875,
"learning_rate": 1.7916317257844039e-06,
"loss": 0.0317,
"step": 446
},
{
"epoch": 2.825202449140826,
"grad_norm": 0.095703125,
"learning_rate": 1.6662935450695416e-06,
"loss": 0.0358,
"step": 447
},
{
"epoch": 2.8315228125617224,
"grad_norm": 0.0966796875,
"learning_rate": 1.545463163266303e-06,
"loss": 0.0365,
"step": 448
},
{
"epoch": 2.8315228125617224,
"eval_loss": 0.09120116382837296,
"eval_runtime": 55.6608,
"eval_samples_per_second": 19.152,
"eval_steps_per_second": 19.152,
"step": 448
},
{
"epoch": 2.837843175982619,
"grad_norm": 0.103515625,
"learning_rate": 1.4291461194628098e-06,
"loss": 0.0378,
"step": 449
},
{
"epoch": 2.8441635394035156,
"grad_norm": 0.0849609375,
"learning_rate": 1.317347745847386e-06,
"loss": 0.0271,
"step": 450
},
{
"epoch": 2.850483902824412,
"grad_norm": 0.095703125,
"learning_rate": 1.2100731674641984e-06,
"loss": 0.0369,
"step": 451
},
{
"epoch": 2.8568042662453093,
"grad_norm": 0.09765625,
"learning_rate": 1.107327301978245e-06,
"loss": 0.0353,
"step": 452
},
{
"epoch": 2.863124629666206,
"grad_norm": 0.09326171875,
"learning_rate": 1.0091148594499666e-06,
"loss": 0.0304,
"step": 453
},
{
"epoch": 2.8694449930871024,
"grad_norm": 0.1064453125,
"learning_rate": 9.154403421193225e-07,
"loss": 0.0438,
"step": 454
},
{
"epoch": 2.8757653565079995,
"grad_norm": 0.10107421875,
"learning_rate": 8.263080441993753e-07,
"loss": 0.037,
"step": 455
},
{
"epoch": 2.882085719928896,
"grad_norm": 0.1123046875,
"learning_rate": 7.417220516794499e-07,
"loss": 0.0434,
"step": 456
},
{
"epoch": 2.8884060833497927,
"grad_norm": 0.09814453125,
"learning_rate": 6.616862421378489e-07,
"loss": 0.0383,
"step": 457
},
{
"epoch": 2.8947264467706892,
"grad_norm": 0.10302734375,
"learning_rate": 5.862042845640403e-07,
"loss": 0.0427,
"step": 458
},
{
"epoch": 2.901046810191586,
"grad_norm": 0.09130859375,
"learning_rate": 5.152796391905357e-07,
"loss": 0.0325,
"step": 459
},
{
"epoch": 2.907367173612483,
"grad_norm": 0.107421875,
"learning_rate": 4.489155573341841e-07,
"loss": 0.0365,
"step": 460
},
{
"epoch": 2.9136875370333795,
"grad_norm": 0.1083984375,
"learning_rate": 3.871150812472246e-07,
"loss": 0.0379,
"step": 461
},
{
"epoch": 2.920007900454276,
"grad_norm": 0.10302734375,
"learning_rate": 3.298810439777311e-07,
"loss": 0.0381,
"step": 462
},
{
"epoch": 2.926328263875173,
"grad_norm": 0.099609375,
"learning_rate": 2.7721606923978293e-07,
"loss": 0.0396,
"step": 463
},
{
"epoch": 2.9326486272960697,
"grad_norm": 0.0986328125,
"learning_rate": 2.2912257129320547e-07,
"loss": 0.0338,
"step": 464
},
{
"epoch": 2.9326486272960697,
"eval_loss": 0.09114257246255875,
"eval_runtime": 55.6557,
"eval_samples_per_second": 19.153,
"eval_steps_per_second": 19.153,
"step": 464
},
{
"epoch": 2.9389689907169663,
"grad_norm": 0.10986328125,
"learning_rate": 1.8560275483286982e-07,
"loss": 0.0375,
"step": 465
},
{
"epoch": 2.945289354137863,
"grad_norm": 0.09619140625,
"learning_rate": 1.4665861488761813e-07,
"loss": 0.0318,
"step": 466
},
{
"epoch": 2.9516097175587594,
"grad_norm": 0.10302734375,
"learning_rate": 1.1229193672881444e-07,
"loss": 0.032,
"step": 467
},
{
"epoch": 2.9579300809796565,
"grad_norm": 0.0947265625,
"learning_rate": 8.250429578855467e-08,
"loss": 0.0302,
"step": 468
},
{
"epoch": 2.964250444400553,
"grad_norm": 0.10498046875,
"learning_rate": 5.7297057587346604e-08,
"loss": 0.0393,
"step": 469
},
{
"epoch": 2.9705708078214497,
"grad_norm": 0.10009765625,
"learning_rate": 3.667137767160433e-08,
"loss": 0.0346,
"step": 470
},
{
"epoch": 2.9768911712423467,
"grad_norm": 0.09716796875,
"learning_rate": 2.0628201560635162e-08,
"loss": 0.0319,
"step": 471
},
{
"epoch": 2.9832115346632433,
"grad_norm": 0.103515625,
"learning_rate": 9.168264703285356e-09,
"loss": 0.0429,
"step": 472
},
{
"epoch": 2.98953189808414,
"grad_norm": 0.111328125,
"learning_rate": 2.292092444255989e-09,
"loss": 0.0469,
"step": 473
},
{
"epoch": 2.9958522615050365,
"grad_norm": 0.09375,
"learning_rate": 0.0,
"loss": 0.0287,
"step": 474
}
],
"logging_steps": 1,
"max_steps": 474,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 79,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.077799609706086e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}