{
"best_metric": 2.51826810836792,
"best_model_checkpoint": "./results/models/checkpoint-60605",
"epoch": 31.0,
"eval_steps": 500,
"global_step": 60605,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2557544757033248,
"grad_norm": 0.228515625,
"learning_rate": 0.0009948849104859335,
"loss": 3.8573,
"step": 500
},
{
"epoch": 0.5115089514066496,
"grad_norm": 0.27734375,
"learning_rate": 0.000989769820971867,
"loss": 3.7698,
"step": 1000
},
{
"epoch": 0.7672634271099744,
"grad_norm": 0.26171875,
"learning_rate": 0.0009846547314578005,
"loss": 3.7481,
"step": 1500
},
{
"epoch": 1.0,
"eval_loss": 3.7266085147857666,
"eval_runtime": 1.0357,
"eval_samples_per_second": 482.766,
"eval_steps_per_second": 0.966,
"step": 1955
},
{
"epoch": 1.0230179028132993,
"grad_norm": 0.376953125,
"learning_rate": 0.000979539641943734,
"loss": 3.7319,
"step": 2000
},
{
"epoch": 1.278772378516624,
"grad_norm": 1.28125,
"learning_rate": 0.0009744245524296675,
"loss": 3.7118,
"step": 2500
},
{
"epoch": 1.5345268542199488,
"grad_norm": 0.25,
"learning_rate": 0.000969309462915601,
"loss": 3.6851,
"step": 3000
},
{
"epoch": 1.7902813299232738,
"grad_norm": 0.279296875,
"learning_rate": 0.0009641943734015346,
"loss": 3.628,
"step": 3500
},
{
"epoch": 2.0,
"eval_loss": 3.5190136432647705,
"eval_runtime": 1.0477,
"eval_samples_per_second": 477.242,
"eval_steps_per_second": 0.954,
"step": 3910
},
{
"epoch": 2.0460358056265986,
"grad_norm": 0.345703125,
"learning_rate": 0.0009590792838874681,
"loss": 3.5437,
"step": 4000
},
{
"epoch": 2.3017902813299234,
"grad_norm": 0.279296875,
"learning_rate": 0.0009539641943734016,
"loss": 3.456,
"step": 4500
},
{
"epoch": 2.557544757033248,
"grad_norm": 0.275390625,
"learning_rate": 0.0009488491048593351,
"loss": 3.3486,
"step": 5000
},
{
"epoch": 2.813299232736573,
"grad_norm": 0.306640625,
"learning_rate": 0.0009437340153452686,
"loss": 3.2918,
"step": 5500
},
{
"epoch": 3.0,
"eval_loss": 3.189316511154175,
"eval_runtime": 1.0569,
"eval_samples_per_second": 473.077,
"eval_steps_per_second": 0.946,
"step": 5865
},
{
"epoch": 3.0690537084398977,
"grad_norm": 0.27734375,
"learning_rate": 0.0009386189258312021,
"loss": 3.2061,
"step": 6000
},
{
"epoch": 3.3248081841432224,
"grad_norm": 0.326171875,
"learning_rate": 0.0009335038363171356,
"loss": 3.1412,
"step": 6500
},
{
"epoch": 3.580562659846547,
"grad_norm": 0.26171875,
"learning_rate": 0.0009283887468030691,
"loss": 3.0956,
"step": 7000
},
{
"epoch": 3.836317135549872,
"grad_norm": 0.271484375,
"learning_rate": 0.0009232736572890026,
"loss": 3.0555,
"step": 7500
},
{
"epoch": 4.0,
"eval_loss": 3.0206379890441895,
"eval_runtime": 1.0312,
"eval_samples_per_second": 484.854,
"eval_steps_per_second": 0.97,
"step": 7820
},
{
"epoch": 4.092071611253197,
"grad_norm": 0.2490234375,
"learning_rate": 0.0009181585677749361,
"loss": 3.014,
"step": 8000
},
{
"epoch": 4.3478260869565215,
"grad_norm": 0.287109375,
"learning_rate": 0.0009130434782608695,
"loss": 2.9683,
"step": 8500
},
{
"epoch": 4.603580562659847,
"grad_norm": 0.265625,
"learning_rate": 0.0009079283887468031,
"loss": 2.9468,
"step": 9000
},
{
"epoch": 4.859335038363171,
"grad_norm": 0.267578125,
"learning_rate": 0.0009028132992327366,
"loss": 2.9253,
"step": 9500
},
{
"epoch": 5.0,
"eval_loss": 2.9162545204162598,
"eval_runtime": 1.0514,
"eval_samples_per_second": 475.536,
"eval_steps_per_second": 0.951,
"step": 9775
},
{
"epoch": 5.115089514066496,
"grad_norm": 0.3203125,
"learning_rate": 0.0008976982097186701,
"loss": 2.8851,
"step": 10000
},
{
"epoch": 5.370843989769821,
"grad_norm": 0.275390625,
"learning_rate": 0.0008925831202046036,
"loss": 2.8639,
"step": 10500
},
{
"epoch": 5.626598465473146,
"grad_norm": 0.294921875,
"learning_rate": 0.000887468030690537,
"loss": 2.8469,
"step": 11000
},
{
"epoch": 5.882352941176471,
"grad_norm": 0.26171875,
"learning_rate": 0.0008823529411764706,
"loss": 2.8301,
"step": 11500
},
{
"epoch": 6.0,
"eval_loss": 2.8372726440429688,
"eval_runtime": 1.0374,
"eval_samples_per_second": 481.997,
"eval_steps_per_second": 0.964,
"step": 11730
},
{
"epoch": 6.138107416879795,
"grad_norm": 0.27734375,
"learning_rate": 0.0008772378516624041,
"loss": 2.7998,
"step": 12000
},
{
"epoch": 6.3938618925831205,
"grad_norm": 0.255859375,
"learning_rate": 0.0008721227621483376,
"loss": 2.7802,
"step": 12500
},
{
"epoch": 6.649616368286445,
"grad_norm": 0.294921875,
"learning_rate": 0.0008670076726342711,
"loss": 2.7704,
"step": 13000
},
{
"epoch": 6.90537084398977,
"grad_norm": 0.2890625,
"learning_rate": 0.0008618925831202045,
"loss": 2.7605,
"step": 13500
},
{
"epoch": 7.0,
"eval_loss": 2.7794747352600098,
"eval_runtime": 1.0401,
"eval_samples_per_second": 480.743,
"eval_steps_per_second": 0.961,
"step": 13685
},
{
"epoch": 7.161125319693094,
"grad_norm": 0.279296875,
"learning_rate": 0.0008567774936061381,
"loss": 2.7314,
"step": 14000
},
{
"epoch": 7.41687979539642,
"grad_norm": 0.345703125,
"learning_rate": 0.0008516624040920716,
"loss": 2.7203,
"step": 14500
},
{
"epoch": 7.672634271099744,
"grad_norm": 0.318359375,
"learning_rate": 0.0008465473145780051,
"loss": 2.713,
"step": 15000
},
{
"epoch": 7.928388746803069,
"grad_norm": 0.31640625,
"learning_rate": 0.0008414322250639387,
"loss": 2.7029,
"step": 15500
},
{
"epoch": 8.0,
"eval_loss": 2.7313828468322754,
"eval_runtime": 1.0364,
"eval_samples_per_second": 482.427,
"eval_steps_per_second": 0.965,
"step": 15640
},
{
"epoch": 8.184143222506394,
"grad_norm": 0.3046875,
"learning_rate": 0.000836317135549872,
"loss": 2.6727,
"step": 16000
},
{
"epoch": 8.43989769820972,
"grad_norm": 0.259765625,
"learning_rate": 0.0008312020460358057,
"loss": 2.6709,
"step": 16500
},
{
"epoch": 8.695652173913043,
"grad_norm": 0.28125,
"learning_rate": 0.0008260869565217392,
"loss": 2.6632,
"step": 17000
},
{
"epoch": 8.951406649616368,
"grad_norm": 0.298828125,
"learning_rate": 0.0008209718670076727,
"loss": 2.6586,
"step": 17500
},
{
"epoch": 9.0,
"eval_loss": 2.697967767715454,
"eval_runtime": 1.5778,
"eval_samples_per_second": 316.897,
"eval_steps_per_second": 0.634,
"step": 17595
},
{
"epoch": 9.207161125319693,
"grad_norm": 0.328125,
"learning_rate": 0.0008158567774936062,
"loss": 2.6339,
"step": 18000
},
{
"epoch": 9.462915601023019,
"grad_norm": 0.28515625,
"learning_rate": 0.0008107416879795396,
"loss": 2.6225,
"step": 18500
},
{
"epoch": 9.718670076726342,
"grad_norm": 0.361328125,
"learning_rate": 0.0008056265984654732,
"loss": 2.6241,
"step": 19000
},
{
"epoch": 9.974424552429667,
"grad_norm": 0.2890625,
"learning_rate": 0.0008005115089514067,
"loss": 2.6182,
"step": 19500
},
{
"epoch": 10.0,
"eval_loss": 2.673384189605713,
"eval_runtime": 1.0686,
"eval_samples_per_second": 467.914,
"eval_steps_per_second": 0.936,
"step": 19550
},
{
"epoch": 10.230179028132993,
"grad_norm": 0.28125,
"learning_rate": 0.0007953964194373402,
"loss": 2.5897,
"step": 20000
},
{
"epoch": 10.485933503836318,
"grad_norm": 0.28515625,
"learning_rate": 0.0007902813299232737,
"loss": 2.5888,
"step": 20500
},
{
"epoch": 10.741687979539641,
"grad_norm": 0.294921875,
"learning_rate": 0.0007851662404092071,
"loss": 2.5878,
"step": 21000
},
{
"epoch": 10.997442455242966,
"grad_norm": 0.298828125,
"learning_rate": 0.0007800511508951407,
"loss": 2.5869,
"step": 21500
},
{
"epoch": 11.0,
"eval_loss": 2.6476247310638428,
"eval_runtime": 1.0522,
"eval_samples_per_second": 475.199,
"eval_steps_per_second": 0.95,
"step": 21505
},
{
"epoch": 11.253196930946292,
"grad_norm": 0.318359375,
"learning_rate": 0.0007749360613810742,
"loss": 2.5571,
"step": 22000
},
{
"epoch": 11.508951406649617,
"grad_norm": 0.3828125,
"learning_rate": 0.0007698209718670077,
"loss": 2.5603,
"step": 22500
},
{
"epoch": 11.764705882352942,
"grad_norm": 0.2890625,
"learning_rate": 0.0007647058823529411,
"loss": 2.5602,
"step": 23000
},
{
"epoch": 12.0,
"eval_loss": 2.6259398460388184,
"eval_runtime": 1.0589,
"eval_samples_per_second": 472.168,
"eval_steps_per_second": 0.944,
"step": 23460
},
{
"epoch": 12.020460358056265,
"grad_norm": 0.31640625,
"learning_rate": 0.0007595907928388746,
"loss": 2.554,
"step": 23500
},
{
"epoch": 12.27621483375959,
"grad_norm": 0.345703125,
"learning_rate": 0.0007544757033248082,
"loss": 2.532,
"step": 24000
},
{
"epoch": 12.531969309462916,
"grad_norm": 0.314453125,
"learning_rate": 0.0007493606138107417,
"loss": 2.53,
"step": 24500
},
{
"epoch": 12.787723785166241,
"grad_norm": 0.32421875,
"learning_rate": 0.0007442455242966752,
"loss": 2.5295,
"step": 25000
},
{
"epoch": 13.0,
"eval_loss": 2.6141037940979004,
"eval_runtime": 1.0318,
"eval_samples_per_second": 484.612,
"eval_steps_per_second": 0.969,
"step": 25415
},
{
"epoch": 13.043478260869565,
"grad_norm": 0.412109375,
"learning_rate": 0.0007391304347826086,
"loss": 2.5263,
"step": 25500
},
{
"epoch": 13.29923273657289,
"grad_norm": 0.30078125,
"learning_rate": 0.0007340153452685422,
"loss": 2.5082,
"step": 26000
},
{
"epoch": 13.554987212276215,
"grad_norm": 0.3203125,
"learning_rate": 0.0007289002557544757,
"loss": 2.5115,
"step": 26500
},
{
"epoch": 13.81074168797954,
"grad_norm": 0.306640625,
"learning_rate": 0.0007237851662404093,
"loss": 2.5082,
"step": 27000
},
{
"epoch": 14.0,
"eval_loss": 2.5942933559417725,
"eval_runtime": 1.0321,
"eval_samples_per_second": 484.463,
"eval_steps_per_second": 0.969,
"step": 27370
},
{
"epoch": 14.066496163682864,
"grad_norm": 0.32421875,
"learning_rate": 0.0007186700767263428,
"loss": 2.4949,
"step": 27500
},
{
"epoch": 14.322250639386189,
"grad_norm": 0.345703125,
"learning_rate": 0.0007135549872122762,
"loss": 2.4873,
"step": 28000
},
{
"epoch": 14.578005115089514,
"grad_norm": 0.333984375,
"learning_rate": 0.0007084398976982098,
"loss": 2.4895,
"step": 28500
},
{
"epoch": 14.83375959079284,
"grad_norm": 0.318359375,
"learning_rate": 0.0007033248081841433,
"loss": 2.4875,
"step": 29000
},
{
"epoch": 15.0,
"eval_loss": 2.5941433906555176,
"eval_runtime": 1.0612,
"eval_samples_per_second": 471.171,
"eval_steps_per_second": 0.942,
"step": 29325
},
{
"epoch": 15.089514066496164,
"grad_norm": 0.33203125,
"learning_rate": 0.0006982097186700768,
"loss": 2.4796,
"step": 29500
},
{
"epoch": 15.345268542199488,
"grad_norm": 0.330078125,
"learning_rate": 0.0006930946291560103,
"loss": 2.4671,
"step": 30000
},
{
"epoch": 15.601023017902813,
"grad_norm": 0.3828125,
"learning_rate": 0.0006879795396419437,
"loss": 2.4647,
"step": 30500
},
{
"epoch": 15.856777493606138,
"grad_norm": 0.33203125,
"learning_rate": 0.0006828644501278773,
"loss": 2.4729,
"step": 31000
},
{
"epoch": 16.0,
"eval_loss": 2.5811538696289062,
"eval_runtime": 1.0329,
"eval_samples_per_second": 484.078,
"eval_steps_per_second": 0.968,
"step": 31280
},
{
"epoch": 16.11253196930946,
"grad_norm": 0.341796875,
"learning_rate": 0.0006777493606138108,
"loss": 2.4574,
"step": 31500
},
{
"epoch": 16.36828644501279,
"grad_norm": 0.30859375,
"learning_rate": 0.0006726342710997443,
"loss": 2.4535,
"step": 32000
},
{
"epoch": 16.624040920716112,
"grad_norm": 0.33984375,
"learning_rate": 0.0006675191815856778,
"loss": 2.452,
"step": 32500
},
{
"epoch": 16.87979539641944,
"grad_norm": 0.30859375,
"learning_rate": 0.0006624040920716112,
"loss": 2.4508,
"step": 33000
},
{
"epoch": 17.0,
"eval_loss": 2.5716660022735596,
"eval_runtime": 1.0296,
"eval_samples_per_second": 485.606,
"eval_steps_per_second": 0.971,
"step": 33235
},
{
"epoch": 17.135549872122763,
"grad_norm": 0.318359375,
"learning_rate": 0.0006572890025575448,
"loss": 2.4415,
"step": 33500
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.302734375,
"learning_rate": 0.0006521739130434783,
"loss": 2.4275,
"step": 34000
},
{
"epoch": 17.647058823529413,
"grad_norm": 0.302734375,
"learning_rate": 0.0006470588235294118,
"loss": 2.4398,
"step": 34500
},
{
"epoch": 17.902813299232736,
"grad_norm": 0.302734375,
"learning_rate": 0.0006419437340153452,
"loss": 2.4446,
"step": 35000
},
{
"epoch": 18.0,
"eval_loss": 2.5545594692230225,
"eval_runtime": 1.0484,
"eval_samples_per_second": 476.904,
"eval_steps_per_second": 0.954,
"step": 35190
},
{
"epoch": 18.15856777493606,
"grad_norm": 0.3515625,
"learning_rate": 0.0006368286445012787,
"loss": 2.4267,
"step": 35500
},
{
"epoch": 18.414322250639387,
"grad_norm": 0.369140625,
"learning_rate": 0.0006317135549872123,
"loss": 2.4183,
"step": 36000
},
{
"epoch": 18.67007672634271,
"grad_norm": 0.333984375,
"learning_rate": 0.0006265984654731458,
"loss": 2.4242,
"step": 36500
},
{
"epoch": 18.925831202046037,
"grad_norm": 0.34375,
"learning_rate": 0.0006214833759590793,
"loss": 2.4312,
"step": 37000
},
{
"epoch": 19.0,
"eval_loss": 2.5581214427948,
"eval_runtime": 1.082,
"eval_samples_per_second": 462.11,
"eval_steps_per_second": 0.924,
"step": 37145
},
{
"epoch": 19.18158567774936,
"grad_norm": 0.33984375,
"learning_rate": 0.0006163682864450127,
"loss": 2.4121,
"step": 37500
},
{
"epoch": 19.437340153452684,
"grad_norm": 0.3125,
"learning_rate": 0.0006112531969309462,
"loss": 2.4054,
"step": 38000
},
{
"epoch": 19.69309462915601,
"grad_norm": 0.326171875,
"learning_rate": 0.0006061381074168799,
"loss": 2.4158,
"step": 38500
},
{
"epoch": 19.948849104859335,
"grad_norm": 0.326171875,
"learning_rate": 0.0006010230179028134,
"loss": 2.4166,
"step": 39000
},
{
"epoch": 20.0,
"eval_loss": 2.545928478240967,
"eval_runtime": 1.0301,
"eval_samples_per_second": 485.371,
"eval_steps_per_second": 0.971,
"step": 39100
},
{
"epoch": 20.20460358056266,
"grad_norm": 0.33203125,
"learning_rate": 0.0005959079283887469,
"loss": 2.4008,
"step": 39500
},
{
"epoch": 20.460358056265985,
"grad_norm": 0.345703125,
"learning_rate": 0.0005907928388746803,
"loss": 2.3997,
"step": 40000
},
{
"epoch": 20.71611253196931,
"grad_norm": 0.337890625,
"learning_rate": 0.0005856777493606138,
"loss": 2.4061,
"step": 40500
},
{
"epoch": 20.971867007672635,
"grad_norm": 0.296875,
"learning_rate": 0.0005805626598465474,
"loss": 2.4055,
"step": 41000
},
{
"epoch": 21.0,
"eval_loss": 2.5422630310058594,
"eval_runtime": 1.0775,
"eval_samples_per_second": 464.02,
"eval_steps_per_second": 0.928,
"step": 41055
},
{
"epoch": 21.22762148337596,
"grad_norm": 0.291015625,
"learning_rate": 0.0005754475703324809,
"loss": 2.3843,
"step": 41500
},
{
"epoch": 21.483375959079282,
"grad_norm": 0.314453125,
"learning_rate": 0.0005703324808184144,
"loss": 2.3914,
"step": 42000
},
{
"epoch": 21.73913043478261,
"grad_norm": 0.322265625,
"learning_rate": 0.0005652173913043478,
"loss": 2.3954,
"step": 42500
},
{
"epoch": 21.994884910485933,
"grad_norm": 0.30859375,
"learning_rate": 0.0005601023017902813,
"loss": 2.3992,
"step": 43000
},
{
"epoch": 22.0,
"eval_loss": 2.538604736328125,
"eval_runtime": 1.0718,
"eval_samples_per_second": 466.508,
"eval_steps_per_second": 0.933,
"step": 43010
},
{
"epoch": 22.25063938618926,
"grad_norm": 0.3359375,
"learning_rate": 0.0005549872122762149,
"loss": 2.3769,
"step": 43500
},
{
"epoch": 22.506393861892583,
"grad_norm": 0.357421875,
"learning_rate": 0.0005498721227621484,
"loss": 2.3806,
"step": 44000
},
{
"epoch": 22.762148337595907,
"grad_norm": 0.328125,
"learning_rate": 0.0005447570332480819,
"loss": 2.3905,
"step": 44500
},
{
"epoch": 23.0,
"eval_loss": 2.5324785709381104,
"eval_runtime": 1.0484,
"eval_samples_per_second": 476.898,
"eval_steps_per_second": 0.954,
"step": 44965
},
{
"epoch": 23.017902813299234,
"grad_norm": 0.376953125,
"learning_rate": 0.0005396419437340153,
"loss": 2.3874,
"step": 45000
},
{
"epoch": 23.273657289002557,
"grad_norm": 0.35546875,
"learning_rate": 0.0005345268542199488,
"loss": 2.3744,
"step": 45500
},
{
"epoch": 23.529411764705884,
"grad_norm": 0.328125,
"learning_rate": 0.0005294117647058824,
"loss": 2.3785,
"step": 46000
},
{
"epoch": 23.785166240409207,
"grad_norm": 0.3359375,
"learning_rate": 0.0005242966751918159,
"loss": 2.3759,
"step": 46500
},
{
"epoch": 24.0,
"eval_loss": 2.5388312339782715,
"eval_runtime": 1.0338,
"eval_samples_per_second": 483.635,
"eval_steps_per_second": 0.967,
"step": 46920
},
{
"epoch": 24.04092071611253,
"grad_norm": 0.330078125,
"learning_rate": 0.0005191815856777494,
"loss": 2.3774,
"step": 47000
},
{
"epoch": 24.296675191815858,
"grad_norm": 0.37109375,
"learning_rate": 0.0005140664961636828,
"loss": 2.367,
"step": 47500
},
{
"epoch": 24.55242966751918,
"grad_norm": 0.3359375,
"learning_rate": 0.0005089514066496163,
"loss": 2.3701,
"step": 48000
},
{
"epoch": 24.808184143222505,
"grad_norm": 0.328125,
"learning_rate": 0.0005038363171355499,
"loss": 2.3722,
"step": 48500
},
{
"epoch": 25.0,
"eval_loss": 2.528256893157959,
"eval_runtime": 1.03,
"eval_samples_per_second": 485.431,
"eval_steps_per_second": 0.971,
"step": 48875
},
{
"epoch": 25.06393861892583,
"grad_norm": 0.302734375,
"learning_rate": 0.0004987212276214833,
"loss": 2.3668,
"step": 49000
},
{
"epoch": 25.319693094629155,
"grad_norm": 0.306640625,
"learning_rate": 0.0004936061381074169,
"loss": 2.3593,
"step": 49500
},
{
"epoch": 25.575447570332482,
"grad_norm": 0.37890625,
"learning_rate": 0.0004884910485933504,
"loss": 2.3647,
"step": 50000
},
{
"epoch": 25.831202046035806,
"grad_norm": 0.34765625,
"learning_rate": 0.0004833759590792839,
"loss": 2.3704,
"step": 50500
},
{
"epoch": 26.0,
"eval_loss": 2.5277769565582275,
"eval_runtime": 1.6093,
"eval_samples_per_second": 310.697,
"eval_steps_per_second": 0.621,
"step": 50830
},
{
"epoch": 26.08695652173913,
"grad_norm": 0.32421875,
"learning_rate": 0.0004782608695652174,
"loss": 2.3599,
"step": 51000
},
{
"epoch": 26.342710997442456,
"grad_norm": 0.33984375,
"learning_rate": 0.0004731457800511509,
"loss": 2.3516,
"step": 51500
},
{
"epoch": 26.59846547314578,
"grad_norm": 0.31640625,
"learning_rate": 0.0004680306905370844,
"loss": 2.3585,
"step": 52000
},
{
"epoch": 26.854219948849106,
"grad_norm": 0.33984375,
"learning_rate": 0.00046291560102301786,
"loss": 2.3591,
"step": 52500
},
{
"epoch": 27.0,
"eval_loss": 2.5286903381347656,
"eval_runtime": 1.0654,
"eval_samples_per_second": 469.299,
"eval_steps_per_second": 0.939,
"step": 52785
},
{
"epoch": 27.10997442455243,
"grad_norm": 0.357421875,
"learning_rate": 0.0004578005115089514,
"loss": 2.3548,
"step": 53000
},
{
"epoch": 27.365728900255753,
"grad_norm": 0.326171875,
"learning_rate": 0.0004526854219948849,
"loss": 2.3496,
"step": 53500
},
{
"epoch": 27.62148337595908,
"grad_norm": 0.31640625,
"learning_rate": 0.00044757033248081843,
"loss": 2.3506,
"step": 54000
},
{
"epoch": 27.877237851662404,
"grad_norm": 0.33984375,
"learning_rate": 0.00044245524296675193,
"loss": 2.3552,
"step": 54500
},
{
"epoch": 28.0,
"eval_loss": 2.523589849472046,
"eval_runtime": 1.0502,
"eval_samples_per_second": 476.079,
"eval_steps_per_second": 0.952,
"step": 54740
},
{
"epoch": 28.132992327365727,
"grad_norm": 0.337890625,
"learning_rate": 0.0004373401534526854,
"loss": 2.3445,
"step": 55000
},
{
"epoch": 28.388746803069054,
"grad_norm": 0.34765625,
"learning_rate": 0.00043222506393861894,
"loss": 2.3447,
"step": 55500
},
{
"epoch": 28.644501278772378,
"grad_norm": 0.318359375,
"learning_rate": 0.00042710997442455245,
"loss": 2.3485,
"step": 56000
},
{
"epoch": 28.900255754475705,
"grad_norm": 0.330078125,
"learning_rate": 0.00042199488491048595,
"loss": 2.3509,
"step": 56500
},
{
"epoch": 29.0,
"eval_loss": 2.5270164012908936,
"eval_runtime": 1.0578,
"eval_samples_per_second": 472.684,
"eval_steps_per_second": 0.945,
"step": 56695
},
{
"epoch": 29.156010230179028,
"grad_norm": 0.392578125,
"learning_rate": 0.00041687979539641946,
"loss": 2.3418,
"step": 57000
},
{
"epoch": 29.41176470588235,
"grad_norm": 0.3046875,
"learning_rate": 0.0004117647058823529,
"loss": 2.3364,
"step": 57500
},
{
"epoch": 29.66751918158568,
"grad_norm": 0.33203125,
"learning_rate": 0.00040664961636828646,
"loss": 2.3473,
"step": 58000
},
{
"epoch": 29.923273657289002,
"grad_norm": 0.41015625,
"learning_rate": 0.00040153452685421997,
"loss": 2.3423,
"step": 58500
},
{
"epoch": 30.0,
"eval_loss": 2.5237414836883545,
"eval_runtime": 1.0886,
"eval_samples_per_second": 459.31,
"eval_steps_per_second": 0.919,
"step": 58650
},
{
"epoch": 30.17902813299233,
"grad_norm": 0.31640625,
"learning_rate": 0.00039641943734015347,
"loss": 2.3403,
"step": 59000
},
{
"epoch": 30.434782608695652,
"grad_norm": 0.34375,
"learning_rate": 0.000391304347826087,
"loss": 2.3339,
"step": 59500
},
{
"epoch": 30.690537084398976,
"grad_norm": 0.30078125,
"learning_rate": 0.0003861892583120204,
"loss": 2.3387,
"step": 60000
},
{
"epoch": 30.946291560102303,
"grad_norm": 0.357421875,
"learning_rate": 0.000381074168797954,
"loss": 2.3386,
"step": 60500
},
{
"epoch": 31.0,
"eval_loss": 2.51826810836792,
"eval_runtime": 1.0097,
"eval_samples_per_second": 495.204,
"eval_steps_per_second": 0.99,
"step": 60605
}
],
"logging_steps": 500,
"max_steps": 97750,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.461299764647219e+18,
"train_batch_size": 512,
"trial_name": null,
"trial_params": null
}