{
"best_metric": 0.49422508478164673,
"best_model_checkpoint": "data/Llama-31-8B_task-3_180-samples_config-4/checkpoint-229",
"epoch": 34.0,
"eval_steps": 500,
"global_step": 289,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11764705882352941,
"grad_norm": 2.2058663368225098,
"learning_rate": 8.333333333333334e-08,
"loss": 2.3266,
"step": 1
},
{
"epoch": 0.23529411764705882,
"grad_norm": 3.2251737117767334,
"learning_rate": 1.6666666666666668e-07,
"loss": 2.8101,
"step": 2
},
{
"epoch": 0.47058823529411764,
"grad_norm": 2.427489757537842,
"learning_rate": 3.3333333333333335e-07,
"loss": 2.4918,
"step": 4
},
{
"epoch": 0.7058823529411765,
"grad_norm": 2.110276460647583,
"learning_rate": 5.000000000000001e-07,
"loss": 2.416,
"step": 6
},
{
"epoch": 0.9411764705882353,
"grad_norm": 2.285691499710083,
"learning_rate": 6.666666666666667e-07,
"loss": 2.5495,
"step": 8
},
{
"epoch": 0.9411764705882353,
"eval_loss": 2.5076982975006104,
"eval_runtime": 31.8608,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 8
},
{
"epoch": 1.1764705882352942,
"grad_norm": 2.757939577102661,
"learning_rate": 8.333333333333333e-07,
"loss": 2.4129,
"step": 10
},
{
"epoch": 1.4117647058823528,
"grad_norm": 3.1854565143585205,
"learning_rate": 1.0000000000000002e-06,
"loss": 2.6068,
"step": 12
},
{
"epoch": 1.6470588235294117,
"grad_norm": 2.928661346435547,
"learning_rate": 1.1666666666666668e-06,
"loss": 2.66,
"step": 14
},
{
"epoch": 1.8823529411764706,
"grad_norm": 2.948481321334839,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.3733,
"step": 16
},
{
"epoch": 2.0,
"eval_loss": 2.4775068759918213,
"eval_runtime": 31.8538,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 17
},
{
"epoch": 2.1176470588235294,
"grad_norm": 2.4443531036376953,
"learning_rate": 1.5e-06,
"loss": 2.422,
"step": 18
},
{
"epoch": 2.3529411764705883,
"grad_norm": 2.2713582515716553,
"learning_rate": 1.6666666666666667e-06,
"loss": 2.3315,
"step": 20
},
{
"epoch": 2.588235294117647,
"grad_norm": 2.6579792499542236,
"learning_rate": 1.8333333333333333e-06,
"loss": 2.5587,
"step": 22
},
{
"epoch": 2.8235294117647056,
"grad_norm": 2.5377650260925293,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.46,
"step": 24
},
{
"epoch": 2.9411764705882355,
"eval_loss": 2.4217796325683594,
"eval_runtime": 31.8528,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 25
},
{
"epoch": 3.0588235294117645,
"grad_norm": 2.9668917655944824,
"learning_rate": 2.166666666666667e-06,
"loss": 2.5217,
"step": 26
},
{
"epoch": 3.2941176470588234,
"grad_norm": 2.460824728012085,
"learning_rate": 2.3333333333333336e-06,
"loss": 2.2339,
"step": 28
},
{
"epoch": 3.5294117647058822,
"grad_norm": 3.505033493041992,
"learning_rate": 2.5e-06,
"loss": 2.4788,
"step": 30
},
{
"epoch": 3.764705882352941,
"grad_norm": 2.1192469596862793,
"learning_rate": 2.666666666666667e-06,
"loss": 2.2748,
"step": 32
},
{
"epoch": 4.0,
"grad_norm": 2.7211689949035645,
"learning_rate": 2.8333333333333335e-06,
"loss": 2.4585,
"step": 34
},
{
"epoch": 4.0,
"eval_loss": 2.3277575969696045,
"eval_runtime": 31.8566,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 34
},
{
"epoch": 4.235294117647059,
"grad_norm": 2.7326908111572266,
"learning_rate": 3e-06,
"loss": 2.3561,
"step": 36
},
{
"epoch": 4.470588235294118,
"grad_norm": 2.5006515979766846,
"learning_rate": 3.1666666666666667e-06,
"loss": 2.1582,
"step": 38
},
{
"epoch": 4.705882352941177,
"grad_norm": 2.728483200073242,
"learning_rate": 3.3333333333333333e-06,
"loss": 2.2463,
"step": 40
},
{
"epoch": 4.9411764705882355,
"grad_norm": 3.0043787956237793,
"learning_rate": 3.5e-06,
"loss": 2.2624,
"step": 42
},
{
"epoch": 4.9411764705882355,
"eval_loss": 2.191901683807373,
"eval_runtime": 31.8501,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 42
},
{
"epoch": 5.176470588235294,
"grad_norm": 2.0914738178253174,
"learning_rate": 3.6666666666666666e-06,
"loss": 2.0377,
"step": 44
},
{
"epoch": 5.411764705882353,
"grad_norm": 3.9215309619903564,
"learning_rate": 3.833333333333334e-06,
"loss": 2.3093,
"step": 46
},
{
"epoch": 5.647058823529412,
"grad_norm": 2.7781357765197754,
"learning_rate": 4.000000000000001e-06,
"loss": 2.0491,
"step": 48
},
{
"epoch": 5.882352941176471,
"grad_norm": 2.7123777866363525,
"learning_rate": 4.166666666666667e-06,
"loss": 2.0553,
"step": 50
},
{
"epoch": 6.0,
"eval_loss": 1.9704445600509644,
"eval_runtime": 31.8507,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 51
},
{
"epoch": 6.117647058823529,
"grad_norm": 3.2437472343444824,
"learning_rate": 4.333333333333334e-06,
"loss": 1.9741,
"step": 52
},
{
"epoch": 6.352941176470588,
"grad_norm": 2.4424147605895996,
"learning_rate": 4.5e-06,
"loss": 1.8914,
"step": 54
},
{
"epoch": 6.588235294117647,
"grad_norm": 2.605942964553833,
"learning_rate": 4.666666666666667e-06,
"loss": 1.8738,
"step": 56
},
{
"epoch": 6.823529411764706,
"grad_norm": 2.119314432144165,
"learning_rate": 4.833333333333333e-06,
"loss": 1.7403,
"step": 58
},
{
"epoch": 6.9411764705882355,
"eval_loss": 1.7065722942352295,
"eval_runtime": 31.853,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 59
},
{
"epoch": 7.0588235294117645,
"grad_norm": 1.8015623092651367,
"learning_rate": 5e-06,
"loss": 1.6225,
"step": 60
},
{
"epoch": 7.294117647058823,
"grad_norm": 2.412097215652466,
"learning_rate": 5.1666666666666675e-06,
"loss": 1.6174,
"step": 62
},
{
"epoch": 7.529411764705882,
"grad_norm": 2.3760602474212646,
"learning_rate": 5.333333333333334e-06,
"loss": 1.5403,
"step": 64
},
{
"epoch": 7.764705882352941,
"grad_norm": 2.5603954792022705,
"learning_rate": 5.500000000000001e-06,
"loss": 1.4378,
"step": 66
},
{
"epoch": 8.0,
"grad_norm": 2.450331211090088,
"learning_rate": 5.666666666666667e-06,
"loss": 1.3756,
"step": 68
},
{
"epoch": 8.0,
"eval_loss": 1.3616687059402466,
"eval_runtime": 31.8467,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 68
},
{
"epoch": 8.235294117647058,
"grad_norm": 1.7808512449264526,
"learning_rate": 5.833333333333334e-06,
"loss": 1.2903,
"step": 70
},
{
"epoch": 8.470588235294118,
"grad_norm": 2.280879497528076,
"learning_rate": 6e-06,
"loss": 1.2624,
"step": 72
},
{
"epoch": 8.705882352941176,
"grad_norm": 2.584132671356201,
"learning_rate": 6.166666666666667e-06,
"loss": 1.0856,
"step": 74
},
{
"epoch": 8.941176470588236,
"grad_norm": 1.6824058294296265,
"learning_rate": 6.333333333333333e-06,
"loss": 1.11,
"step": 76
},
{
"epoch": 8.941176470588236,
"eval_loss": 1.0612744092941284,
"eval_runtime": 31.847,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 76
},
{
"epoch": 9.176470588235293,
"grad_norm": 1.8082739114761353,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.9939,
"step": 78
},
{
"epoch": 9.411764705882353,
"grad_norm": 1.7640292644500732,
"learning_rate": 6.666666666666667e-06,
"loss": 0.9378,
"step": 80
},
{
"epoch": 9.647058823529411,
"grad_norm": 1.609857201576233,
"learning_rate": 6.833333333333334e-06,
"loss": 0.7986,
"step": 82
},
{
"epoch": 9.882352941176471,
"grad_norm": 1.5625256299972534,
"learning_rate": 7e-06,
"loss": 0.7161,
"step": 84
},
{
"epoch": 10.0,
"eval_loss": 0.7772443890571594,
"eval_runtime": 31.8458,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 85
},
{
"epoch": 10.117647058823529,
"grad_norm": 1.2514125108718872,
"learning_rate": 7.166666666666667e-06,
"loss": 0.7059,
"step": 86
},
{
"epoch": 10.352941176470589,
"grad_norm": 0.9835730195045471,
"learning_rate": 7.333333333333333e-06,
"loss": 0.5005,
"step": 88
},
{
"epoch": 10.588235294117647,
"grad_norm": 0.8222648501396179,
"learning_rate": 7.500000000000001e-06,
"loss": 0.7141,
"step": 90
},
{
"epoch": 10.823529411764707,
"grad_norm": 1.2220810651779175,
"learning_rate": 7.666666666666667e-06,
"loss": 0.7609,
"step": 92
},
{
"epoch": 10.941176470588236,
"eval_loss": 0.6786696910858154,
"eval_runtime": 31.8483,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 93
},
{
"epoch": 11.058823529411764,
"grad_norm": 0.6009970307350159,
"learning_rate": 7.833333333333333e-06,
"loss": 0.6874,
"step": 94
},
{
"epoch": 11.294117647058824,
"grad_norm": 0.7795246243476868,
"learning_rate": 8.000000000000001e-06,
"loss": 0.7462,
"step": 96
},
{
"epoch": 11.529411764705882,
"grad_norm": 0.6197288036346436,
"learning_rate": 8.166666666666668e-06,
"loss": 0.6408,
"step": 98
},
{
"epoch": 11.764705882352942,
"grad_norm": 0.6273565292358398,
"learning_rate": 8.333333333333334e-06,
"loss": 0.4579,
"step": 100
},
{
"epoch": 12.0,
"grad_norm": 0.45476672053337097,
"learning_rate": 8.5e-06,
"loss": 0.4358,
"step": 102
},
{
"epoch": 12.0,
"eval_loss": 0.6182084083557129,
"eval_runtime": 31.8454,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 102
},
{
"epoch": 12.235294117647058,
"grad_norm": 0.7051817774772644,
"learning_rate": 8.666666666666668e-06,
"loss": 0.6431,
"step": 104
},
{
"epoch": 12.470588235294118,
"grad_norm": 0.5281614065170288,
"learning_rate": 8.833333333333334e-06,
"loss": 0.518,
"step": 106
},
{
"epoch": 12.705882352941176,
"grad_norm": 0.5485586524009705,
"learning_rate": 9e-06,
"loss": 0.4787,
"step": 108
},
{
"epoch": 12.941176470588236,
"grad_norm": 0.9739407300949097,
"learning_rate": 9.166666666666666e-06,
"loss": 0.4774,
"step": 110
},
{
"epoch": 12.941176470588236,
"eval_loss": 0.5912042260169983,
"eval_runtime": 31.8561,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 110
},
{
"epoch": 13.176470588235293,
"grad_norm": 0.3852427899837494,
"learning_rate": 9.333333333333334e-06,
"loss": 0.476,
"step": 112
},
{
"epoch": 13.411764705882353,
"grad_norm": 0.3773607909679413,
"learning_rate": 9.5e-06,
"loss": 0.4432,
"step": 114
},
{
"epoch": 13.647058823529411,
"grad_norm": 0.5833092331886292,
"learning_rate": 9.666666666666667e-06,
"loss": 0.4677,
"step": 116
},
{
"epoch": 13.882352941176471,
"grad_norm": 0.525675356388092,
"learning_rate": 9.833333333333333e-06,
"loss": 0.5569,
"step": 118
},
{
"epoch": 14.0,
"eval_loss": 0.5746464133262634,
"eval_runtime": 31.8497,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 119
},
{
"epoch": 14.117647058823529,
"grad_norm": 0.46034735441207886,
"learning_rate": 1e-05,
"loss": 0.6595,
"step": 120
},
{
"epoch": 14.352941176470589,
"grad_norm": 0.9925610423088074,
"learning_rate": 9.999915384288723e-06,
"loss": 0.5066,
"step": 122
},
{
"epoch": 14.588235294117647,
"grad_norm": 0.3283170163631439,
"learning_rate": 9.999661540018812e-06,
"loss": 0.5017,
"step": 124
},
{
"epoch": 14.823529411764707,
"grad_norm": 0.4828791320323944,
"learning_rate": 9.999238475781957e-06,
"loss": 0.427,
"step": 126
},
{
"epoch": 14.941176470588236,
"eval_loss": 0.5487004518508911,
"eval_runtime": 31.8495,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 127
},
{
"epoch": 15.058823529411764,
"grad_norm": 0.44912636280059814,
"learning_rate": 9.99864620589731e-06,
"loss": 0.4705,
"step": 128
},
{
"epoch": 15.294117647058824,
"grad_norm": 0.43985074758529663,
"learning_rate": 9.997884750411004e-06,
"loss": 0.3865,
"step": 130
},
{
"epoch": 15.529411764705882,
"grad_norm": 1.4928995370864868,
"learning_rate": 9.99695413509548e-06,
"loss": 0.483,
"step": 132
},
{
"epoch": 15.764705882352942,
"grad_norm": 0.3390346169471741,
"learning_rate": 9.995854391448607e-06,
"loss": 0.5068,
"step": 134
},
{
"epoch": 16.0,
"grad_norm": 0.33858582377433777,
"learning_rate": 9.994585556692624e-06,
"loss": 0.4672,
"step": 136
},
{
"epoch": 16.0,
"eval_loss": 0.533892035484314,
"eval_runtime": 31.8489,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 136
},
{
"epoch": 16.235294117647058,
"grad_norm": 0.3130759298801422,
"learning_rate": 9.993147673772869e-06,
"loss": 0.3805,
"step": 138
},
{
"epoch": 16.470588235294116,
"grad_norm": 0.32997503876686096,
"learning_rate": 9.991540791356342e-06,
"loss": 0.6362,
"step": 140
},
{
"epoch": 16.705882352941178,
"grad_norm": 1.398141622543335,
"learning_rate": 9.989764963830038e-06,
"loss": 0.4943,
"step": 142
},
{
"epoch": 16.941176470588236,
"grad_norm": 0.19720911979675293,
"learning_rate": 9.987820251299121e-06,
"loss": 0.3495,
"step": 144
},
{
"epoch": 16.941176470588236,
"eval_loss": 0.552502453327179,
"eval_runtime": 31.8645,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 144
},
{
"epoch": 17.176470588235293,
"grad_norm": 0.21927881240844727,
"learning_rate": 9.985706719584888e-06,
"loss": 0.4342,
"step": 146
},
{
"epoch": 17.41176470588235,
"grad_norm": 0.2400996834039688,
"learning_rate": 9.98342444022253e-06,
"loss": 0.4801,
"step": 148
},
{
"epoch": 17.647058823529413,
"grad_norm": 0.2719495892524719,
"learning_rate": 9.980973490458728e-06,
"loss": 0.4199,
"step": 150
},
{
"epoch": 17.88235294117647,
"grad_norm": 0.18624863028526306,
"learning_rate": 9.978353953249023e-06,
"loss": 0.4731,
"step": 152
},
{
"epoch": 18.0,
"eval_loss": 0.5323101878166199,
"eval_runtime": 31.8439,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 153
},
{
"epoch": 18.11764705882353,
"grad_norm": 0.18268544971942902,
"learning_rate": 9.975565917255017e-06,
"loss": 0.3459,
"step": 154
},
{
"epoch": 18.352941176470587,
"grad_norm": 0.30436864495277405,
"learning_rate": 9.972609476841368e-06,
"loss": 0.58,
"step": 156
},
{
"epoch": 18.58823529411765,
"grad_norm": 0.2947259843349457,
"learning_rate": 9.9694847320726e-06,
"loss": 0.4085,
"step": 158
},
{
"epoch": 18.823529411764707,
"grad_norm": 0.24882009625434875,
"learning_rate": 9.966191788709716e-06,
"loss": 0.3913,
"step": 160
},
{
"epoch": 18.941176470588236,
"eval_loss": 0.5243125557899475,
"eval_runtime": 31.8447,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 161
},
{
"epoch": 19.058823529411764,
"grad_norm": 0.31586578488349915,
"learning_rate": 9.962730758206612e-06,
"loss": 0.5022,
"step": 162
},
{
"epoch": 19.294117647058822,
"grad_norm": 0.21429723501205444,
"learning_rate": 9.959101757706308e-06,
"loss": 0.3682,
"step": 164
},
{
"epoch": 19.529411764705884,
"grad_norm": 0.29345616698265076,
"learning_rate": 9.955304910036993e-06,
"loss": 0.4185,
"step": 166
},
{
"epoch": 19.764705882352942,
"grad_norm": 0.24088309705257416,
"learning_rate": 9.951340343707852e-06,
"loss": 0.4027,
"step": 168
},
{
"epoch": 20.0,
"grad_norm": 0.3225376307964325,
"learning_rate": 9.947208192904722e-06,
"loss": 0.5624,
"step": 170
},
{
"epoch": 20.0,
"eval_loss": 0.5252914428710938,
"eval_runtime": 31.8447,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 170
},
{
"epoch": 20.235294117647058,
"grad_norm": 0.24431054294109344,
"learning_rate": 9.942908597485558e-06,
"loss": 0.3565,
"step": 172
},
{
"epoch": 20.470588235294116,
"grad_norm": 0.30302417278289795,
"learning_rate": 9.938441702975689e-06,
"loss": 0.4944,
"step": 174
},
{
"epoch": 20.705882352941178,
"grad_norm": 0.25407275557518005,
"learning_rate": 9.933807660562898e-06,
"loss": 0.3397,
"step": 176
},
{
"epoch": 20.941176470588236,
"grad_norm": 0.3202497363090515,
"learning_rate": 9.929006627092298e-06,
"loss": 0.4684,
"step": 178
},
{
"epoch": 20.941176470588236,
"eval_loss": 0.5222189426422119,
"eval_runtime": 31.8492,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 178
},
{
"epoch": 21.176470588235293,
"grad_norm": 0.2713940441608429,
"learning_rate": 9.924038765061042e-06,
"loss": 0.5016,
"step": 180
},
{
"epoch": 21.41176470588235,
"grad_norm": 0.2259199619293213,
"learning_rate": 9.918904242612794e-06,
"loss": 0.5315,
"step": 182
},
{
"epoch": 21.647058823529413,
"grad_norm": 0.2454088032245636,
"learning_rate": 9.913603233532067e-06,
"loss": 0.474,
"step": 184
},
{
"epoch": 21.88235294117647,
"grad_norm": 0.18309438228607178,
"learning_rate": 9.908135917238321e-06,
"loss": 0.3029,
"step": 186
},
{
"epoch": 22.0,
"eval_loss": 0.5099583864212036,
"eval_runtime": 31.8371,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 187
},
{
"epoch": 22.11764705882353,
"grad_norm": 0.3067823648452759,
"learning_rate": 9.902502478779897e-06,
"loss": 0.3838,
"step": 188
},
{
"epoch": 22.352941176470587,
"grad_norm": 0.2594364881515503,
"learning_rate": 9.896703108827758e-06,
"loss": 0.48,
"step": 190
},
{
"epoch": 22.58823529411765,
"grad_norm": 0.27693891525268555,
"learning_rate": 9.890738003669029e-06,
"loss": 0.4363,
"step": 192
},
{
"epoch": 22.823529411764707,
"grad_norm": 0.304267019033432,
"learning_rate": 9.884607365200355e-06,
"loss": 0.3522,
"step": 194
},
{
"epoch": 22.941176470588236,
"eval_loss": 0.5085048079490662,
"eval_runtime": 31.8502,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 195
},
{
"epoch": 23.058823529411764,
"grad_norm": 0.17085126042366028,
"learning_rate": 9.878311400921072e-06,
"loss": 0.2722,
"step": 196
},
{
"epoch": 23.294117647058822,
"grad_norm": 0.4169885218143463,
"learning_rate": 9.871850323926178e-06,
"loss": 0.4118,
"step": 198
},
{
"epoch": 23.529411764705884,
"grad_norm": 0.25028589367866516,
"learning_rate": 9.86522435289912e-06,
"loss": 0.3777,
"step": 200
},
{
"epoch": 23.764705882352942,
"grad_norm": 0.3203675150871277,
"learning_rate": 9.858433712104403e-06,
"loss": 0.5195,
"step": 202
},
{
"epoch": 24.0,
"grad_norm": 0.3470858931541443,
"learning_rate": 9.851478631379982e-06,
"loss": 0.3855,
"step": 204
},
{
"epoch": 24.0,
"eval_loss": 0.49713531136512756,
"eval_runtime": 31.8498,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 204
},
{
"epoch": 24.235294117647058,
"grad_norm": 0.3214435875415802,
"learning_rate": 9.844359346129504e-06,
"loss": 0.476,
"step": 206
},
{
"epoch": 24.470588235294116,
"grad_norm": 0.3332065939903259,
"learning_rate": 9.83707609731432e-06,
"loss": 0.4383,
"step": 208
},
{
"epoch": 24.705882352941178,
"grad_norm": 0.22289708256721497,
"learning_rate": 9.829629131445342e-06,
"loss": 0.3256,
"step": 210
},
{
"epoch": 24.941176470588236,
"grad_norm": 0.25234198570251465,
"learning_rate": 9.822018700574696e-06,
"loss": 0.317,
"step": 212
},
{
"epoch": 24.941176470588236,
"eval_loss": 0.504925549030304,
"eval_runtime": 31.8487,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 212
},
{
"epoch": 25.176470588235293,
"grad_norm": 0.3678690493106842,
"learning_rate": 9.81424506228719e-06,
"loss": 0.4398,
"step": 214
},
{
"epoch": 25.41176470588235,
"grad_norm": 0.31491589546203613,
"learning_rate": 9.806308479691595e-06,
"loss": 0.3335,
"step": 216
},
{
"epoch": 25.647058823529413,
"grad_norm": 0.3429551124572754,
"learning_rate": 9.798209221411748e-06,
"loss": 0.4467,
"step": 218
},
{
"epoch": 25.88235294117647,
"grad_norm": 0.30439844727516174,
"learning_rate": 9.789947561577445e-06,
"loss": 0.338,
"step": 220
},
{
"epoch": 26.0,
"eval_loss": 0.5015532970428467,
"eval_runtime": 31.8415,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 221
},
{
"epoch": 26.11764705882353,
"grad_norm": 0.2530220150947571,
"learning_rate": 9.781523779815178e-06,
"loss": 0.3514,
"step": 222
},
{
"epoch": 26.352941176470587,
"grad_norm": 0.2835986018180847,
"learning_rate": 9.77293816123866e-06,
"loss": 0.3022,
"step": 224
},
{
"epoch": 26.58823529411765,
"grad_norm": 0.3710886538028717,
"learning_rate": 9.764190996439181e-06,
"loss": 0.4567,
"step": 226
},
{
"epoch": 26.823529411764707,
"grad_norm": 0.3602848947048187,
"learning_rate": 9.755282581475769e-06,
"loss": 0.391,
"step": 228
},
{
"epoch": 26.941176470588236,
"eval_loss": 0.49422508478164673,
"eval_runtime": 31.8515,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 229
},
{
"epoch": 27.058823529411764,
"grad_norm": 0.3394179940223694,
"learning_rate": 9.74621321786517e-06,
"loss": 0.3986,
"step": 230
},
{
"epoch": 27.294117647058822,
"grad_norm": 0.4131983518600464,
"learning_rate": 9.736983212571646e-06,
"loss": 0.359,
"step": 232
},
{
"epoch": 27.529411764705884,
"grad_norm": 0.35208216309547424,
"learning_rate": 9.727592877996585e-06,
"loss": 0.2981,
"step": 234
},
{
"epoch": 27.764705882352942,
"grad_norm": 0.42573440074920654,
"learning_rate": 9.718042531967918e-06,
"loss": 0.4258,
"step": 236
},
{
"epoch": 28.0,
"grad_norm": 0.3431633412837982,
"learning_rate": 9.708332497729378e-06,
"loss": 0.3964,
"step": 238
},
{
"epoch": 28.0,
"eval_loss": 0.5010454058647156,
"eval_runtime": 31.8416,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 238
},
{
"epoch": 28.235294117647058,
"grad_norm": 0.31860944628715515,
"learning_rate": 9.698463103929542e-06,
"loss": 0.2962,
"step": 240
},
{
"epoch": 28.470588235294116,
"grad_norm": 0.34731313586235046,
"learning_rate": 9.688434684610725e-06,
"loss": 0.3673,
"step": 242
},
{
"epoch": 28.705882352941178,
"grad_norm": 0.36615273356437683,
"learning_rate": 9.678247579197658e-06,
"loss": 0.5272,
"step": 244
},
{
"epoch": 28.941176470588236,
"grad_norm": 0.2973138689994812,
"learning_rate": 9.667902132486009e-06,
"loss": 0.2951,
"step": 246
},
{
"epoch": 28.941176470588236,
"eval_loss": 0.509841799736023,
"eval_runtime": 31.843,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 246
},
{
"epoch": 29.176470588235293,
"grad_norm": 0.3237411379814148,
"learning_rate": 9.657398694630713e-06,
"loss": 0.2387,
"step": 248
},
{
"epoch": 29.41176470588235,
"grad_norm": 0.31316205859184265,
"learning_rate": 9.646737621134112e-06,
"loss": 0.3701,
"step": 250
},
{
"epoch": 29.647058823529413,
"grad_norm": 0.41230136156082153,
"learning_rate": 9.635919272833938e-06,
"loss": 0.4157,
"step": 252
},
{
"epoch": 29.88235294117647,
"grad_norm": 0.4581688940525055,
"learning_rate": 9.62494401589108e-06,
"loss": 0.4021,
"step": 254
},
{
"epoch": 30.0,
"eval_loss": 0.5068490505218506,
"eval_runtime": 31.8451,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 255
},
{
"epoch": 30.11764705882353,
"grad_norm": 0.3983169198036194,
"learning_rate": 9.613812221777212e-06,
"loss": 0.3278,
"step": 256
},
{
"epoch": 30.352941176470587,
"grad_norm": 0.26158151030540466,
"learning_rate": 9.602524267262202e-06,
"loss": 0.1625,
"step": 258
},
{
"epoch": 30.58823529411765,
"grad_norm": 0.4597890377044678,
"learning_rate": 9.591080534401371e-06,
"loss": 0.4323,
"step": 260
},
{
"epoch": 30.823529411764707,
"grad_norm": 0.4650469422340393,
"learning_rate": 9.579481410522556e-06,
"loss": 0.4021,
"step": 262
},
{
"epoch": 30.941176470588236,
"eval_loss": 0.507030189037323,
"eval_runtime": 31.8372,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 263
},
{
"epoch": 31.058823529411764,
"grad_norm": 0.41834914684295654,
"learning_rate": 9.567727288213005e-06,
"loss": 0.3066,
"step": 264
},
{
"epoch": 31.294117647058822,
"grad_norm": 0.40843620896339417,
"learning_rate": 9.555818565306086e-06,
"loss": 0.288,
"step": 266
},
{
"epoch": 31.529411764705884,
"grad_norm": 0.3444281816482544,
"learning_rate": 9.543755644867823e-06,
"loss": 0.3391,
"step": 268
},
{
"epoch": 31.764705882352942,
"grad_norm": 0.42617908120155334,
"learning_rate": 9.531538935183252e-06,
"loss": 0.396,
"step": 270
},
{
"epoch": 32.0,
"grad_norm": 0.47771233320236206,
"learning_rate": 9.519168849742603e-06,
"loss": 0.3456,
"step": 272
},
{
"epoch": 32.0,
"eval_loss": 0.5024969577789307,
"eval_runtime": 31.841,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 272
},
{
"epoch": 32.23529411764706,
"grad_norm": 0.5327393412590027,
"learning_rate": 9.506645807227311e-06,
"loss": 0.3042,
"step": 274
},
{
"epoch": 32.470588235294116,
"grad_norm": 0.35559046268463135,
"learning_rate": 9.493970231495836e-06,
"loss": 0.386,
"step": 276
},
{
"epoch": 32.705882352941174,
"grad_norm": 0.3480581045150757,
"learning_rate": 9.481142551569318e-06,
"loss": 0.2265,
"step": 278
},
{
"epoch": 32.94117647058823,
"grad_norm": 0.5482520461082458,
"learning_rate": 9.468163201617063e-06,
"loss": 0.4431,
"step": 280
},
{
"epoch": 32.94117647058823,
"eval_loss": 0.5049856901168823,
"eval_runtime": 31.8427,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 280
},
{
"epoch": 33.1764705882353,
"grad_norm": 0.3856636583805084,
"learning_rate": 9.45503262094184e-06,
"loss": 0.1491,
"step": 282
},
{
"epoch": 33.411764705882355,
"grad_norm": 0.5228236317634583,
"learning_rate": 9.441751253965022e-06,
"loss": 0.3196,
"step": 284
},
{
"epoch": 33.64705882352941,
"grad_norm": 0.38300809264183044,
"learning_rate": 9.428319550211531e-06,
"loss": 0.2612,
"step": 286
},
{
"epoch": 33.88235294117647,
"grad_norm": 0.5598320364952087,
"learning_rate": 9.414737964294636e-06,
"loss": 0.4131,
"step": 288
},
{
"epoch": 34.0,
"eval_loss": 0.5093716382980347,
"eval_runtime": 31.8401,
"eval_samples_per_second": 1.131,
"eval_steps_per_second": 1.131,
"step": 289
},
{
"epoch": 34.0,
"step": 289,
"total_flos": 3.6265283397720474e+17,
"train_loss": 0.8757064017663777,
"train_runtime": 13296.8495,
"train_samples_per_second": 1.534,
"train_steps_per_second": 0.09
}
],
"logging_steps": 2,
"max_steps": 1200,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.6265283397720474e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}