{
  "best_metric": 1.0,
  "best_model_checkpoint": "bridalMakeupClassifier_binary/checkpoint-414",
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 460,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 7.482993125915527,
      "learning_rate": 5.4347826086956525e-06,
      "loss": 0.7413,
      "step": 5
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 5.045105934143066,
      "learning_rate": 1.0869565217391305e-05,
      "loss": 0.6285,
      "step": 10
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 3.057001829147339,
      "learning_rate": 1.630434782608696e-05,
      "loss": 0.4579,
      "step": 15
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 2.394951581954956,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.2966,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9661538461538461,
      "eval_f1": 0.9378531073446328,
      "eval_loss": 0.12898202240467072,
      "eval_precision": 0.9431818181818182,
      "eval_recall": 0.9325842696629213,
      "eval_runtime": 0.6116,
      "eval_samples_per_second": 531.418,
      "eval_steps_per_second": 17.986,
      "step": 23
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 2.818490982055664,
      "learning_rate": 2.7173913043478262e-05,
      "loss": 0.1788,
      "step": 25
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 2.19661545753479,
      "learning_rate": 3.260869565217392e-05,
      "loss": 0.1157,
      "step": 30
    },
    {
      "epoch": 1.5217391304347827,
      "grad_norm": 1.980846643447876,
      "learning_rate": 3.804347826086957e-05,
      "loss": 0.077,
      "step": 35
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 1.7503933906555176,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.076,
      "step": 40
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 4.313005447387695,
      "learning_rate": 4.891304347826087e-05,
      "loss": 0.1233,
      "step": 45
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9876923076923076,
      "eval_f1": 0.9777777777777779,
      "eval_loss": 0.04074087738990784,
      "eval_precision": 0.967032967032967,
      "eval_recall": 0.9887640449438202,
      "eval_runtime": 0.614,
      "eval_samples_per_second": 529.309,
      "eval_steps_per_second": 17.915,
      "step": 46
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 2.5787644386291504,
      "learning_rate": 4.9516908212560386e-05,
      "loss": 0.0834,
      "step": 50
    },
    {
      "epoch": 2.391304347826087,
      "grad_norm": 2.1645395755767822,
      "learning_rate": 4.891304347826087e-05,
      "loss": 0.0538,
      "step": 55
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 2.0289194583892822,
      "learning_rate": 4.830917874396135e-05,
      "loss": 0.0749,
      "step": 60
    },
    {
      "epoch": 2.8260869565217392,
      "grad_norm": 1.7418752908706665,
      "learning_rate": 4.770531400966184e-05,
      "loss": 0.0469,
      "step": 65
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9815384615384616,
      "eval_f1": 0.967391304347826,
      "eval_loss": 0.059426549822092056,
      "eval_precision": 0.9368421052631579,
      "eval_recall": 1.0,
      "eval_runtime": 0.6222,
      "eval_samples_per_second": 522.372,
      "eval_steps_per_second": 17.68,
      "step": 69
    },
    {
      "epoch": 3.0434782608695654,
      "grad_norm": 4.78839635848999,
      "learning_rate": 4.710144927536232e-05,
      "loss": 0.0729,
      "step": 70
    },
    {
      "epoch": 3.260869565217391,
      "grad_norm": 2.100724697113037,
      "learning_rate": 4.64975845410628e-05,
      "loss": 0.0588,
      "step": 75
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 2.4715771675109863,
      "learning_rate": 4.589371980676328e-05,
      "loss": 0.0537,
      "step": 80
    },
    {
      "epoch": 3.6956521739130435,
      "grad_norm": 2.045220375061035,
      "learning_rate": 4.528985507246377e-05,
      "loss": 0.0451,
      "step": 85
    },
    {
      "epoch": 3.9130434782608696,
      "grad_norm": 2.7747068405151367,
      "learning_rate": 4.4685990338164255e-05,
      "loss": 0.0394,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9876923076923076,
      "eval_f1": 0.9777777777777779,
      "eval_loss": 0.055703382939100266,
      "eval_precision": 0.967032967032967,
      "eval_recall": 0.9887640449438202,
      "eval_runtime": 0.6278,
      "eval_samples_per_second": 517.661,
      "eval_steps_per_second": 17.521,
      "step": 92
    },
    {
      "epoch": 4.130434782608695,
      "grad_norm": 1.9641977548599243,
      "learning_rate": 4.408212560386474e-05,
      "loss": 0.0728,
      "step": 95
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 3.0312821865081787,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.0611,
      "step": 100
    },
    {
      "epoch": 4.565217391304348,
      "grad_norm": 2.8319077491760254,
      "learning_rate": 4.2874396135265707e-05,
      "loss": 0.0467,
      "step": 105
    },
    {
      "epoch": 4.782608695652174,
      "grad_norm": 0.7883936762809753,
      "learning_rate": 4.2270531400966186e-05,
      "loss": 0.0433,
      "step": 110
    },
    {
      "epoch": 5.0,
      "grad_norm": 3.4693968296051025,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.0909,
      "step": 115
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9907692307692307,
      "eval_f1": 0.9834254143646408,
      "eval_loss": 0.04008618742227554,
      "eval_precision": 0.967391304347826,
      "eval_recall": 1.0,
      "eval_runtime": 0.6227,
      "eval_samples_per_second": 521.945,
      "eval_steps_per_second": 17.666,
      "step": 115
    },
    {
      "epoch": 5.217391304347826,
      "grad_norm": 2.560603618621826,
      "learning_rate": 4.106280193236715e-05,
      "loss": 0.0683,
      "step": 120
    },
    {
      "epoch": 5.434782608695652,
      "grad_norm": 1.258954644203186,
      "learning_rate": 4.045893719806764e-05,
      "loss": 0.045,
      "step": 125
    },
    {
      "epoch": 5.6521739130434785,
      "grad_norm": 1.6040618419647217,
      "learning_rate": 3.985507246376812e-05,
      "loss": 0.0334,
      "step": 130
    },
    {
      "epoch": 5.869565217391305,
      "grad_norm": 2.980376720428467,
      "learning_rate": 3.92512077294686e-05,
      "loss": 0.05,
      "step": 135
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9876923076923076,
      "eval_f1": 0.9777777777777779,
      "eval_loss": 0.025248104706406593,
      "eval_precision": 0.967032967032967,
      "eval_recall": 0.9887640449438202,
      "eval_runtime": 0.6255,
      "eval_samples_per_second": 519.608,
      "eval_steps_per_second": 17.587,
      "step": 138
    },
    {
      "epoch": 6.086956521739131,
      "grad_norm": 1.547420859336853,
      "learning_rate": 3.864734299516908e-05,
      "loss": 0.0215,
      "step": 140
    },
    {
      "epoch": 6.304347826086957,
      "grad_norm": 1.0269052982330322,
      "learning_rate": 3.804347826086957e-05,
      "loss": 0.0513,
      "step": 145
    },
    {
      "epoch": 6.521739130434782,
      "grad_norm": 1.4050897359848022,
      "learning_rate": 3.743961352657005e-05,
      "loss": 0.0324,
      "step": 150
    },
    {
      "epoch": 6.739130434782608,
      "grad_norm": 2.9087464809417725,
      "learning_rate": 3.6835748792270534e-05,
      "loss": 0.0246,
      "step": 155
    },
    {
      "epoch": 6.956521739130435,
      "grad_norm": 2.945462942123413,
      "learning_rate": 3.6231884057971014e-05,
      "loss": 0.0451,
      "step": 160
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9876923076923076,
      "eval_f1": 0.9772727272727273,
      "eval_loss": 0.02794536016881466,
      "eval_precision": 0.9885057471264368,
      "eval_recall": 0.9662921348314607,
      "eval_runtime": 0.638,
      "eval_samples_per_second": 509.374,
      "eval_steps_per_second": 17.24,
      "step": 161
    },
    {
      "epoch": 7.173913043478261,
      "grad_norm": 0.28256598114967346,
      "learning_rate": 3.56280193236715e-05,
      "loss": 0.0499,
      "step": 165
    },
    {
      "epoch": 7.391304347826087,
      "grad_norm": 2.3740286827087402,
      "learning_rate": 3.502415458937198e-05,
      "loss": 0.0358,
      "step": 170
    },
    {
      "epoch": 7.608695652173913,
      "grad_norm": 1.025823950767517,
      "learning_rate": 3.4420289855072465e-05,
      "loss": 0.0306,
      "step": 175
    },
    {
      "epoch": 7.826086956521739,
      "grad_norm": 0.7179997563362122,
      "learning_rate": 3.381642512077295e-05,
      "loss": 0.0231,
      "step": 180
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9938461538461538,
      "eval_f1": 0.9888888888888888,
      "eval_loss": 0.02782590501010418,
      "eval_precision": 0.978021978021978,
      "eval_recall": 1.0,
      "eval_runtime": 0.6231,
      "eval_samples_per_second": 521.596,
      "eval_steps_per_second": 17.654,
      "step": 184
    },
    {
      "epoch": 8.043478260869565,
      "grad_norm": 0.5594707131385803,
      "learning_rate": 3.321256038647343e-05,
      "loss": 0.024,
      "step": 185
    },
    {
      "epoch": 8.26086956521739,
      "grad_norm": 2.8453502655029297,
      "learning_rate": 3.260869565217392e-05,
      "loss": 0.03,
      "step": 190
    },
    {
      "epoch": 8.478260869565217,
      "grad_norm": 1.5159822702407837,
      "learning_rate": 3.2004830917874396e-05,
      "loss": 0.0203,
      "step": 195
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 2.250065326690674,
      "learning_rate": 3.140096618357488e-05,
      "loss": 0.0282,
      "step": 200
    },
    {
      "epoch": 8.91304347826087,
      "grad_norm": 3.3784470558166504,
      "learning_rate": 3.079710144927536e-05,
      "loss": 0.0404,
      "step": 205
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.9876923076923076,
      "eval_f1": 0.9775280898876404,
      "eval_loss": 0.025601999834179878,
      "eval_precision": 0.9775280898876404,
      "eval_recall": 0.9775280898876404,
      "eval_runtime": 0.6288,
      "eval_samples_per_second": 516.888,
      "eval_steps_per_second": 17.495,
      "step": 207
    },
    {
      "epoch": 9.130434782608695,
      "grad_norm": 4.156078815460205,
      "learning_rate": 3.0193236714975848e-05,
      "loss": 0.0135,
      "step": 210
    },
    {
      "epoch": 9.347826086956522,
      "grad_norm": 1.2355906963348389,
      "learning_rate": 2.9589371980676327e-05,
      "loss": 0.0377,
      "step": 215
    },
    {
      "epoch": 9.565217391304348,
      "grad_norm": 4.165219783782959,
      "learning_rate": 2.8985507246376814e-05,
      "loss": 0.0354,
      "step": 220
    },
    {
      "epoch": 9.782608695652174,
      "grad_norm": 1.0178390741348267,
      "learning_rate": 2.8381642512077293e-05,
      "loss": 0.0331,
      "step": 225
    },
    {
      "epoch": 10.0,
      "grad_norm": 4.175224304199219,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0297,
      "step": 230
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.9907692307692307,
      "eval_f1": 0.9832402234636872,
      "eval_loss": 0.02601032890379429,
      "eval_precision": 0.9777777777777777,
      "eval_recall": 0.9887640449438202,
      "eval_runtime": 0.6221,
      "eval_samples_per_second": 522.441,
      "eval_steps_per_second": 17.683,
      "step": 230
    },
    {
      "epoch": 10.217391304347826,
      "grad_norm": 0.14615581929683685,
      "learning_rate": 2.7173913043478262e-05,
      "loss": 0.009,
      "step": 235
    },
    {
      "epoch": 10.434782608695652,
      "grad_norm": 5.359668731689453,
      "learning_rate": 2.6570048309178748e-05,
      "loss": 0.0176,
      "step": 240
    },
    {
      "epoch": 10.652173913043478,
      "grad_norm": 0.387389212846756,
      "learning_rate": 2.5966183574879227e-05,
      "loss": 0.0251,
      "step": 245
    },
    {
      "epoch": 10.869565217391305,
      "grad_norm": 1.7574446201324463,
      "learning_rate": 2.5362318840579714e-05,
      "loss": 0.0327,
      "step": 250
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.9938461538461538,
      "eval_f1": 0.9888888888888888,
      "eval_loss": 0.02298262156546116,
      "eval_precision": 0.978021978021978,
      "eval_recall": 1.0,
      "eval_runtime": 0.63,
      "eval_samples_per_second": 515.887,
      "eval_steps_per_second": 17.461,
      "step": 253
    },
    {
      "epoch": 11.08695652173913,
      "grad_norm": 1.124037504196167,
      "learning_rate": 2.4758454106280193e-05,
      "loss": 0.0397,
      "step": 255
    },
    {
      "epoch": 11.304347826086957,
      "grad_norm": 2.474566698074341,
      "learning_rate": 2.4154589371980676e-05,
      "loss": 0.0427,
      "step": 260
    },
    {
      "epoch": 11.521739130434783,
      "grad_norm": 1.6105717420578003,
      "learning_rate": 2.355072463768116e-05,
      "loss": 0.0157,
      "step": 265
    },
    {
      "epoch": 11.73913043478261,
      "grad_norm": 0.6210281252861023,
      "learning_rate": 2.294685990338164e-05,
      "loss": 0.0297,
      "step": 270
    },
    {
      "epoch": 11.956521739130435,
      "grad_norm": 2.6540639400482178,
      "learning_rate": 2.2342995169082127e-05,
      "loss": 0.0221,
      "step": 275
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.01398832444101572,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6237,
      "eval_samples_per_second": 521.045,
      "eval_steps_per_second": 17.635,
      "step": 276
    },
    {
      "epoch": 12.173913043478262,
      "grad_norm": 2.6183152198791504,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.0267,
      "step": 280
    },
    {
      "epoch": 12.391304347826088,
      "grad_norm": 0.8627565503120422,
      "learning_rate": 2.1135265700483093e-05,
      "loss": 0.0195,
      "step": 285
    },
    {
      "epoch": 12.608695652173914,
      "grad_norm": 0.576085090637207,
      "learning_rate": 2.0531400966183576e-05,
      "loss": 0.0322,
      "step": 290
    },
    {
      "epoch": 12.826086956521738,
      "grad_norm": 3.6716318130493164,
      "learning_rate": 1.992753623188406e-05,
      "loss": 0.0294,
      "step": 295
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.01057437900453806,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6324,
      "eval_samples_per_second": 513.948,
      "eval_steps_per_second": 17.395,
      "step": 299
    },
    {
      "epoch": 13.043478260869565,
      "grad_norm": 0.19212637841701508,
      "learning_rate": 1.932367149758454e-05,
      "loss": 0.0298,
      "step": 300
    },
    {
      "epoch": 13.26086956521739,
      "grad_norm": 1.9307483434677124,
      "learning_rate": 1.8719806763285024e-05,
      "loss": 0.0231,
      "step": 305
    },
    {
      "epoch": 13.478260869565217,
      "grad_norm": 2.444979667663574,
      "learning_rate": 1.8115942028985507e-05,
      "loss": 0.0201,
      "step": 310
    },
    {
      "epoch": 13.695652173913043,
      "grad_norm": 2.9044065475463867,
      "learning_rate": 1.751207729468599e-05,
      "loss": 0.0243,
      "step": 315
    },
    {
      "epoch": 13.91304347826087,
      "grad_norm": 1.5113251209259033,
      "learning_rate": 1.6908212560386476e-05,
      "loss": 0.0292,
      "step": 320
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.013215974904596806,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6382,
      "eval_samples_per_second": 509.263,
      "eval_steps_per_second": 17.237,
      "step": 322
    },
    {
      "epoch": 14.130434782608695,
      "grad_norm": 0.16032224893569946,
      "learning_rate": 1.630434782608696e-05,
      "loss": 0.0056,
      "step": 325
    },
    {
      "epoch": 14.347826086956522,
      "grad_norm": 1.7882447242736816,
      "learning_rate": 1.570048309178744e-05,
      "loss": 0.0203,
      "step": 330
    },
    {
      "epoch": 14.565217391304348,
      "grad_norm": 1.206709861755371,
      "learning_rate": 1.5096618357487924e-05,
      "loss": 0.0205,
      "step": 335
    },
    {
      "epoch": 14.782608695652174,
      "grad_norm": 2.7993412017822266,
      "learning_rate": 1.4492753623188407e-05,
      "loss": 0.0227,
      "step": 340
    },
    {
      "epoch": 15.0,
      "grad_norm": 1.1518446207046509,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0064,
      "step": 345
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.9907692307692307,
      "eval_f1": 0.9834254143646408,
      "eval_loss": 0.02308599278330803,
      "eval_precision": 0.967391304347826,
      "eval_recall": 1.0,
      "eval_runtime": 0.6311,
      "eval_samples_per_second": 514.958,
      "eval_steps_per_second": 17.429,
      "step": 345
    },
    {
      "epoch": 15.217391304347826,
      "grad_norm": 1.4991214275360107,
      "learning_rate": 1.3285024154589374e-05,
      "loss": 0.0152,
      "step": 350
    },
    {
      "epoch": 15.434782608695652,
      "grad_norm": 1.1912800073623657,
      "learning_rate": 1.2681159420289857e-05,
      "loss": 0.0066,
      "step": 355
    },
    {
      "epoch": 15.652173913043478,
      "grad_norm": 0.4402332603931427,
      "learning_rate": 1.2077294685990338e-05,
      "loss": 0.0051,
      "step": 360
    },
    {
      "epoch": 15.869565217391305,
      "grad_norm": 0.8945227861404419,
      "learning_rate": 1.147342995169082e-05,
      "loss": 0.02,
      "step": 365
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.008727076463401318,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6362,
      "eval_samples_per_second": 510.858,
      "eval_steps_per_second": 17.291,
      "step": 368
    },
    {
      "epoch": 16.08695652173913,
      "grad_norm": 1.353935718536377,
      "learning_rate": 1.0869565217391305e-05,
      "loss": 0.0422,
      "step": 370
    },
    {
      "epoch": 16.304347826086957,
      "grad_norm": 2.2760603427886963,
      "learning_rate": 1.0265700483091788e-05,
      "loss": 0.0314,
      "step": 375
    },
    {
      "epoch": 16.52173913043478,
      "grad_norm": 1.7415324449539185,
      "learning_rate": 9.66183574879227e-06,
      "loss": 0.0105,
      "step": 380
    },
    {
      "epoch": 16.73913043478261,
      "grad_norm": 0.31125152111053467,
      "learning_rate": 9.057971014492753e-06,
      "loss": 0.0249,
      "step": 385
    },
    {
      "epoch": 16.956521739130434,
      "grad_norm": 3.789961814880371,
      "learning_rate": 8.454106280193238e-06,
      "loss": 0.0356,
      "step": 390
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.011419730260968208,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6273,
      "eval_samples_per_second": 518.129,
      "eval_steps_per_second": 17.537,
      "step": 391
    },
    {
      "epoch": 17.17391304347826,
      "grad_norm": 0.2875424027442932,
      "learning_rate": 7.85024154589372e-06,
      "loss": 0.0102,
      "step": 395
    },
    {
      "epoch": 17.391304347826086,
      "grad_norm": 1.2512189149856567,
      "learning_rate": 7.246376811594203e-06,
      "loss": 0.0114,
      "step": 400
    },
    {
      "epoch": 17.608695652173914,
      "grad_norm": 2.634803295135498,
      "learning_rate": 6.642512077294687e-06,
      "loss": 0.0274,
      "step": 405
    },
    {
      "epoch": 17.82608695652174,
      "grad_norm": 1.524092674255371,
      "learning_rate": 6.038647342995169e-06,
      "loss": 0.0232,
      "step": 410
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 0.00724475271999836,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 0.6289,
      "eval_samples_per_second": 516.744,
      "eval_steps_per_second": 17.49,
      "step": 414
    },
    {
      "epoch": 18.043478260869566,
      "grad_norm": 0.8228383660316467,
      "learning_rate": 5.4347826086956525e-06,
      "loss": 0.0187,
      "step": 415
    },
    {
      "epoch": 18.26086956521739,
      "grad_norm": 0.26993948221206665,
      "learning_rate": 4.830917874396135e-06,
      "loss": 0.0176,
      "step": 420
    },
    {
      "epoch": 18.47826086956522,
      "grad_norm": 1.4451013803482056,
      "learning_rate": 4.227053140096619e-06,
      "loss": 0.017,
      "step": 425
    },
    {
      "epoch": 18.695652173913043,
      "grad_norm": 1.0776654481887817,
      "learning_rate": 3.6231884057971017e-06,
      "loss": 0.0225,
      "step": 430
    },
    {
      "epoch": 18.91304347826087,
      "grad_norm": 0.6803100109100342,
      "learning_rate": 3.0193236714975845e-06,
      "loss": 0.0351,
      "step": 435
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.008730148896574974,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.629,
      "eval_samples_per_second": 516.656,
      "eval_steps_per_second": 17.487,
      "step": 437
    },
    {
      "epoch": 19.130434782608695,
      "grad_norm": 3.2962541580200195,
      "learning_rate": 2.4154589371980677e-06,
      "loss": 0.0138,
      "step": 440
    },
    {
      "epoch": 19.347826086956523,
      "grad_norm": 2.5289676189422607,
      "learning_rate": 1.8115942028985508e-06,
      "loss": 0.0192,
      "step": 445
    },
    {
      "epoch": 19.565217391304348,
      "grad_norm": 0.09774911403656006,
      "learning_rate": 1.2077294685990338e-06,
      "loss": 0.0136,
      "step": 450
    },
    {
      "epoch": 19.782608695652176,
      "grad_norm": 0.9112738370895386,
      "learning_rate": 6.038647342995169e-07,
      "loss": 0.0213,
      "step": 455
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.6033933162689209,
      "learning_rate": 0.0,
      "loss": 0.0155,
      "step": 460
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9969230769230769,
      "eval_f1": 0.9944134078212291,
      "eval_loss": 0.007528003770858049,
      "eval_precision": 0.9888888888888889,
      "eval_recall": 1.0,
      "eval_runtime": 0.6414,
      "eval_samples_per_second": 506.707,
      "eval_steps_per_second": 17.15,
      "step": 460
    },
    {
      "epoch": 20.0,
      "step": 460,
      "total_flos": 1.4530811161131418e+18,
      "train_loss": 0.0580909106111073,
      "train_runtime": 291.8902,
      "train_samples_per_second": 200.281,
      "train_steps_per_second": 1.576
    }
  ],
  "logging_steps": 5,
  "max_steps": 460,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4530811161131418e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}