{
  "best_metric": 0.07230885326862335,
  "best_model_checkpoint": "./vit-base-beans/checkpoint-1120",
  "epoch": 3.9130434782608696,
  "global_step": 1440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0001986413043478261,
      "loss": 0.4743,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001972826086956522,
      "loss": 0.1414,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019592391304347828,
      "loss": 0.1163,
      "step": 30
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019456521739130434,
      "loss": 0.1002,
      "step": 40
    },
    {
      "epoch": 0.11,
      "eval_accuracy": 0.9591280653950953,
      "eval_loss": 0.15037867426872253,
      "eval_runtime": 17.5557,
      "eval_samples_per_second": 41.81,
      "eval_steps_per_second": 5.24,
      "step": 40
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019320652173913046,
      "loss": 0.1429,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019184782608695653,
      "loss": 0.1156,
      "step": 60
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019048913043478262,
      "loss": 0.1891,
      "step": 70
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001891304347826087,
      "loss": 0.1599,
      "step": 80
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.9659400544959128,
      "eval_loss": 0.11539561301469803,
      "eval_runtime": 17.7744,
      "eval_samples_per_second": 41.295,
      "eval_steps_per_second": 5.176,
      "step": 80
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001877717391304348,
      "loss": 0.1202,
      "step": 90
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001864130434782609,
      "loss": 0.0683,
      "step": 100
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018505434782608698,
      "loss": 0.1397,
      "step": 110
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018369565217391304,
      "loss": 0.1965,
      "step": 120
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.9618528610354223,
      "eval_loss": 0.12847301363945007,
      "eval_runtime": 17.015,
      "eval_samples_per_second": 43.138,
      "eval_steps_per_second": 5.407,
      "step": 120
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018233695652173913,
      "loss": 0.1995,
      "step": 130
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00018097826086956522,
      "loss": 0.0787,
      "step": 140
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001796195652173913,
      "loss": 0.149,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001782608695652174,
      "loss": 0.1032,
      "step": 160
    },
    {
      "epoch": 0.43,
      "eval_accuracy": 0.9632152588555858,
      "eval_loss": 0.12126161903142929,
      "eval_runtime": 17.4318,
      "eval_samples_per_second": 42.107,
      "eval_steps_per_second": 5.278,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001769021739130435,
      "loss": 0.0208,
      "step": 170
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017554347826086956,
      "loss": 0.122,
      "step": 180
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00017418478260869567,
      "loss": 0.0703,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00017282608695652174,
      "loss": 0.0667,
      "step": 200
    },
    {
      "epoch": 0.54,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.10148755460977554,
      "eval_runtime": 17.4321,
      "eval_samples_per_second": 42.106,
      "eval_steps_per_second": 5.278,
      "step": 200
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00017146739130434783,
      "loss": 0.1348,
      "step": 210
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00017010869565217392,
      "loss": 0.0833,
      "step": 220
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00016875,
      "loss": 0.1079,
      "step": 230
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001673913043478261,
      "loss": 0.0919,
      "step": 240
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.9659400544959128,
      "eval_loss": 0.11204984039068222,
      "eval_runtime": 18.116,
      "eval_samples_per_second": 40.517,
      "eval_steps_per_second": 5.078,
      "step": 240
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001660326086956522,
      "loss": 0.1255,
      "step": 250
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00016467391304347828,
      "loss": 0.0411,
      "step": 260
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00016331521739130434,
      "loss": 0.1268,
      "step": 270
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00016195652173913046,
      "loss": 0.0758,
      "step": 280
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.9645776566757494,
      "eval_loss": 0.12156815826892853,
      "eval_runtime": 17.22,
      "eval_samples_per_second": 42.625,
      "eval_steps_per_second": 5.343,
      "step": 280
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016059782608695652,
      "loss": 0.0613,
      "step": 290
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00015923913043478262,
      "loss": 0.0498,
      "step": 300
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001578804347826087,
      "loss": 0.0627,
      "step": 310
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001565217391304348,
      "loss": 0.0477,
      "step": 320
    },
    {
      "epoch": 0.87,
      "eval_accuracy": 0.9632152588555858,
      "eval_loss": 0.13319940865039825,
      "eval_runtime": 17.3065,
      "eval_samples_per_second": 42.412,
      "eval_steps_per_second": 5.316,
      "step": 320
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015516304347826086,
      "loss": 0.1778,
      "step": 330
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015380434782608698,
      "loss": 0.199,
      "step": 340
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015244565217391304,
      "loss": 0.0943,
      "step": 350
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015108695652173913,
      "loss": 0.0873,
      "step": 360
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.10057565569877625,
      "eval_runtime": 17.221,
      "eval_samples_per_second": 42.622,
      "eval_steps_per_second": 5.342,
      "step": 360
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00014972826086956522,
      "loss": 0.0584,
      "step": 370
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0001483695652173913,
      "loss": 0.1169,
      "step": 380
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0001470108695652174,
      "loss": 0.05,
      "step": 390
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001456521739130435,
      "loss": 0.1141,
      "step": 400
    },
    {
      "epoch": 1.09,
      "eval_accuracy": 0.9659400544959128,
      "eval_loss": 0.0971706286072731,
      "eval_runtime": 17.0669,
      "eval_samples_per_second": 43.007,
      "eval_steps_per_second": 5.391,
      "step": 400
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014429347826086956,
      "loss": 0.0666,
      "step": 410
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00014293478260869567,
      "loss": 0.0744,
      "step": 420
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014157608695652174,
      "loss": 0.0991,
      "step": 430
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00014021739130434783,
      "loss": 0.0581,
      "step": 440
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.08075320720672607,
      "eval_runtime": 17.6899,
      "eval_samples_per_second": 41.492,
      "eval_steps_per_second": 5.201,
      "step": 440
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013885869565217392,
      "loss": 0.0953,
      "step": 450
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001375,
      "loss": 0.054,
      "step": 460
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001361413043478261,
      "loss": 0.0671,
      "step": 470
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0001347826086956522,
      "loss": 0.0721,
      "step": 480
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.1063413992524147,
      "eval_runtime": 17.0062,
      "eval_samples_per_second": 43.161,
      "eval_steps_per_second": 5.41,
      "step": 480
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00013342391304347828,
      "loss": 0.0161,
      "step": 490
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00013206521739130434,
      "loss": 0.0899,
      "step": 500
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00013070652173913046,
      "loss": 0.2204,
      "step": 510
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00012934782608695652,
      "loss": 0.1076,
      "step": 520
    },
    {
      "epoch": 1.41,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.10360449552536011,
      "eval_runtime": 17.2631,
      "eval_samples_per_second": 42.519,
      "eval_steps_per_second": 5.329,
      "step": 520
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00012798913043478261,
      "loss": 0.1218,
      "step": 530
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.0001266304347826087,
      "loss": 0.0423,
      "step": 540
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0001252717391304348,
      "loss": 0.0945,
      "step": 550
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00012391304347826086,
      "loss": 0.103,
      "step": 560
    },
    {
      "epoch": 1.52,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.0836576595902443,
      "eval_runtime": 17.3063,
      "eval_samples_per_second": 42.412,
      "eval_steps_per_second": 5.316,
      "step": 560
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00012255434782608698,
      "loss": 0.0906,
      "step": 570
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00012119565217391304,
      "loss": 0.0521,
      "step": 580
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00011983695652173914,
      "loss": 0.0843,
      "step": 590
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00011847826086956522,
      "loss": 0.0865,
      "step": 600
    },
    {
      "epoch": 1.63,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.07968933880329132,
      "eval_runtime": 17.7434,
      "eval_samples_per_second": 41.367,
      "eval_steps_per_second": 5.185,
      "step": 600
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00011711956521739131,
      "loss": 0.0621,
      "step": 610
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011576086956521739,
      "loss": 0.0528,
      "step": 620
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011440217391304349,
      "loss": 0.0554,
      "step": 630
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011304347826086956,
      "loss": 0.035,
      "step": 640
    },
    {
      "epoch": 1.74,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.07715282589197159,
      "eval_runtime": 16.9577,
      "eval_samples_per_second": 43.284,
      "eval_steps_per_second": 5.425,
      "step": 640
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011168478260869566,
      "loss": 0.0544,
      "step": 650
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00011032608695652174,
      "loss": 0.0654,
      "step": 660
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00010896739130434783,
      "loss": 0.1072,
      "step": 670
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001076086956521739,
      "loss": 0.0721,
      "step": 680
    },
    {
      "epoch": 1.85,
      "eval_accuracy": 0.9659400544959128,
      "eval_loss": 0.11290551722049713,
      "eval_runtime": 17.1536,
      "eval_samples_per_second": 42.79,
      "eval_steps_per_second": 5.363,
      "step": 680
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00010625000000000001,
      "loss": 0.0804,
      "step": 690
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0001048913043478261,
      "loss": 0.0376,
      "step": 700
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010353260869565218,
      "loss": 0.0543,
      "step": 710
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00010217391304347828,
      "loss": 0.0365,
      "step": 720
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.9645776566757494,
      "eval_loss": 0.14068667590618134,
      "eval_runtime": 17.8845,
      "eval_samples_per_second": 41.041,
      "eval_steps_per_second": 5.144,
      "step": 720
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00010081521739130436,
      "loss": 0.0898,
      "step": 730
    },
    {
      "epoch": 2.01,
      "learning_rate": 9.945652173913043e-05,
      "loss": 0.0295,
      "step": 740
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.809782608695652e-05,
      "loss": 0.0819,
      "step": 750
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.673913043478261e-05,
      "loss": 0.066,
      "step": 760
    },
    {
      "epoch": 2.07,
      "eval_accuracy": 0.9618528610354223,
      "eval_loss": 0.12197968363761902,
      "eval_runtime": 17.0527,
      "eval_samples_per_second": 43.043,
      "eval_steps_per_second": 5.395,
      "step": 760
    },
    {
      "epoch": 2.09,
      "learning_rate": 9.538043478260869e-05,
      "loss": 0.0357,
      "step": 770
    },
    {
      "epoch": 2.12,
      "learning_rate": 9.402173913043478e-05,
      "loss": 0.1752,
      "step": 780
    },
    {
      "epoch": 2.15,
      "learning_rate": 9.266304347826087e-05,
      "loss": 0.0781,
      "step": 790
    },
    {
      "epoch": 2.17,
      "learning_rate": 9.130434782608696e-05,
      "loss": 0.0515,
      "step": 800
    },
    {
      "epoch": 2.17,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.0839233249425888,
      "eval_runtime": 17.1241,
      "eval_samples_per_second": 42.864,
      "eval_steps_per_second": 5.373,
      "step": 800
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.994565217391305e-05,
      "loss": 0.092,
      "step": 810
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.858695652173914e-05,
      "loss": 0.0738,
      "step": 820
    },
    {
      "epoch": 2.26,
      "learning_rate": 8.722826086956522e-05,
      "loss": 0.0888,
      "step": 830
    },
    {
      "epoch": 2.28,
      "learning_rate": 8.586956521739131e-05,
      "loss": 0.0223,
      "step": 840
    },
    {
      "epoch": 2.28,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.0724789947271347,
      "eval_runtime": 17.0736,
      "eval_samples_per_second": 42.99,
      "eval_steps_per_second": 5.388,
      "step": 840
    },
    {
      "epoch": 2.31,
      "learning_rate": 8.45108695652174e-05,
      "loss": 0.0657,
      "step": 850
    },
    {
      "epoch": 2.34,
      "learning_rate": 8.315217391304349e-05,
      "loss": 0.0554,
      "step": 860
    },
    {
      "epoch": 2.36,
      "learning_rate": 8.179347826086957e-05,
      "loss": 0.038,
      "step": 870
    },
    {
      "epoch": 2.39,
      "learning_rate": 8.043478260869566e-05,
      "loss": 0.0131,
      "step": 880
    },
    {
      "epoch": 2.39,
      "eval_accuracy": 0.9632152588555858,
      "eval_loss": 0.13555656373500824,
      "eval_runtime": 17.1277,
      "eval_samples_per_second": 42.855,
      "eval_steps_per_second": 5.371,
      "step": 880
    },
    {
      "epoch": 2.42,
      "learning_rate": 7.907608695652175e-05,
      "loss": 0.0298,
      "step": 890
    },
    {
      "epoch": 2.45,
      "learning_rate": 7.771739130434783e-05,
      "loss": 0.1934,
      "step": 900
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.635869565217392e-05,
      "loss": 0.0707,
      "step": 910
    },
    {
      "epoch": 2.5,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0701,
      "step": 920
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.9673024523160763,
      "eval_loss": 0.09190992265939713,
      "eval_runtime": 17.6207,
      "eval_samples_per_second": 41.656,
      "eval_steps_per_second": 5.221,
      "step": 920
    },
    {
      "epoch": 2.53,
      "learning_rate": 7.364130434782608e-05,
      "loss": 0.055,
      "step": 930
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.228260869565217e-05,
      "loss": 0.0723,
      "step": 940
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.092391304347826e-05,
      "loss": 0.0969,
      "step": 950
    },
    {
      "epoch": 2.61,
      "learning_rate": 6.956521739130436e-05,
      "loss": 0.0605,
      "step": 960
    },
    {
      "epoch": 2.61,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.07686856389045715,
      "eval_runtime": 17.3639,
      "eval_samples_per_second": 42.272,
      "eval_steps_per_second": 5.298,
      "step": 960
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.820652173913043e-05,
      "loss": 0.0986,
      "step": 970
    },
    {
      "epoch": 2.66,
      "learning_rate": 6.684782608695652e-05,
      "loss": 0.0368,
      "step": 980
    },
    {
      "epoch": 2.69,
      "learning_rate": 6.548913043478261e-05,
      "loss": 0.0392,
      "step": 990
    },
    {
      "epoch": 2.72,
      "learning_rate": 6.413043478260869e-05,
      "loss": 0.0382,
      "step": 1000
    },
    {
      "epoch": 2.72,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.08056215196847916,
      "eval_runtime": 17.5095,
      "eval_samples_per_second": 41.92,
      "eval_steps_per_second": 5.254,
      "step": 1000
    },
    {
      "epoch": 2.74,
      "learning_rate": 6.277173913043478e-05,
      "loss": 0.0602,
      "step": 1010
    },
    {
      "epoch": 2.77,
      "learning_rate": 6.141304347826087e-05,
      "loss": 0.0264,
      "step": 1020
    },
    {
      "epoch": 2.8,
      "learning_rate": 6.0054347826086955e-05,
      "loss": 0.0458,
      "step": 1030
    },
    {
      "epoch": 2.83,
      "learning_rate": 5.869565217391305e-05,
      "loss": 0.0409,
      "step": 1040
    },
    {
      "epoch": 2.83,
      "eval_accuracy": 0.952316076294278,
      "eval_loss": 0.0872640386223793,
      "eval_runtime": 17.1006,
      "eval_samples_per_second": 42.922,
      "eval_steps_per_second": 5.38,
      "step": 1040
    },
    {
      "epoch": 2.85,
      "learning_rate": 5.7336956521739136e-05,
      "loss": 0.0527,
      "step": 1050
    },
    {
      "epoch": 2.88,
      "learning_rate": 5.5978260869565226e-05,
      "loss": 0.0658,
      "step": 1060
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.461956521739131e-05,
      "loss": 0.0592,
      "step": 1070
    },
    {
      "epoch": 2.93,
      "learning_rate": 5.32608695652174e-05,
      "loss": 0.0302,
      "step": 1080
    },
    {
      "epoch": 2.93,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.13446882367134094,
      "eval_runtime": 17.5217,
      "eval_samples_per_second": 41.891,
      "eval_steps_per_second": 5.251,
      "step": 1080
    },
    {
      "epoch": 2.96,
      "learning_rate": 5.1902173913043484e-05,
      "loss": 0.1134,
      "step": 1090
    },
    {
      "epoch": 2.99,
      "learning_rate": 5.054347826086957e-05,
      "loss": 0.0676,
      "step": 1100
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.918478260869566e-05,
      "loss": 0.0615,
      "step": 1110
    },
    {
      "epoch": 3.04,
      "learning_rate": 4.782608695652174e-05,
      "loss": 0.0528,
      "step": 1120
    },
    {
      "epoch": 3.04,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.07230885326862335,
      "eval_runtime": 16.9893,
      "eval_samples_per_second": 43.204,
      "eval_steps_per_second": 5.415,
      "step": 1120
    },
    {
      "epoch": 3.07,
      "learning_rate": 4.646739130434783e-05,
      "loss": 0.0578,
      "step": 1130
    },
    {
      "epoch": 3.1,
      "learning_rate": 4.5108695652173916e-05,
      "loss": 0.0393,
      "step": 1140
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.375e-05,
      "loss": 0.05,
      "step": 1150
    },
    {
      "epoch": 3.15,
      "learning_rate": 4.239130434782609e-05,
      "loss": 0.0657,
      "step": 1160
    },
    {
      "epoch": 3.15,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.09432989358901978,
      "eval_runtime": 17.3576,
      "eval_samples_per_second": 42.287,
      "eval_steps_per_second": 5.3,
      "step": 1160
    },
    {
      "epoch": 3.18,
      "learning_rate": 4.1032608695652174e-05,
      "loss": 0.0554,
      "step": 1170
    },
    {
      "epoch": 3.21,
      "learning_rate": 3.9673913043478264e-05,
      "loss": 0.0391,
      "step": 1180
    },
    {
      "epoch": 3.23,
      "learning_rate": 3.831521739130435e-05,
      "loss": 0.0367,
      "step": 1190
    },
    {
      "epoch": 3.26,
      "learning_rate": 3.695652173913043e-05,
      "loss": 0.0546,
      "step": 1200
    },
    {
      "epoch": 3.26,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.1177757978439331,
      "eval_runtime": 17.0297,
      "eval_samples_per_second": 43.101,
      "eval_steps_per_second": 5.402,
      "step": 1200
    },
    {
      "epoch": 3.29,
      "learning_rate": 3.559782608695653e-05,
      "loss": 0.0321,
      "step": 1210
    },
    {
      "epoch": 3.32,
      "learning_rate": 3.423913043478261e-05,
      "loss": 0.0274,
      "step": 1220
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.2880434782608696e-05,
      "loss": 0.0554,
      "step": 1230
    },
    {
      "epoch": 3.37,
      "learning_rate": 3.152173913043479e-05,
      "loss": 0.0307,
      "step": 1240
    },
    {
      "epoch": 3.37,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.11735932528972626,
      "eval_runtime": 17.2834,
      "eval_samples_per_second": 42.468,
      "eval_steps_per_second": 5.323,
      "step": 1240
    },
    {
      "epoch": 3.4,
      "learning_rate": 3.016304347826087e-05,
      "loss": 0.0863,
      "step": 1250
    },
    {
      "epoch": 3.42,
      "learning_rate": 2.8804347826086957e-05,
      "loss": 0.066,
      "step": 1260
    },
    {
      "epoch": 3.45,
      "learning_rate": 2.7445652173913044e-05,
      "loss": 0.0261,
      "step": 1270
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.608695652173913e-05,
      "loss": 0.0388,
      "step": 1280
    },
    {
      "epoch": 3.48,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.07767504453659058,
      "eval_runtime": 17.498,
      "eval_samples_per_second": 41.948,
      "eval_steps_per_second": 5.258,
      "step": 1280
    },
    {
      "epoch": 3.51,
      "learning_rate": 2.472826086956522e-05,
      "loss": 0.0422,
      "step": 1290
    },
    {
      "epoch": 3.53,
      "learning_rate": 2.3369565217391306e-05,
      "loss": 0.0442,
      "step": 1300
    },
    {
      "epoch": 3.56,
      "learning_rate": 2.2010869565217393e-05,
      "loss": 0.0217,
      "step": 1310
    },
    {
      "epoch": 3.59,
      "learning_rate": 2.065217391304348e-05,
      "loss": 0.0127,
      "step": 1320
    },
    {
      "epoch": 3.59,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.09292852133512497,
      "eval_runtime": 17.0066,
      "eval_samples_per_second": 43.16,
      "eval_steps_per_second": 5.41,
      "step": 1320
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.9293478260869567e-05,
      "loss": 0.0559,
      "step": 1330
    },
    {
      "epoch": 3.64,
      "learning_rate": 1.793478260869565e-05,
      "loss": 0.0453,
      "step": 1340
    },
    {
      "epoch": 3.67,
      "learning_rate": 1.6576086956521738e-05,
      "loss": 0.0321,
      "step": 1350
    },
    {
      "epoch": 3.7,
      "learning_rate": 1.5217391304347828e-05,
      "loss": 0.0408,
      "step": 1360
    },
    {
      "epoch": 3.7,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.08991661667823792,
      "eval_runtime": 16.9692,
      "eval_samples_per_second": 43.255,
      "eval_steps_per_second": 5.422,
      "step": 1360
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.3858695652173914e-05,
      "loss": 0.038,
      "step": 1370
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.25e-05,
      "loss": 0.0416,
      "step": 1380
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.1141304347826088e-05,
      "loss": 0.1011,
      "step": 1390
    },
    {
      "epoch": 3.8,
      "learning_rate": 9.782608695652175e-06,
      "loss": 0.0445,
      "step": 1400
    },
    {
      "epoch": 3.8,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.08311757445335388,
      "eval_runtime": 17.0031,
      "eval_samples_per_second": 43.169,
      "eval_steps_per_second": 5.411,
      "step": 1400
    },
    {
      "epoch": 3.83,
      "learning_rate": 8.423913043478262e-06,
      "loss": 0.0526,
      "step": 1410
    },
    {
      "epoch": 3.86,
      "learning_rate": 7.065217391304347e-06,
      "loss": 0.0458,
      "step": 1420
    },
    {
      "epoch": 3.89,
      "learning_rate": 5.706521739130435e-06,
      "loss": 0.0837,
      "step": 1430
    },
    {
      "epoch": 3.91,
      "learning_rate": 4.347826086956522e-06,
      "loss": 0.0493,
      "step": 1440
    },
    {
      "epoch": 3.91,
      "eval_accuracy": 0.9632152588555858,
      "eval_loss": 0.07967270910739899,
      "eval_runtime": 16.9823,
      "eval_samples_per_second": 43.222,
      "eval_steps_per_second": 5.417,
      "step": 1440
    }
  ],
  "max_steps": 1472,
  "num_train_epochs": 4,
  "total_flos": 1.781944272047278e+18,
  "trial_name": null,
  "trial_params": null
}