{
  "best_metric": 0.058743953704833984,
  "best_model_checkpoint": "./vit-base-beans/checkpoint-1000",
  "epoch": 3.9130434782608696,
  "global_step": 1440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0001986413043478261,
      "loss": 0.4939,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001972826086956522,
      "loss": 0.0893,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019592391304347828,
      "loss": 0.1067,
      "step": 30
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019456521739130434,
      "loss": 0.1309,
      "step": 40
    },
    {
      "epoch": 0.11,
      "eval_accuracy": 0.9645776566757494,
      "eval_loss": 0.1362440586090088,
      "eval_runtime": 17.6545,
      "eval_samples_per_second": 41.576,
      "eval_steps_per_second": 5.211,
      "step": 40
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019320652173913046,
      "loss": 0.1611,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019184782608695653,
      "loss": 0.1387,
      "step": 60
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019048913043478262,
      "loss": 0.1267,
      "step": 70
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001891304347826087,
      "loss": 0.1563,
      "step": 80
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.9632152588555858,
      "eval_loss": 0.15953560173511505,
      "eval_runtime": 18.8416,
      "eval_samples_per_second": 38.956,
      "eval_steps_per_second": 4.883,
      "step": 80
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001877717391304348,
      "loss": 0.1176,
      "step": 90
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001864130434782609,
      "loss": 0.0943,
      "step": 100
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018505434782608698,
      "loss": 0.1512,
      "step": 110
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00018369565217391304,
      "loss": 0.1858,
      "step": 120
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.946866485013624,
      "eval_loss": 0.20780953764915466,
      "eval_runtime": 17.8429,
      "eval_samples_per_second": 41.137,
      "eval_steps_per_second": 5.156,
      "step": 120
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00018233695652173913,
      "loss": 0.1897,
      "step": 130
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00018097826086956522,
      "loss": 0.1031,
      "step": 140
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001796195652173913,
      "loss": 0.0836,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001782608695652174,
      "loss": 0.072,
      "step": 160
    },
    {
      "epoch": 0.43,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.10965152829885483,
      "eval_runtime": 17.2333,
      "eval_samples_per_second": 42.592,
      "eval_steps_per_second": 5.338,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001769021739130435,
      "loss": 0.0967,
      "step": 170
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00017554347826086956,
      "loss": 0.0429,
      "step": 180
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00017418478260869567,
      "loss": 0.0393,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00017282608695652174,
      "loss": 0.1578,
      "step": 200
    },
    {
      "epoch": 0.54,
      "eval_accuracy": 0.9673024523160763,
      "eval_loss": 0.11375105381011963,
      "eval_runtime": 17.5835,
      "eval_samples_per_second": 41.744,
      "eval_steps_per_second": 5.232,
      "step": 200
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00017146739130434783,
      "loss": 0.1322,
      "step": 210
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00017010869565217392,
      "loss": 0.0823,
      "step": 220
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00016875,
      "loss": 0.1165,
      "step": 230
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001673913043478261,
      "loss": 0.1477,
      "step": 240
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.9673024523160763,
      "eval_loss": 0.10707248002290726,
      "eval_runtime": 18.2425,
      "eval_samples_per_second": 40.236,
      "eval_steps_per_second": 5.043,
      "step": 240
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0001660326086956522,
      "loss": 0.1144,
      "step": 250
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00016467391304347828,
      "loss": 0.1695,
      "step": 260
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00016331521739130434,
      "loss": 0.1258,
      "step": 270
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00016195652173913046,
      "loss": 0.1107,
      "step": 280
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.9700272479564033,
      "eval_loss": 0.08703844249248505,
      "eval_runtime": 17.4796,
      "eval_samples_per_second": 41.992,
      "eval_steps_per_second": 5.263,
      "step": 280
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016059782608695652,
      "loss": 0.1093,
      "step": 290
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00015923913043478262,
      "loss": 0.0835,
      "step": 300
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001578804347826087,
      "loss": 0.0558,
      "step": 310
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001565217391304348,
      "loss": 0.1394,
      "step": 320
    },
    {
      "epoch": 0.87,
      "eval_accuracy": 0.9673024523160763,
      "eval_loss": 0.11179673671722412,
      "eval_runtime": 17.932,
      "eval_samples_per_second": 40.932,
      "eval_steps_per_second": 5.13,
      "step": 320
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015516304347826086,
      "loss": 0.1003,
      "step": 330
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015380434782608698,
      "loss": 0.0771,
      "step": 340
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015244565217391304,
      "loss": 0.1046,
      "step": 350
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015108695652173913,
      "loss": 0.0496,
      "step": 360
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.9700272479564033,
      "eval_loss": 0.1116204559803009,
      "eval_runtime": 17.3489,
      "eval_samples_per_second": 42.308,
      "eval_steps_per_second": 5.303,
      "step": 360
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00014972826086956522,
      "loss": 0.2176,
      "step": 370
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.0001483695652173913,
      "loss": 0.0762,
      "step": 380
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0001470108695652174,
      "loss": 0.0792,
      "step": 390
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001456521739130435,
      "loss": 0.0501,
      "step": 400
    },
    {
      "epoch": 1.09,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.09637871384620667,
      "eval_runtime": 17.4012,
      "eval_samples_per_second": 42.181,
      "eval_steps_per_second": 5.287,
      "step": 400
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014429347826086956,
      "loss": 0.0874,
      "step": 410
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00014293478260869567,
      "loss": 0.075,
      "step": 420
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00014157608695652174,
      "loss": 0.1461,
      "step": 430
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00014021739130434783,
      "loss": 0.0853,
      "step": 440
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.9700272479564033,
      "eval_loss": 0.11846118420362473,
      "eval_runtime": 17.3926,
      "eval_samples_per_second": 42.202,
      "eval_steps_per_second": 5.29,
      "step": 440
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013885869565217392,
      "loss": 0.1182,
      "step": 450
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001375,
      "loss": 0.0839,
      "step": 460
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001361413043478261,
      "loss": 0.0657,
      "step": 470
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0001347826086956522,
      "loss": 0.0362,
      "step": 480
    },
    {
      "epoch": 1.3,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.0951201394200325,
      "eval_runtime": 17.6244,
      "eval_samples_per_second": 41.647,
      "eval_steps_per_second": 5.22,
      "step": 480
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00013342391304347828,
      "loss": 0.0846,
      "step": 490
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00013206521739130434,
      "loss": 0.1286,
      "step": 500
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00013070652173913046,
      "loss": 0.0269,
      "step": 510
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00012934782608695652,
      "loss": 0.0898,
      "step": 520
    },
    {
      "epoch": 1.41,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.08077488094568253,
      "eval_runtime": 17.4813,
      "eval_samples_per_second": 41.988,
      "eval_steps_per_second": 5.263,
      "step": 520
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00012798913043478261,
      "loss": 0.1056,
      "step": 530
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.0001266304347826087,
      "loss": 0.0592,
      "step": 540
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0001252717391304348,
      "loss": 0.064,
      "step": 550
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.00012391304347826086,
      "loss": 0.04,
      "step": 560
    },
    {
      "epoch": 1.52,
      "eval_accuracy": 0.9645776566757494,
      "eval_loss": 0.1037047803401947,
      "eval_runtime": 18.122,
      "eval_samples_per_second": 40.503,
      "eval_steps_per_second": 5.077,
      "step": 560
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00012255434782608698,
      "loss": 0.0988,
      "step": 570
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00012119565217391304,
      "loss": 0.0948,
      "step": 580
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00011983695652173914,
      "loss": 0.1029,
      "step": 590
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00011847826086956522,
      "loss": 0.106,
      "step": 600
    },
    {
      "epoch": 1.63,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07200556993484497,
      "eval_runtime": 17.4833,
      "eval_samples_per_second": 41.983,
      "eval_steps_per_second": 5.262,
      "step": 600
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00011711956521739131,
      "loss": 0.0976,
      "step": 610
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.00011576086956521739,
      "loss": 0.0603,
      "step": 620
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00011440217391304349,
      "loss": 0.0507,
      "step": 630
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00011304347826086956,
      "loss": 0.0753,
      "step": 640
    },
    {
      "epoch": 1.74,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07421814650297165,
      "eval_runtime": 17.6534,
      "eval_samples_per_second": 41.578,
      "eval_steps_per_second": 5.211,
      "step": 640
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011168478260869566,
      "loss": 0.0664,
      "step": 650
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00011032608695652174,
      "loss": 0.0628,
      "step": 660
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00010896739130434783,
      "loss": 0.1395,
      "step": 670
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001076086956521739,
      "loss": 0.0774,
      "step": 680
    },
    {
      "epoch": 1.85,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07968690246343613,
      "eval_runtime": 17.474,
      "eval_samples_per_second": 42.005,
      "eval_steps_per_second": 5.265,
      "step": 680
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00010625000000000001,
      "loss": 0.0545,
      "step": 690
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.0001048913043478261,
      "loss": 0.0493,
      "step": 700
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010353260869565218,
      "loss": 0.0773,
      "step": 710
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00010217391304347828,
      "loss": 0.0628,
      "step": 720
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.06992962956428528,
      "eval_runtime": 17.9437,
      "eval_samples_per_second": 40.906,
      "eval_steps_per_second": 5.127,
      "step": 720
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00010081521739130436,
      "loss": 0.068,
      "step": 730
    },
    {
      "epoch": 2.01,
      "learning_rate": 9.945652173913043e-05,
      "loss": 0.0489,
      "step": 740
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.809782608695652e-05,
      "loss": 0.1042,
      "step": 750
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.673913043478261e-05,
      "loss": 0.0216,
      "step": 760
    },
    {
      "epoch": 2.07,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.09693184494972229,
      "eval_runtime": 16.972,
      "eval_samples_per_second": 43.248,
      "eval_steps_per_second": 5.421,
      "step": 760
    },
    {
      "epoch": 2.09,
      "learning_rate": 9.538043478260869e-05,
      "loss": 0.0751,
      "step": 770
    },
    {
      "epoch": 2.12,
      "learning_rate": 9.402173913043478e-05,
      "loss": 0.0625,
      "step": 780
    },
    {
      "epoch": 2.15,
      "learning_rate": 9.266304347826087e-05,
      "loss": 0.0581,
      "step": 790
    },
    {
      "epoch": 2.17,
      "learning_rate": 9.130434782608696e-05,
      "loss": 0.04,
      "step": 800
    },
    {
      "epoch": 2.17,
      "eval_accuracy": 0.9686648501362398,
      "eval_loss": 0.06984598189592361,
      "eval_runtime": 17.4344,
      "eval_samples_per_second": 42.101,
      "eval_steps_per_second": 5.277,
      "step": 800
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.994565217391305e-05,
      "loss": 0.0959,
      "step": 810
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.858695652173914e-05,
      "loss": 0.0842,
      "step": 820
    },
    {
      "epoch": 2.26,
      "learning_rate": 8.722826086956522e-05,
      "loss": 0.0556,
      "step": 830
    },
    {
      "epoch": 2.28,
      "learning_rate": 8.586956521739131e-05,
      "loss": 0.0372,
      "step": 840
    },
    {
      "epoch": 2.28,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.09554506838321686,
      "eval_runtime": 17.2543,
      "eval_samples_per_second": 42.54,
      "eval_steps_per_second": 5.332,
      "step": 840
    },
    {
      "epoch": 2.31,
      "learning_rate": 8.45108695652174e-05,
      "loss": 0.1344,
      "step": 850
    },
    {
      "epoch": 2.34,
      "learning_rate": 8.315217391304349e-05,
      "loss": 0.02,
      "step": 860
    },
    {
      "epoch": 2.36,
      "learning_rate": 8.179347826086957e-05,
      "loss": 0.0706,
      "step": 870
    },
    {
      "epoch": 2.39,
      "learning_rate": 8.043478260869566e-05,
      "loss": 0.0609,
      "step": 880
    },
    {
      "epoch": 2.39,
      "eval_accuracy": 0.9700272479564033,
      "eval_loss": 0.06672107428312302,
      "eval_runtime": 17.3439,
      "eval_samples_per_second": 42.32,
      "eval_steps_per_second": 5.304,
      "step": 880
    },
    {
      "epoch": 2.42,
      "learning_rate": 7.907608695652175e-05,
      "loss": 0.0986,
      "step": 890
    },
    {
      "epoch": 2.45,
      "learning_rate": 7.771739130434783e-05,
      "loss": 0.0523,
      "step": 900
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.635869565217392e-05,
      "loss": 0.0652,
      "step": 910
    },
    {
      "epoch": 2.5,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0375,
      "step": 920
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07068129628896713,
      "eval_runtime": 17.3031,
      "eval_samples_per_second": 42.42,
      "eval_steps_per_second": 5.317,
      "step": 920
    },
    {
      "epoch": 2.53,
      "learning_rate": 7.364130434782608e-05,
      "loss": 0.0506,
      "step": 930
    },
    {
      "epoch": 2.55,
      "learning_rate": 7.228260869565217e-05,
      "loss": 0.0764,
      "step": 940
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.092391304347826e-05,
      "loss": 0.0622,
      "step": 950
    },
    {
      "epoch": 2.61,
      "learning_rate": 6.956521739130436e-05,
      "loss": 0.0532,
      "step": 960
    },
    {
      "epoch": 2.61,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.06182615086436272,
      "eval_runtime": 17.2684,
      "eval_samples_per_second": 42.505,
      "eval_steps_per_second": 5.328,
      "step": 960
    },
    {
      "epoch": 2.64,
      "learning_rate": 6.820652173913043e-05,
      "loss": 0.0462,
      "step": 970
    },
    {
      "epoch": 2.66,
      "learning_rate": 6.684782608695652e-05,
      "loss": 0.0477,
      "step": 980
    },
    {
      "epoch": 2.69,
      "learning_rate": 6.548913043478261e-05,
      "loss": 0.0526,
      "step": 990
    },
    {
      "epoch": 2.72,
      "learning_rate": 6.413043478260869e-05,
      "loss": 0.092,
      "step": 1000
    },
    {
      "epoch": 2.72,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.058743953704833984,
      "eval_runtime": 17.0643,
      "eval_samples_per_second": 43.014,
      "eval_steps_per_second": 5.391,
      "step": 1000
    },
    {
      "epoch": 2.74,
      "learning_rate": 6.277173913043478e-05,
      "loss": 0.0704,
      "step": 1010
    },
    {
      "epoch": 2.77,
      "learning_rate": 6.141304347826087e-05,
      "loss": 0.0301,
      "step": 1020
    },
    {
      "epoch": 2.8,
      "learning_rate": 6.0054347826086955e-05,
      "loss": 0.0788,
      "step": 1030
    },
    {
      "epoch": 2.83,
      "learning_rate": 5.869565217391305e-05,
      "loss": 0.0742,
      "step": 1040
    },
    {
      "epoch": 2.83,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.06040079519152641,
      "eval_runtime": 17.6675,
      "eval_samples_per_second": 41.545,
      "eval_steps_per_second": 5.207,
      "step": 1040
    },
    {
      "epoch": 2.85,
      "learning_rate": 5.7336956521739136e-05,
      "loss": 0.0658,
      "step": 1050
    },
    {
      "epoch": 2.88,
      "learning_rate": 5.5978260869565226e-05,
      "loss": 0.0564,
      "step": 1060
    },
    {
      "epoch": 2.91,
      "learning_rate": 5.461956521739131e-05,
      "loss": 0.0486,
      "step": 1070
    },
    {
      "epoch": 2.93,
      "learning_rate": 5.32608695652174e-05,
      "loss": 0.0443,
      "step": 1080
    },
    {
      "epoch": 2.93,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.11983449757099152,
      "eval_runtime": 17.2197,
      "eval_samples_per_second": 42.626,
      "eval_steps_per_second": 5.343,
      "step": 1080
    },
    {
      "epoch": 2.96,
      "learning_rate": 5.1902173913043484e-05,
      "loss": 0.0397,
      "step": 1090
    },
    {
      "epoch": 2.99,
      "learning_rate": 5.054347826086957e-05,
      "loss": 0.0818,
      "step": 1100
    },
    {
      "epoch": 3.02,
      "learning_rate": 4.918478260869566e-05,
      "loss": 0.0492,
      "step": 1110
    },
    {
      "epoch": 3.04,
      "learning_rate": 4.782608695652174e-05,
      "loss": 0.062,
      "step": 1120
    },
    {
      "epoch": 3.04,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.06326533854007721,
      "eval_runtime": 17.0684,
      "eval_samples_per_second": 43.003,
      "eval_steps_per_second": 5.39,
      "step": 1120
    },
    {
      "epoch": 3.07,
      "learning_rate": 4.646739130434783e-05,
      "loss": 0.0414,
      "step": 1130
    },
    {
      "epoch": 3.1,
      "learning_rate": 4.5108695652173916e-05,
      "loss": 0.0331,
      "step": 1140
    },
    {
      "epoch": 3.12,
      "learning_rate": 4.375e-05,
      "loss": 0.0531,
      "step": 1150
    },
    {
      "epoch": 3.15,
      "learning_rate": 4.239130434782609e-05,
      "loss": 0.0708,
      "step": 1160
    },
    {
      "epoch": 3.15,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07502922415733337,
      "eval_runtime": 17.6357,
      "eval_samples_per_second": 41.62,
      "eval_steps_per_second": 5.217,
      "step": 1160
    },
    {
      "epoch": 3.18,
      "learning_rate": 4.1032608695652174e-05,
      "loss": 0.0507,
      "step": 1170
    },
    {
      "epoch": 3.21,
      "learning_rate": 3.9673913043478264e-05,
      "loss": 0.0607,
      "step": 1180
    },
    {
      "epoch": 3.23,
      "learning_rate": 3.831521739130435e-05,
      "loss": 0.0374,
      "step": 1190
    },
    {
      "epoch": 3.26,
      "learning_rate": 3.695652173913043e-05,
      "loss": 0.0252,
      "step": 1200
    },
    {
      "epoch": 3.26,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.08786644786596298,
      "eval_runtime": 17.4164,
      "eval_samples_per_second": 42.144,
      "eval_steps_per_second": 5.282,
      "step": 1200
    },
    {
      "epoch": 3.29,
      "learning_rate": 3.559782608695653e-05,
      "loss": 0.0502,
      "step": 1210
    },
    {
      "epoch": 3.32,
      "learning_rate": 3.423913043478261e-05,
      "loss": 0.0513,
      "step": 1220
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.2880434782608696e-05,
      "loss": 0.0698,
      "step": 1230
    },
    {
      "epoch": 3.37,
      "learning_rate": 3.152173913043479e-05,
      "loss": 0.038,
      "step": 1240
    },
    {
      "epoch": 3.37,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07282000035047531,
      "eval_runtime": 17.6309,
      "eval_samples_per_second": 41.631,
      "eval_steps_per_second": 5.218,
      "step": 1240
    },
    {
      "epoch": 3.4,
      "learning_rate": 3.016304347826087e-05,
      "loss": 0.0707,
      "step": 1250
    },
    {
      "epoch": 3.42,
      "learning_rate": 2.8804347826086957e-05,
      "loss": 0.0639,
      "step": 1260
    },
    {
      "epoch": 3.45,
      "learning_rate": 2.7445652173913044e-05,
      "loss": 0.0647,
      "step": 1270
    },
    {
      "epoch": 3.48,
      "learning_rate": 2.608695652173913e-05,
      "loss": 0.0348,
      "step": 1280
    },
    {
      "epoch": 3.48,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.07391868531703949,
      "eval_runtime": 17.5666,
      "eval_samples_per_second": 41.784,
      "eval_steps_per_second": 5.237,
      "step": 1280
    },
    {
      "epoch": 3.51,
      "learning_rate": 2.472826086956522e-05,
      "loss": 0.0471,
      "step": 1290
    },
    {
      "epoch": 3.53,
      "learning_rate": 2.3369565217391306e-05,
      "loss": 0.0859,
      "step": 1300
    },
    {
      "epoch": 3.56,
      "learning_rate": 2.2010869565217393e-05,
      "loss": 0.0558,
      "step": 1310
    },
    {
      "epoch": 3.59,
      "learning_rate": 2.065217391304348e-05,
      "loss": 0.0348,
      "step": 1320
    },
    {
      "epoch": 3.59,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.09225168824195862,
      "eval_runtime": 17.7535,
      "eval_samples_per_second": 41.344,
      "eval_steps_per_second": 5.182,
      "step": 1320
    },
    {
      "epoch": 3.61,
      "learning_rate": 1.9293478260869567e-05,
      "loss": 0.0205,
      "step": 1330
    },
    {
      "epoch": 3.64,
      "learning_rate": 1.793478260869565e-05,
      "loss": 0.051,
      "step": 1340
    },
    {
      "epoch": 3.67,
      "learning_rate": 1.6576086956521738e-05,
      "loss": 0.0419,
      "step": 1350
    },
    {
      "epoch": 3.7,
      "learning_rate": 1.5217391304347828e-05,
      "loss": 0.019,
      "step": 1360
    },
    {
      "epoch": 3.7,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.08419564366340637,
      "eval_runtime": 17.5834,
      "eval_samples_per_second": 41.744,
      "eval_steps_per_second": 5.232,
      "step": 1360
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.3858695652173914e-05,
      "loss": 0.0473,
      "step": 1370
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.25e-05,
      "loss": 0.0316,
      "step": 1380
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.1141304347826088e-05,
      "loss": 0.0301,
      "step": 1390
    },
    {
      "epoch": 3.8,
      "learning_rate": 9.782608695652175e-06,
      "loss": 0.0181,
      "step": 1400
    },
    {
      "epoch": 3.8,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.09280063211917877,
      "eval_runtime": 17.9174,
      "eval_samples_per_second": 40.966,
      "eval_steps_per_second": 5.135,
      "step": 1400
    },
    {
      "epoch": 3.83,
      "learning_rate": 8.423913043478262e-06,
      "loss": 0.0314,
      "step": 1410
    },
    {
      "epoch": 3.86,
      "learning_rate": 7.065217391304347e-06,
      "loss": 0.0606,
      "step": 1420
    },
    {
      "epoch": 3.89,
      "learning_rate": 5.706521739130435e-06,
      "loss": 0.0453,
      "step": 1430
    },
    {
      "epoch": 3.91,
      "learning_rate": 4.347826086956522e-06,
      "loss": 0.0496,
      "step": 1440
    },
    {
      "epoch": 3.91,
      "eval_accuracy": 0.9713896457765667,
      "eval_loss": 0.08846529573202133,
      "eval_runtime": 17.2944,
      "eval_samples_per_second": 42.442,
      "eval_steps_per_second": 5.32,
      "step": 1440
    }
  ],
  "max_steps": 1472,
  "num_train_epochs": 4,
  "total_flos": 1.781944272047278e+18,
  "trial_name": null,
  "trial_params": null
}