{
  "best_metric": 0.7327188940092166,
  "best_model_checkpoint": "videomae-base-finetuned-subset-check10/checkpoint-896",
  "epoch": 19.041441441441442,
  "eval_steps": 500,
  "global_step": 1110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.01, "learning_rate": 9.00900900900901e-07, "loss": 1.6459, "step": 10},
    {"epoch": 0.02, "learning_rate": 1.801801801801802e-06, "loss": 1.636, "step": 20},
    {"epoch": 0.03, "learning_rate": 2.702702702702703e-06, "loss": 1.6904, "step": 30},
    {"epoch": 0.04, "learning_rate": 3.603603603603604e-06, "loss": 1.6413, "step": 40},
    {"epoch": 0.05, "learning_rate": 4.504504504504505e-06, "loss": 1.6285, "step": 50},
    {"epoch": 0.05, "eval_accuracy": 0.3686635944700461, "eval_loss": 1.6004022359848022, "eval_runtime": 173.9244, "eval_samples_per_second": 1.248, "eval_steps_per_second": 0.161, "step": 56},
    {"epoch": 1.0, "learning_rate": 5.405405405405406e-06, "loss": 1.63, "step": 60},
    {"epoch": 1.01, "learning_rate": 6.3063063063063065e-06, "loss": 1.5959, "step": 70},
    {"epoch": 1.02, "learning_rate": 7.207207207207208e-06, "loss": 1.6112, "step": 80},
    {"epoch": 1.03, "learning_rate": 8.108108108108109e-06, "loss": 1.6242, "step": 90},
    {"epoch": 1.04, "learning_rate": 9.00900900900901e-06, "loss": 1.5879, "step": 100},
    {"epoch": 1.05, "learning_rate": 9.90990990990991e-06, "loss": 1.6056, "step": 110},
    {"epoch": 1.05, "eval_accuracy": 0.19815668202764977, "eval_loss": 1.5825837850570679, "eval_runtime": 167.123, "eval_samples_per_second": 1.298, "eval_steps_per_second": 0.168, "step": 112},
    {"epoch": 2.01, "learning_rate": 9.90990990990991e-06, "loss": 1.5988, "step": 120},
    {"epoch": 2.02, "learning_rate": 9.80980980980981e-06, "loss": 1.5945, "step": 130},
    {"epoch": 2.03, "learning_rate": 9.70970970970971e-06, "loss": 1.6349, "step": 140},
    {"epoch": 2.03, "learning_rate": 9.60960960960961e-06, "loss": 1.5828, "step": 150},
    {"epoch": 2.04, "learning_rate": 9.50950950950951e-06, "loss": 1.5254, "step": 160},
    {"epoch": 2.05, "eval_accuracy": 0.14746543778801843, "eval_loss": 1.5978792905807495, "eval_runtime": 166.6054, "eval_samples_per_second": 1.302, "eval_steps_per_second": 0.168, "step": 168},
    {"epoch": 3.0, "learning_rate": 9.40940940940941e-06, "loss": 1.5397, "step": 170},
    {"epoch": 3.01, "learning_rate": 9.30930930930931e-06, "loss": 1.5011, "step": 180},
    {"epoch": 3.02, "learning_rate": 9.20920920920921e-06, "loss": 1.4947, "step": 190},
    {"epoch": 3.03, "learning_rate": 9.10910910910911e-06, "loss": 1.4436, "step": 200},
    {"epoch": 3.04, "learning_rate": 9.00900900900901e-06, "loss": 1.3674, "step": 210},
    {"epoch": 3.05, "learning_rate": 8.90890890890891e-06, "loss": 1.4239, "step": 220},
    {"epoch": 3.05, "eval_accuracy": 0.6359447004608295, "eval_loss": 1.2076131105422974, "eval_runtime": 167.766, "eval_samples_per_second": 1.293, "eval_steps_per_second": 0.167, "step": 224},
    {"epoch": 4.01, "learning_rate": 8.80880880880881e-06, "loss": 1.4136, "step": 230},
    {"epoch": 4.01, "learning_rate": 8.70870870870871e-06, "loss": 1.3031, "step": 240},
    {"epoch": 4.02, "learning_rate": 8.60860860860861e-06, "loss": 1.3306, "step": 250},
    {"epoch": 4.03, "learning_rate": 8.50850850850851e-06, "loss": 1.2416, "step": 260},
    {"epoch": 4.04, "learning_rate": 8.408408408408409e-06, "loss": 1.1851, "step": 270},
    {"epoch": 4.05, "learning_rate": 8.308308308308309e-06, "loss": 1.1884, "step": 280},
    {"epoch": 4.05, "eval_accuracy": 0.5576036866359447, "eval_loss": 1.1865636110305786, "eval_runtime": 165.9475, "eval_samples_per_second": 1.308, "eval_steps_per_second": 0.169, "step": 280},
    {"epoch": 5.01, "learning_rate": 8.208208208208209e-06, "loss": 1.3263, "step": 290},
    {"epoch": 5.02, "learning_rate": 8.108108108108109e-06, "loss": 1.2252, "step": 300},
    {"epoch": 5.03, "learning_rate": 8.00800800800801e-06, "loss": 1.2029, "step": 310},
    {"epoch": 5.04, "learning_rate": 7.90790790790791e-06, "loss": 1.1463, "step": 320},
    {"epoch": 5.05, "learning_rate": 7.807807807807808e-06, "loss": 1.2336, "step": 330},
    {"epoch": 5.05, "eval_accuracy": 0.6405529953917051, "eval_loss": 1.028122067451477, "eval_runtime": 165.6809, "eval_samples_per_second": 1.31, "eval_steps_per_second": 0.169, "step": 336},
    {"epoch": 6.0, "learning_rate": 7.707707707707708e-06, "loss": 1.224, "step": 340},
    {"epoch": 6.01, "learning_rate": 7.607607607607608e-06, "loss": 1.1512, "step": 350},
    {"epoch": 6.02, "learning_rate": 7.507507507507507e-06, "loss": 1.1913, "step": 360},
    {"epoch": 6.03, "learning_rate": 7.4074074074074075e-06, "loss": 1.0144, "step": 370},
    {"epoch": 6.04, "learning_rate": 7.307307307307308e-06, "loss": 1.0578, "step": 380},
    {"epoch": 6.05, "learning_rate": 7.207207207207208e-06, "loss": 1.0726, "step": 390},
    {"epoch": 6.05, "eval_accuracy": 0.663594470046083, "eval_loss": 0.9172731637954712, "eval_runtime": 166.3821, "eval_samples_per_second": 1.304, "eval_steps_per_second": 0.168, "step": 392},
    {"epoch": 7.01, "learning_rate": 7.107107107107107e-06, "loss": 1.084, "step": 400},
    {"epoch": 7.02, "learning_rate": 7.007007007007007e-06, "loss": 1.0031, "step": 410},
    {"epoch": 7.03, "learning_rate": 6.906906906906907e-06, "loss": 0.997, "step": 420},
    {"epoch": 7.03, "learning_rate": 6.8068068068068075e-06, "loss": 1.1918, "step": 430},
    {"epoch": 7.04, "learning_rate": 6.706706706706707e-06, "loss": 1.0966, "step": 440},
    {"epoch": 7.05, "eval_accuracy": 0.5990783410138248, "eval_loss": 1.0597409009933472, "eval_runtime": 164.5381, "eval_samples_per_second": 1.319, "eval_steps_per_second": 0.17, "step": 448},
    {"epoch": 8.0, "learning_rate": 6.606606606606607e-06, "loss": 1.1823, "step": 450},
    {"epoch": 8.01, "learning_rate": 6.506506506506507e-06, "loss": 0.9391, "step": 460},
    {"epoch": 8.02, "learning_rate": 6.406406406406407e-06, "loss": 1.1039, "step": 470},
    {"epoch": 8.03, "learning_rate": 6.3063063063063065e-06, "loss": 1.1439, "step": 480},
    {"epoch": 8.04, "learning_rate": 6.206206206206207e-06, "loss": 0.9368, "step": 490},
    {"epoch": 8.05, "learning_rate": 6.106106106106107e-06, "loss": 0.956, "step": 500},
    {"epoch": 8.05, "eval_accuracy": 0.6682027649769585, "eval_loss": 0.9277688264846802, "eval_runtime": 165.822, "eval_samples_per_second": 1.309, "eval_steps_per_second": 0.169, "step": 504},
    {"epoch": 9.01, "learning_rate": 6.006006006006007e-06, "loss": 0.9728, "step": 510},
    {"epoch": 9.01, "learning_rate": 5.905905905905906e-06, "loss": 1.1136, "step": 520},
    {"epoch": 9.02, "learning_rate": 5.805805805805806e-06, "loss": 0.9854, "step": 530},
    {"epoch": 9.03, "learning_rate": 5.7057057057057065e-06, "loss": 0.9559, "step": 540},
    {"epoch": 9.04, "learning_rate": 5.605605605605607e-06, "loss": 0.9154, "step": 550},
    {"epoch": 9.05, "learning_rate": 5.505505505505506e-06, "loss": 1.0813, "step": 560},
    {"epoch": 9.05, "eval_accuracy": 0.5714285714285714, "eval_loss": 1.0024998188018799, "eval_runtime": 167.438, "eval_samples_per_second": 1.296, "eval_steps_per_second": 0.167, "step": 560},
    {"epoch": 10.01, "learning_rate": 5.405405405405406e-06, "loss": 0.949, "step": 570},
    {"epoch": 10.02, "learning_rate": 5.305305305305306e-06, "loss": 0.9489, "step": 580},
    {"epoch": 10.03, "learning_rate": 5.205205205205206e-06, "loss": 1.0595, "step": 590},
    {"epoch": 10.04, "learning_rate": 5.105105105105106e-06, "loss": 0.8802, "step": 600},
    {"epoch": 10.05, "learning_rate": 5.005005005005006e-06, "loss": 0.8996, "step": 610},
    {"epoch": 10.05, "eval_accuracy": 0.7050691244239631, "eval_loss": 0.788213849067688, "eval_runtime": 164.6073, "eval_samples_per_second": 1.318, "eval_steps_per_second": 0.17, "step": 616},
    {"epoch": 11.0, "learning_rate": 4.904904904904905e-06, "loss": 0.9075, "step": 620},
    {"epoch": 11.01, "learning_rate": 4.804804804804805e-06, "loss": 0.921, "step": 630},
    {"epoch": 11.02, "learning_rate": 4.704704704704705e-06, "loss": 0.93, "step": 640},
    {"epoch": 11.03, "learning_rate": 4.604604604604605e-06, "loss": 0.7948, "step": 650},
    {"epoch": 11.04, "learning_rate": 4.504504504504505e-06, "loss": 0.9116, "step": 660},
    {"epoch": 11.05, "learning_rate": 4.404404404404405e-06, "loss": 0.947, "step": 670},
    {"epoch": 11.05, "eval_accuracy": 0.7188940092165899, "eval_loss": 0.8638418316841125, "eval_runtime": 164.8412, "eval_samples_per_second": 1.316, "eval_steps_per_second": 0.17, "step": 672},
    {"epoch": 12.01, "learning_rate": 4.304304304304305e-06, "loss": 0.9901, "step": 680},
    {"epoch": 12.02, "learning_rate": 4.204204204204204e-06, "loss": 0.8267, "step": 690},
    {"epoch": 12.03, "learning_rate": 4.1041041041041045e-06, "loss": 0.9112, "step": 700},
    {"epoch": 12.03, "learning_rate": 4.004004004004005e-06, "loss": 0.8341, "step": 710},
    {"epoch": 12.04, "learning_rate": 3.903903903903904e-06, "loss": 0.9386, "step": 720},
    {"epoch": 12.05, "eval_accuracy": 0.7004608294930875, "eval_loss": 0.8191553354263306, "eval_runtime": 164.8134, "eval_samples_per_second": 1.317, "eval_steps_per_second": 0.17, "step": 728},
    {"epoch": 13.0, "learning_rate": 3.803803803803804e-06, "loss": 0.9771, "step": 730},
    {"epoch": 13.01, "learning_rate": 3.7037037037037037e-06, "loss": 0.8544, "step": 740},
    {"epoch": 13.02, "learning_rate": 3.603603603603604e-06, "loss": 0.997, "step": 750},
    {"epoch": 13.03, "learning_rate": 3.5035035035035036e-06, "loss": 0.9852, "step": 760},
    {"epoch": 13.04, "learning_rate": 3.4034034034034037e-06, "loss": 0.946, "step": 770},
    {"epoch": 13.05, "learning_rate": 3.3033033033033035e-06, "loss": 0.8754, "step": 780},
    {"epoch": 13.05, "eval_accuracy": 0.695852534562212, "eval_loss": 0.7741014361381531, "eval_runtime": 164.9986, "eval_samples_per_second": 1.315, "eval_steps_per_second": 0.17, "step": 784},
    {"epoch": 14.01, "learning_rate": 3.2032032032032036e-06, "loss": 0.9146, "step": 790},
    {"epoch": 14.01, "learning_rate": 3.1031031031031033e-06, "loss": 0.8221, "step": 800},
    {"epoch": 14.02, "learning_rate": 3.0030030030030034e-06, "loss": 0.8436, "step": 810},
    {"epoch": 14.03, "learning_rate": 2.902902902902903e-06, "loss": 0.812, "step": 820},
    {"epoch": 14.04, "learning_rate": 2.8028028028028033e-06, "loss": 0.8285, "step": 830},
    {"epoch": 14.05, "learning_rate": 2.702702702702703e-06, "loss": 0.8028, "step": 840},
    {"epoch": 14.05, "eval_accuracy": 0.7096774193548387, "eval_loss": 0.748151421546936, "eval_runtime": 165.2783, "eval_samples_per_second": 1.313, "eval_steps_per_second": 0.169, "step": 840},
    {"epoch": 15.01, "learning_rate": 2.602602602602603e-06, "loss": 0.9496, "step": 850},
    {"epoch": 15.02, "learning_rate": 2.502502502502503e-06, "loss": 0.8661, "step": 860},
    {"epoch": 15.03, "learning_rate": 2.4024024024024026e-06, "loss": 0.8948, "step": 870},
    {"epoch": 15.04, "learning_rate": 2.3023023023023023e-06, "loss": 0.7513, "step": 880},
    {"epoch": 15.05, "learning_rate": 2.2022022022022024e-06, "loss": 0.7038, "step": 890},
    {"epoch": 15.05, "eval_accuracy": 0.7327188940092166, "eval_loss": 0.7463698983192444, "eval_runtime": 165.2407, "eval_samples_per_second": 1.313, "eval_steps_per_second": 0.169, "step": 896},
    {"epoch": 16.0, "learning_rate": 2.102102102102102e-06, "loss": 0.8482, "step": 900},
    {"epoch": 16.01, "learning_rate": 2.0020020020020023e-06, "loss": 0.7576, "step": 910},
    {"epoch": 16.02, "learning_rate": 1.901901901901902e-06, "loss": 0.8506, "step": 920},
    {"epoch": 16.03, "learning_rate": 1.801801801801802e-06, "loss": 0.8435, "step": 930},
    {"epoch": 16.04, "learning_rate": 1.7017017017017019e-06, "loss": 0.9429, "step": 940},
    {"epoch": 16.05, "learning_rate": 1.6016016016016018e-06, "loss": 0.6302, "step": 950},
    {"epoch": 16.05, "eval_accuracy": 0.7004608294930875, "eval_loss": 0.7732542753219604, "eval_runtime": 166.0375, "eval_samples_per_second": 1.307, "eval_steps_per_second": 0.169, "step": 952},
    {"epoch": 17.01, "learning_rate": 1.5015015015015017e-06, "loss": 0.8215, "step": 960},
    {"epoch": 17.02, "learning_rate": 1.4014014014014016e-06, "loss": 0.7503, "step": 970},
    {"epoch": 17.03, "learning_rate": 1.3013013013013016e-06, "loss": 0.8131, "step": 980},
    {"epoch": 17.03, "learning_rate": 1.2012012012012013e-06, "loss": 0.7954, "step": 990},
    {"epoch": 17.04, "learning_rate": 1.1011011011011012e-06, "loss": 0.8387, "step": 1000},
    {"epoch": 17.05, "eval_accuracy": 0.7235023041474654, "eval_loss": 0.722377598285675, "eval_runtime": 164.6978, "eval_samples_per_second": 1.318, "eval_steps_per_second": 0.17, "step": 1008},
    {"epoch": 18.0, "learning_rate": 1.0010010010010011e-06, "loss": 0.9197, "step": 1010},
    {"epoch": 18.01, "learning_rate": 9.00900900900901e-07, "loss": 0.8602, "step": 1020},
    {"epoch": 18.02, "learning_rate": 8.008008008008009e-07, "loss": 0.706, "step": 1030},
    {"epoch": 18.03, "learning_rate": 7.007007007007008e-07, "loss": 0.878, "step": 1040},
    {"epoch": 18.04, "learning_rate": 6.006006006006006e-07, "loss": 0.8754, "step": 1050},
    {"epoch": 18.05, "learning_rate": 5.005005005005006e-07, "loss": 0.5853, "step": 1060},
    {"epoch": 18.05, "eval_accuracy": 0.7142857142857143, "eval_loss": 0.7359205484390259, "eval_runtime": 165.3361, "eval_samples_per_second": 1.312, "eval_steps_per_second": 0.169, "step": 1064},
    {"epoch": 19.01, "learning_rate": 4.0040040040040045e-07, "loss": 0.7846, "step": 1070},
    {"epoch": 19.01, "learning_rate": 3.003003003003003e-07, "loss": 0.9235, "step": 1080},
    {"epoch": 19.02, "learning_rate": 2.0020020020020022e-07, "loss": 0.9745, "step": 1090},
    {"epoch": 19.03, "learning_rate": 1.0010010010010011e-07, "loss": 0.6344, "step": 1100},
    {"epoch": 19.04, "learning_rate": 0.0, "loss": 0.7482, "step": 1110},
    {"epoch": 19.04, "eval_accuracy": 0.7188940092165899, "eval_loss": 0.7369951605796814, "eval_runtime": 166.4385, "eval_samples_per_second": 1.304, "eval_steps_per_second": 0.168, "step": 1110},
    {"epoch": 19.04, "step": 1110, "total_flos": 1.1041673732998595e+19, "train_loss": 1.081317645150262, "train_runtime": 11661.5957, "train_samples_per_second": 0.761, "train_steps_per_second": 0.095},
    {"epoch": 19.04, "eval_accuracy": 0.7129629629629629, "eval_loss": 0.7363542914390564, "eval_runtime": 167.4287, "eval_samples_per_second": 1.29, "eval_steps_per_second": 0.161, "step": 1110},
    {"epoch": 19.04, "eval_accuracy": 0.7129629629629629, "eval_loss": 0.7363542914390564, "eval_runtime": 168.5999, "eval_samples_per_second": 1.281, "eval_steps_per_second": 0.16, "step": 1110}
  ],
  "logging_steps": 10,
  "max_steps": 1110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 1.1041673732998595e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}