|
{ |
|
"best_metric": 0.08164331316947937, |
|
"best_model_checkpoint": "./vit-base-beans-demo-v5/checkpoint-1100", |
|
"epoch": 4.0, |
|
"eval_steps": 100, |
|
"global_step": 1440, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00019861111111111113, |
|
"loss": 1.5566, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00019722222222222225, |
|
"loss": 1.262, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019583333333333334, |
|
"loss": 1.091, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019444444444444446, |
|
"loss": 0.7922, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019305555555555558, |
|
"loss": 0.8375, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019166666666666667, |
|
"loss": 0.7567, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019027777777777776, |
|
"loss": 0.6015, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00018888888888888888, |
|
"loss": 0.7216, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.0001875, |
|
"loss": 0.6877, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00018611111111111112, |
|
"loss": 0.5092, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.7680555555555556, |
|
"eval_loss": 0.6419699788093567, |
|
"eval_runtime": 22.1712, |
|
"eval_samples_per_second": 32.475, |
|
"eval_steps_per_second": 4.059, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00018472222222222224, |
|
"loss": 0.4847, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00018333333333333334, |
|
"loss": 0.6076, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00018194444444444445, |
|
"loss": 0.5853, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00018055555555555557, |
|
"loss": 0.5133, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.0001791666666666667, |
|
"loss": 0.4949, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00017777777777777779, |
|
"loss": 0.5219, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.0001763888888888889, |
|
"loss": 0.4862, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.000175, |
|
"loss": 0.5873, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00017361111111111112, |
|
"loss": 0.4319, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00017222222222222224, |
|
"loss": 0.5076, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_accuracy": 0.8722222222222222, |
|
"eval_loss": 0.4068766236305237, |
|
"eval_runtime": 22.7196, |
|
"eval_samples_per_second": 31.691, |
|
"eval_steps_per_second": 3.961, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.00017083333333333333, |
|
"loss": 0.4323, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00016944444444444445, |
|
"loss": 0.361, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00016805555555555557, |
|
"loss": 0.4156, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0001666666666666667, |
|
"loss": 0.4297, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00016527777777777778, |
|
"loss": 0.6522, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.0001638888888888889, |
|
"loss": 0.4157, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00016250000000000002, |
|
"loss": 0.3354, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.0001611111111111111, |
|
"loss": 0.3883, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00015972222222222223, |
|
"loss": 0.4386, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00015833333333333332, |
|
"loss": 0.3291, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"eval_accuracy": 0.8569444444444444, |
|
"eval_loss": 0.4342160224914551, |
|
"eval_runtime": 21.9024, |
|
"eval_samples_per_second": 32.873, |
|
"eval_steps_per_second": 4.109, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00015694444444444444, |
|
"loss": 0.4767, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00015555555555555556, |
|
"loss": 0.3492, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00015416666666666668, |
|
"loss": 0.4947, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00015277777777777777, |
|
"loss": 0.4482, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.0001513888888888889, |
|
"loss": 0.3367, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00015000000000000001, |
|
"loss": 0.2316, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00014861111111111113, |
|
"loss": 0.2207, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00014722222222222223, |
|
"loss": 0.1652, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00014583333333333335, |
|
"loss": 0.2019, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.00014444444444444444, |
|
"loss": 0.108, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"eval_accuracy": 0.9291666666666667, |
|
"eval_loss": 0.24096721410751343, |
|
"eval_runtime": 21.4632, |
|
"eval_samples_per_second": 33.546, |
|
"eval_steps_per_second": 4.193, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00014305555555555556, |
|
"loss": 0.1746, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00014166666666666668, |
|
"loss": 0.0908, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00014027777777777777, |
|
"loss": 0.0816, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.0001388888888888889, |
|
"loss": 0.1607, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.0001375, |
|
"loss": 0.0993, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00013611111111111113, |
|
"loss": 0.1251, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00013472222222222225, |
|
"loss": 0.1353, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00013333333333333334, |
|
"loss": 0.0739, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00013194444444444446, |
|
"loss": 0.06, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.00013055555555555555, |
|
"loss": 0.0378, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"eval_accuracy": 0.9138888888888889, |
|
"eval_loss": 0.31065165996551514, |
|
"eval_runtime": 21.6753, |
|
"eval_samples_per_second": 33.217, |
|
"eval_steps_per_second": 4.152, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00012916666666666667, |
|
"loss": 0.0966, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.00012777777777777776, |
|
"loss": 0.0644, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.00012638888888888888, |
|
"loss": 0.0791, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.000125, |
|
"loss": 0.1598, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.00012361111111111112, |
|
"loss": 0.1561, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.00012222222222222224, |
|
"loss": 0.1597, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.00012083333333333333, |
|
"loss": 0.2064, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.00011944444444444445, |
|
"loss": 0.1435, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00011805555555555556, |
|
"loss": 0.0582, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.00011666666666666668, |
|
"loss": 0.1488, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"eval_accuracy": 0.9388888888888889, |
|
"eval_loss": 0.19844678044319153, |
|
"eval_runtime": 21.7891, |
|
"eval_samples_per_second": 33.044, |
|
"eval_steps_per_second": 4.131, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.00011527777777777777, |
|
"loss": 0.1309, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.00011388888888888889, |
|
"loss": 0.097, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.00011250000000000001, |
|
"loss": 0.1493, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.00011111111111111112, |
|
"loss": 0.1808, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00010972222222222224, |
|
"loss": 0.1028, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.00010833333333333333, |
|
"loss": 0.096, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.00010694444444444445, |
|
"loss": 0.0943, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.00010555555555555557, |
|
"loss": 0.116, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.00010416666666666667, |
|
"loss": 0.1912, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.00010277777777777778, |
|
"loss": 0.0532, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"eval_accuracy": 0.9513888888888888, |
|
"eval_loss": 0.17138923704624176, |
|
"eval_runtime": 23.0226, |
|
"eval_samples_per_second": 31.274, |
|
"eval_steps_per_second": 3.909, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.00010138888888888889, |
|
"loss": 0.08, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.0001, |
|
"loss": 0.0874, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 9.861111111111112e-05, |
|
"loss": 0.0968, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 9.722222222222223e-05, |
|
"loss": 0.0607, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 9.583333333333334e-05, |
|
"loss": 0.0303, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 9.444444444444444e-05, |
|
"loss": 0.0113, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 9.305555555555556e-05, |
|
"loss": 0.1197, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 9.166666666666667e-05, |
|
"loss": 0.0256, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 9.027777777777779e-05, |
|
"loss": 0.0141, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 8.888888888888889e-05, |
|
"loss": 0.0122, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"eval_accuracy": 0.9611111111111111, |
|
"eval_loss": 0.1333748996257782, |
|
"eval_runtime": 22.6484, |
|
"eval_samples_per_second": 31.79, |
|
"eval_steps_per_second": 3.974, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 8.75e-05, |
|
"loss": 0.0222, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 8.611111111111112e-05, |
|
"loss": 0.0101, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 8.472222222222222e-05, |
|
"loss": 0.042, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 8.333333333333334e-05, |
|
"loss": 0.0232, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 8.194444444444445e-05, |
|
"loss": 0.1049, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 8.055555555555556e-05, |
|
"loss": 0.0191, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 7.916666666666666e-05, |
|
"loss": 0.0778, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 7.777777777777778e-05, |
|
"loss": 0.0533, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 7.638888888888889e-05, |
|
"loss": 0.0567, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 7.500000000000001e-05, |
|
"loss": 0.0529, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"eval_accuracy": 0.9652777777777778, |
|
"eval_loss": 0.11391060799360275, |
|
"eval_runtime": 21.5745, |
|
"eval_samples_per_second": 33.373, |
|
"eval_steps_per_second": 4.172, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 7.361111111111111e-05, |
|
"loss": 0.0184, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 7.222222222222222e-05, |
|
"loss": 0.0247, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 7.083333333333334e-05, |
|
"loss": 0.0075, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 6.944444444444444e-05, |
|
"loss": 0.0601, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 6.805555555555556e-05, |
|
"loss": 0.0394, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 6.666666666666667e-05, |
|
"loss": 0.0079, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 6.527777777777778e-05, |
|
"loss": 0.0113, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 6.388888888888888e-05, |
|
"loss": 0.0417, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 6.25e-05, |
|
"loss": 0.0658, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 6.111111111111112e-05, |
|
"loss": 0.0221, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"eval_accuracy": 0.9736111111111111, |
|
"eval_loss": 0.08747294545173645, |
|
"eval_runtime": 21.8071, |
|
"eval_samples_per_second": 33.017, |
|
"eval_steps_per_second": 4.127, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 5.972222222222223e-05, |
|
"loss": 0.0223, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 5.833333333333334e-05, |
|
"loss": 0.0361, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 5.6944444444444445e-05, |
|
"loss": 0.0065, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 5.555555555555556e-05, |
|
"loss": 0.0062, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 5.4166666666666664e-05, |
|
"loss": 0.0083, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 5.2777777777777784e-05, |
|
"loss": 0.0077, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 5.138888888888889e-05, |
|
"loss": 0.0123, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.0266, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 4.8611111111111115e-05, |
|
"loss": 0.0058, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 4.722222222222222e-05, |
|
"loss": 0.0052, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"eval_accuracy": 0.9819444444444444, |
|
"eval_loss": 0.08164331316947937, |
|
"eval_runtime": 21.3086, |
|
"eval_samples_per_second": 33.789, |
|
"eval_steps_per_second": 4.224, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.0049, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"loss": 0.0053, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 4.305555555555556e-05, |
|
"loss": 0.0295, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.0044, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 4.027777777777778e-05, |
|
"loss": 0.0058, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.888888888888889e-05, |
|
"loss": 0.0052, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0043, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.611111111111111e-05, |
|
"loss": 0.0045, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 3.472222222222222e-05, |
|
"loss": 0.0043, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.0045, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_accuracy": 0.9791666666666666, |
|
"eval_loss": 0.08730719983577728, |
|
"eval_runtime": 21.963, |
|
"eval_samples_per_second": 32.782, |
|
"eval_steps_per_second": 4.098, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.194444444444444e-05, |
|
"loss": 0.0044, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.055555555555556e-05, |
|
"loss": 0.0042, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 0.0392, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 2.777777777777778e-05, |
|
"loss": 0.0042, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 2.6388888888888892e-05, |
|
"loss": 0.0045, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.0043, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 2.361111111111111e-05, |
|
"loss": 0.0044, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"loss": 0.0041, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 0.0043, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 1.9444444444444445e-05, |
|
"loss": 0.0113, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"eval_accuracy": 0.9833333333333333, |
|
"eval_loss": 0.08817147463560104, |
|
"eval_runtime": 22.6142, |
|
"eval_samples_per_second": 31.838, |
|
"eval_steps_per_second": 3.98, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 1.8055555555555555e-05, |
|
"loss": 0.0038, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.0041, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 1.527777777777778e-05, |
|
"loss": 0.0039, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.388888888888889e-05, |
|
"loss": 0.0037, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.25e-05, |
|
"loss": 0.0042, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"loss": 0.006, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 9.722222222222223e-06, |
|
"loss": 0.0036, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.004, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 6.944444444444445e-06, |
|
"loss": 0.0035, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 5.555555555555556e-06, |
|
"loss": 0.0043, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"eval_accuracy": 0.9805555555555555, |
|
"eval_loss": 0.08652874827384949, |
|
"eval_runtime": 21.5822, |
|
"eval_samples_per_second": 33.361, |
|
"eval_steps_per_second": 4.17, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 0.0043, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 2.777777777777778e-06, |
|
"loss": 0.0041, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 1.388888888888889e-06, |
|
"loss": 0.0164, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.0038, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"step": 1440, |
|
"total_flos": 1.7854794491756544e+18,

"train_loss": 0.18370533613229378,

"train_runtime": 2041.1421,

"train_samples_per_second": 11.288,

"train_steps_per_second": 0.705

},

{

"epoch": 4.0,

"eval_accuracy": 0.9819444444444444,

"eval_loss": 0.08164331316947937,

"eval_runtime": 32.0666,

"eval_samples_per_second": 22.453,

"eval_steps_per_second": 2.807,
|
"step": 1440 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1440, |
|
"num_train_epochs": 4, |
|
"save_steps": 100, |
|
"total_flos": 1.7854794491756544e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|