llava-v1.5-13b-Posthoc / trainer_state.json
ys-zong · posthoc 13b weights · 8618ec9
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"global_step": 86,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 6.666666666666667e-06,
"loss": 0.8559,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.7071,
"step": 2
},
{
"epoch": 0.03,
"learning_rate": 2e-05,
"loss": 0.7514,
"step": 3
},
{
"epoch": 0.05,
"learning_rate": 1.9992837548163315e-05,
"loss": 0.7098,
"step": 4
},
{
"epoch": 0.06,
"learning_rate": 1.9971360452796523e-05,
"loss": 0.6787,
"step": 5
},
{
"epoch": 0.07,
"learning_rate": 1.993559947963185e-05,
"loss": 0.6556,
"step": 6
},
{
"epoch": 0.08,
"learning_rate": 1.9885605855918887e-05,
"loss": 0.69,
"step": 7
},
{
"epoch": 0.09,
"learning_rate": 1.9821451197042028e-05,
"loss": 0.753,
"step": 8
},
{
"epoch": 0.1,
"learning_rate": 1.9743227403932135e-05,
"loss": 0.7316,
"step": 9
},
{
"epoch": 0.12,
"learning_rate": 1.9651046531419335e-05,
"loss": 0.7426,
"step": 10
},
{
"epoch": 0.13,
"learning_rate": 1.9545040627715554e-05,
"loss": 0.7588,
"step": 11
},
{
"epoch": 0.14,
"learning_rate": 1.942536154525673e-05,
"loss": 0.7027,
"step": 12
},
{
"epoch": 0.15,
"learning_rate": 1.9292180723175656e-05,
"loss": 0.6916,
"step": 13
},
{
"epoch": 0.16,
"learning_rate": 1.9145688941717074e-05,
"loss": 0.7475,
"step": 14
},
{
"epoch": 0.17,
"learning_rate": 1.8986096048946826e-05,
"loss": 0.7039,
"step": 15
},
{
"epoch": 0.19,
"learning_rate": 1.881363066014649e-05,
"loss": 0.7715,
"step": 16
},
{
"epoch": 0.2,
"learning_rate": 1.862853983032423e-05,
"loss": 0.6958,
"step": 17
},
{
"epoch": 0.21,
"learning_rate": 1.8431088700310846e-05,
"loss": 0.7214,
"step": 18
},
{
"epoch": 0.22,
"learning_rate": 1.8221560116948103e-05,
"loss": 0.6808,
"step": 19
},
{
"epoch": 0.23,
"learning_rate": 1.8000254227913346e-05,
"loss": 0.653,
"step": 20
},
{
"epoch": 0.24,
"learning_rate": 1.7767488051760858e-05,
"loss": 0.645,
"step": 21
},
{
"epoch": 0.26,
"learning_rate": 1.7523595023795814e-05,
"loss": 0.7061,
"step": 22
},
{
"epoch": 0.27,
"learning_rate": 1.7268924518431437e-05,
"loss": 0.7223,
"step": 23
},
{
"epoch": 0.28,
"learning_rate": 1.700384134871351e-05,
"loss": 0.6489,
"step": 24
},
{
"epoch": 0.29,
"learning_rate": 1.672872524372919e-05,
"loss": 0.6721,
"step": 25
},
{
"epoch": 0.3,
"learning_rate": 1.644397030464877e-05,
"loss": 0.6771,
"step": 26
},
{
"epoch": 0.31,
"learning_rate": 1.614998444017954e-05,
"loss": 0.1781,
"step": 27
},
{
"epoch": 0.33,
"learning_rate": 1.5847188782240473e-05,
"loss": 0.6716,
"step": 28
},
{
"epoch": 0.34,
"learning_rate": 1.5536017082694846e-05,
"loss": 0.7402,
"step": 29
},
{
"epoch": 0.35,
"learning_rate": 1.5216915092004847e-05,
"loss": 0.6907,
"step": 30
},
{
"epoch": 0.36,
"learning_rate": 1.4890339920698334e-05,
"loss": 0.746,
"step": 31
},
{
"epoch": 0.37,
"learning_rate": 1.4556759384562418e-05,
"loss": 0.7715,
"step": 32
},
{
"epoch": 0.38,
"learning_rate": 1.421665133450184e-05,
"loss": 0.6999,
"step": 33
},
{
"epoch": 0.4,
"learning_rate": 1.3870502972022175e-05,
"loss": 0.6374,
"step": 34
},
{
"epoch": 0.41,
"learning_rate": 1.351881015131833e-05,
"loss": 0.6886,
"step": 35
},
{
"epoch": 0.42,
"learning_rate": 1.316207666896824e-05,
"loss": 0.6923,
"step": 36
},
{
"epoch": 0.43,
"learning_rate": 1.2800813542249073e-05,
"loss": 0.648,
"step": 37
},
{
"epoch": 0.44,
"learning_rate": 1.2435538277109919e-05,
"loss": 0.6889,
"step": 38
},
{
"epoch": 0.45,
"learning_rate": 1.206677412684953e-05,
"loss": 0.7149,
"step": 39
},
{
"epoch": 0.47,
"learning_rate": 1.1695049342560969e-05,
"loss": 0.7076,
"step": 40
},
{
"epoch": 0.48,
"learning_rate": 1.1320896416417026e-05,
"loss": 0.7184,
"step": 41
},
{
"epoch": 0.49,
"learning_rate": 1.0944851318880314e-05,
"loss": 0.7535,
"step": 42
},
{
"epoch": 0.5,
"learning_rate": 1.0567452730930743e-05,
"loss": 0.7158,
"step": 43
},
{
"epoch": 0.51,
"learning_rate": 1.0189241272410191e-05,
"loss": 0.7169,
"step": 44
},
{
"epoch": 0.52,
"learning_rate": 9.810758727589814e-06,
"loss": 0.631,
"step": 45
},
{
"epoch": 0.53,
"learning_rate": 9.43254726906926e-06,
"loss": 0.6797,
"step": 46
},
{
"epoch": 0.55,
"learning_rate": 9.055148681119688e-06,
"loss": 0.7235,
"step": 47
},
{
"epoch": 0.56,
"learning_rate": 8.67910358358298e-06,
"loss": 0.7368,
"step": 48
},
{
"epoch": 0.57,
"learning_rate": 8.304950657439034e-06,
"loss": 0.6947,
"step": 49
},
{
"epoch": 0.58,
"learning_rate": 7.93322587315047e-06,
"loss": 0.6969,
"step": 50
},
{
"epoch": 0.59,
"learning_rate": 7.564461722890082e-06,
"loss": 0.7425,
"step": 51
},
{
"epoch": 0.6,
"learning_rate": 7.199186457750931e-06,
"loss": 0.641,
"step": 52
},
{
"epoch": 0.62,
"learning_rate": 6.837923331031761e-06,
"loss": 0.1685,
"step": 53
},
{
"epoch": 0.63,
"learning_rate": 6.48118984868167e-06,
"loss": 0.684,
"step": 54
},
{
"epoch": 0.64,
"learning_rate": 6.129497027977829e-06,
"loss": 0.1894,
"step": 55
},
{
"epoch": 0.65,
"learning_rate": 5.78334866549816e-06,
"loss": 0.6861,
"step": 56
},
{
"epoch": 0.66,
"learning_rate": 5.443240615437586e-06,
"loss": 0.6837,
"step": 57
},
{
"epoch": 0.67,
"learning_rate": 5.109660079301668e-06,
"loss": 0.7266,
"step": 58
},
{
"epoch": 0.69,
"learning_rate": 4.783084907995156e-06,
"loss": 0.665,
"step": 59
},
{
"epoch": 0.7,
"learning_rate": 4.463982917305155e-06,
"loss": 0.655,
"step": 60
},
{
"epoch": 0.71,
"learning_rate": 4.152811217759529e-06,
"loss": 0.6949,
"step": 61
},
{
"epoch": 0.72,
"learning_rate": 3.850015559820465e-06,
"loss": 0.6858,
"step": 62
},
{
"epoch": 0.73,
"learning_rate": 3.5560296953512296e-06,
"loss": 0.6224,
"step": 63
},
{
"epoch": 0.74,
"learning_rate": 3.2712747562708115e-06,
"loss": 0.6903,
"step": 64
},
{
"epoch": 0.76,
"learning_rate": 2.9961586512864947e-06,
"loss": 0.7373,
"step": 65
},
{
"epoch": 0.77,
"learning_rate": 2.7310754815685627e-06,
"loss": 0.6265,
"step": 66
},
{
"epoch": 0.78,
"learning_rate": 2.4764049762041874e-06,
"loss": 0.7343,
"step": 67
},
{
"epoch": 0.79,
"learning_rate": 2.2325119482391466e-06,
"loss": 0.6524,
"step": 68
},
{
"epoch": 0.8,
"learning_rate": 1.9997457720866554e-06,
"loss": 0.7026,
"step": 69
},
{
"epoch": 0.81,
"learning_rate": 1.7784398830519002e-06,
"loss": 0.6995,
"step": 70
},
{
"epoch": 0.83,
"learning_rate": 1.5689112996891576e-06,
"loss": 0.6672,
"step": 71
},
{
"epoch": 0.84,
"learning_rate": 1.3714601696757713e-06,
"loss": 0.7158,
"step": 72
},
{
"epoch": 0.85,
"learning_rate": 1.1863693398535115e-06,
"loss": 0.6767,
"step": 73
},
{
"epoch": 0.86,
"learning_rate": 1.01390395105318e-06,
"loss": 0.7041,
"step": 74
},
{
"epoch": 0.87,
"learning_rate": 8.543110582829272e-07,
"loss": 0.6582,
"step": 75
},
{
"epoch": 0.88,
"learning_rate": 7.078192768243486e-07,
"loss": 0.7415,
"step": 76
},
{
"epoch": 0.9,
"learning_rate": 5.746384547432738e-07,
"loss": 0.7245,
"step": 77
},
{
"epoch": 0.91,
"learning_rate": 4.549593722844492e-07,
"loss": 0.1694,
"step": 78
},
{
"epoch": 0.92,
"learning_rate": 3.4895346858066723e-07,
"loss": 0.6716,
"step": 79
},
{
"epoch": 0.93,
"learning_rate": 2.5677259606786686e-07,
"loss": 0.7095,
"step": 80
},
{
"epoch": 0.94,
"learning_rate": 1.7854880295797406e-07,
"loss": 0.6823,
"step": 81
},
{
"epoch": 0.95,
"learning_rate": 1.1439414408111471e-07,
"loss": 0.7316,
"step": 82
},
{
"epoch": 0.97,
"learning_rate": 6.440052036815081e-08,
"loss": 0.6417,
"step": 83
},
{
"epoch": 0.98,
"learning_rate": 2.86395472034795e-08,
"loss": 0.6386,
"step": 84
},
{
"epoch": 0.99,
"learning_rate": 7.162451836685291e-09,
"loss": 0.6541,
"step": 85
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"loss": 0.1686,
"step": 86
},
{
"epoch": 1.0,
"step": 86,
"total_flos": 204478356652032.0,
"train_loss": 0.6681410723993945,
"train_runtime": 2105.2364,
"train_samples_per_second": 5.225,
"train_steps_per_second": 0.041
}
],
"max_steps": 86,
"num_train_epochs": 1,
"total_flos": 204478356652032.0,
"trial_name": null,
"trial_params": null
}
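
For reference, the log above can be inspected programmatically. The snippet below is a minimal sketch (assuming a local copy of this file saved as trainer_state.json and that matplotlib is installed; the file and output names are placeholders) that loads log_history and plots the per-step training loss alongside the learning rate, which in this run appears to follow a short linear warmup and then a cosine-style decay to zero at step 86.

# Minimal sketch: load trainer_state.json and plot the logged loss and
# learning rate per step. File names here are assumptions, not part of the repo.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step entries; the final summary entry has no "loss" key.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, ax1 = plt.subplots()
ax1.plot(steps, losses, label="loss")
ax1.set_xlabel("step")
ax1.set_ylabel("loss")

# Second y-axis for the learning-rate schedule.
ax2 = ax1.twinx()
ax2.plot(steps, lrs, color="tab:orange", label="learning rate")
ax2.set_ylabel("learning rate")

fig.tight_layout()
fig.savefig("loss_curve.png")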