{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.25773554939466975,
  "global_step": 18500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.965170871703423e-05,
      "loss": 3.2187,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9303417434068464e-05,
      "loss": 3.2319,
      "step": 1000
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.8955126151102695e-05,
      "loss": 3.2003,
      "step": 1500
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.8606834868136925e-05,
      "loss": 3.2206,
      "step": 2000
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.825854358517115e-05,
      "loss": 3.2168,
      "step": 2500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.791025230220538e-05,
      "loss": 3.2134,
      "step": 3000
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.756196101923962e-05,
      "loss": 3.2317,
      "step": 3500
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.721366973627384e-05,
      "loss": 3.2235,
      "step": 4000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.686537845330807e-05,
      "loss": 3.2294,
      "step": 4500
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.65170871703423e-05,
      "loss": 3.225,
      "step": 5000
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.6168795887376534e-05,
      "loss": 3.2082,
      "step": 5500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.5820504604410765e-05,
      "loss": 3.2202,
      "step": 6000
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.5472213321444996e-05,
      "loss": 3.221,
      "step": 6500
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.512392203847922e-05,
      "loss": 3.2042,
      "step": 7000
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.477563075551345e-05,
      "loss": 3.1977,
      "step": 7500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.442733947254769e-05,
      "loss": 3.1891,
      "step": 8000
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.407904818958191e-05,
      "loss": 3.2053,
      "step": 8500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.373075690661614e-05,
      "loss": 3.2168,
      "step": 9000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.3382465623650374e-05,
      "loss": 3.2138,
      "step": 9500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.3034174340684604e-05,
      "loss": 3.1905,
      "step": 10000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.2685883057718835e-05,
      "loss": 3.1923,
      "step": 10500
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.2337591774753066e-05,
      "loss": 3.1953,
      "step": 11000
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.198930049178729e-05,
      "loss": 3.1958,
      "step": 11500
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.164100920882153e-05,
      "loss": 3.1885,
      "step": 12000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.129271792585576e-05,
      "loss": 3.1701,
      "step": 12500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.094442664288998e-05,
      "loss": 3.1908,
      "step": 13000
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.059613535992421e-05,
      "loss": 3.1618,
      "step": 13500
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.0247844076958444e-05,
      "loss": 3.1917,
      "step": 14000
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.9899552793992674e-05,
      "loss": 3.1812,
      "step": 14500
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.9551261511026905e-05,
      "loss": 3.1794,
      "step": 15000
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.9202970228061136e-05,
      "loss": 3.1902,
      "step": 15500
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.885467894509536e-05,
      "loss": 3.18,
      "step": 16000
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.85063876621296e-05,
      "loss": 3.1849,
      "step": 16500
    },
    {
      "epoch": 0.24,
      "learning_rate": 3.815809637916383e-05,
      "loss": 3.1857,
      "step": 17000
    },
    {
      "epoch": 0.24,
      "learning_rate": 3.780980509619805e-05,
      "loss": 3.1577,
      "step": 17500
    },
    {
      "epoch": 0.25,
      "learning_rate": 3.746151381323228e-05,
      "loss": 3.1878,
      "step": 18000
    },
    {
      "epoch": 0.26,
      "learning_rate": 3.7113222530266514e-05,
      "loss": 3.1512,
      "step": 18500
    }
  ],
  "max_steps": 71779,
  "num_train_epochs": 1,
  "total_flos": 3.912747299426304e+16,
  "trial_name": null,
  "trial_params": null
}