{
"best_metric": 1.1641393899917603,
"best_model_checkpoint": "data/Llama-31-8B_task-3_60-samples_config-2_full/checkpoint-48",
"epoch": 24.0,
"eval_steps": 500,
"global_step": 69,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.34782608695652173,
"grad_norm": 0.5745601058006287,
"learning_rate": 1e-05,
"loss": 1.7172,
"step": 1
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.47909945249557495,
"learning_rate": 2e-05,
"loss": 1.6831,
"step": 2
},
{
"epoch": 0.6956521739130435,
"eval_loss": 1.6899796724319458,
"eval_runtime": 11.1827,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 2
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.4458988606929779,
"learning_rate": 4e-05,
"loss": 1.6856,
"step": 4
},
{
"epoch": 1.7391304347826086,
"eval_loss": 1.6448274850845337,
"eval_runtime": 11.1908,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 5
},
{
"epoch": 2.0869565217391304,
"grad_norm": 0.4629712402820587,
"learning_rate": 6e-05,
"loss": 1.671,
"step": 6
},
{
"epoch": 2.782608695652174,
"grad_norm": 0.467144638299942,
"learning_rate": 8e-05,
"loss": 1.6065,
"step": 8
},
{
"epoch": 2.782608695652174,
"eval_loss": 1.5643361806869507,
"eval_runtime": 11.1918,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 8
},
{
"epoch": 3.4782608695652173,
"grad_norm": 0.3479427695274353,
"learning_rate": 0.0001,
"loss": 1.5184,
"step": 10
},
{
"epoch": 3.8260869565217392,
"eval_loss": 1.4830130338668823,
"eval_runtime": 11.1894,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 11
},
{
"epoch": 4.173913043478261,
"grad_norm": 0.23519960045814514,
"learning_rate": 9.987820251299122e-05,
"loss": 1.4882,
"step": 12
},
{
"epoch": 4.869565217391305,
"grad_norm": 0.24616719782352448,
"learning_rate": 9.951340343707852e-05,
"loss": 1.4227,
"step": 14
},
{
"epoch": 4.869565217391305,
"eval_loss": 1.4134116172790527,
"eval_runtime": 11.1856,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 14
},
{
"epoch": 5.565217391304348,
"grad_norm": 0.22191396355628967,
"learning_rate": 9.890738003669029e-05,
"loss": 1.3635,
"step": 16
},
{
"epoch": 5.913043478260869,
"eval_loss": 1.347265362739563,
"eval_runtime": 11.1872,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 17
},
{
"epoch": 6.260869565217392,
"grad_norm": 0.2585182785987854,
"learning_rate": 9.806308479691595e-05,
"loss": 1.3158,
"step": 18
},
{
"epoch": 6.956521739130435,
"grad_norm": 0.26426589488983154,
"learning_rate": 9.698463103929542e-05,
"loss": 1.2487,
"step": 20
},
{
"epoch": 6.956521739130435,
"eval_loss": 1.2793773412704468,
"eval_runtime": 11.1842,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 20
},
{
"epoch": 7.6521739130434785,
"grad_norm": 0.22825951874256134,
"learning_rate": 9.567727288213005e-05,
"loss": 1.2196,
"step": 22
},
{
"epoch": 8.0,
"eval_loss": 1.2357500791549683,
"eval_runtime": 11.1872,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 23
},
{
"epoch": 8.347826086956522,
"grad_norm": 0.18481330573558807,
"learning_rate": 9.414737964294636e-05,
"loss": 1.1686,
"step": 24
},
{
"epoch": 8.695652173913043,
"eval_loss": 1.218384027481079,
"eval_runtime": 11.1871,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 25
},
{
"epoch": 9.043478260869565,
"grad_norm": 0.18732167780399323,
"learning_rate": 9.24024048078213e-05,
"loss": 1.1427,
"step": 26
},
{
"epoch": 9.73913043478261,
"grad_norm": 0.1554836928844452,
"learning_rate": 9.045084971874738e-05,
"loss": 1.1271,
"step": 28
},
{
"epoch": 9.73913043478261,
"eval_loss": 1.204110026359558,
"eval_runtime": 11.187,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 28
},
{
"epoch": 10.434782608695652,
"grad_norm": 0.1382986456155777,
"learning_rate": 8.83022221559489e-05,
"loss": 1.1329,
"step": 30
},
{
"epoch": 10.782608695652174,
"eval_loss": 1.1925348043441772,
"eval_runtime": 11.1891,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 31
},
{
"epoch": 11.130434782608695,
"grad_norm": 0.15797151625156403,
"learning_rate": 8.596699001693255e-05,
"loss": 1.0772,
"step": 32
},
{
"epoch": 11.826086956521738,
"grad_norm": 0.1335517019033432,
"learning_rate": 8.345653031794292e-05,
"loss": 1.1049,
"step": 34
},
{
"epoch": 11.826086956521738,
"eval_loss": 1.1837517023086548,
"eval_runtime": 11.1916,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 34
},
{
"epoch": 12.521739130434783,
"grad_norm": 0.13403163850307465,
"learning_rate": 8.07830737662829e-05,
"loss": 1.067,
"step": 36
},
{
"epoch": 12.869565217391305,
"eval_loss": 1.1764452457427979,
"eval_runtime": 11.1842,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 37
},
{
"epoch": 13.217391304347826,
"grad_norm": 0.14357908070087433,
"learning_rate": 7.795964517353735e-05,
"loss": 1.0779,
"step": 38
},
{
"epoch": 13.91304347826087,
"grad_norm": 0.1504034399986267,
"learning_rate": 7.500000000000001e-05,
"loss": 1.0693,
"step": 40
},
{
"epoch": 13.91304347826087,
"eval_loss": 1.170665979385376,
"eval_runtime": 11.1831,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 40
},
{
"epoch": 14.608695652173914,
"grad_norm": 0.13536736369132996,
"learning_rate": 7.191855733945387e-05,
"loss": 1.039,
"step": 42
},
{
"epoch": 14.956521739130435,
"eval_loss": 1.1671864986419678,
"eval_runtime": 11.1881,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 43
},
{
"epoch": 15.304347826086957,
"grad_norm": 0.1279880404472351,
"learning_rate": 6.873032967079561e-05,
"loss": 1.0399,
"step": 44
},
{
"epoch": 16.0,
"grad_norm": 0.15045784413814545,
"learning_rate": 6.545084971874738e-05,
"loss": 1.0381,
"step": 46
},
{
"epoch": 16.0,
"eval_loss": 1.1651216745376587,
"eval_runtime": 11.1971,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 46
},
{
"epoch": 16.695652173913043,
"grad_norm": 0.14994898438453674,
"learning_rate": 6.209609477998338e-05,
"loss": 0.994,
"step": 48
},
{
"epoch": 16.695652173913043,
"eval_loss": 1.1641393899917603,
"eval_runtime": 11.1852,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 48
},
{
"epoch": 17.391304347826086,
"grad_norm": 0.17326103150844574,
"learning_rate": 5.868240888334653e-05,
"loss": 1.0091,
"step": 50
},
{
"epoch": 17.73913043478261,
"eval_loss": 1.1647818088531494,
"eval_runtime": 11.1861,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 51
},
{
"epoch": 18.08695652173913,
"grad_norm": 0.15706367790699005,
"learning_rate": 5.522642316338268e-05,
"loss": 1.0247,
"step": 52
},
{
"epoch": 18.782608695652176,
"grad_norm": 0.16293032467365265,
"learning_rate": 5.174497483512506e-05,
"loss": 0.996,
"step": 54
},
{
"epoch": 18.782608695652176,
"eval_loss": 1.166734218597412,
"eval_runtime": 11.1882,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 54
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.18445666134357452,
"learning_rate": 4.825502516487497e-05,
"loss": 0.969,
"step": 56
},
{
"epoch": 19.82608695652174,
"eval_loss": 1.1694756746292114,
"eval_runtime": 11.1866,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 57
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.1890542209148407,
"learning_rate": 4.477357683661734e-05,
"loss": 0.9787,
"step": 58
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.1912829726934433,
"learning_rate": 4.131759111665349e-05,
"loss": 0.9577,
"step": 60
},
{
"epoch": 20.869565217391305,
"eval_loss": 1.1710036993026733,
"eval_runtime": 11.1899,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 60
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.2165202498435974,
"learning_rate": 3.790390522001662e-05,
"loss": 0.9489,
"step": 62
},
{
"epoch": 21.91304347826087,
"eval_loss": 1.171953797340393,
"eval_runtime": 11.1865,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 63
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.21156741678714752,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.9391,
"step": 64
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.23810920119285583,
"learning_rate": 3.12696703292044e-05,
"loss": 0.9253,
"step": 66
},
{
"epoch": 22.956521739130434,
"eval_loss": 1.176476001739502,
"eval_runtime": 11.1876,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 66
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.22348417341709137,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.9133,
"step": 68
},
{
"epoch": 24.0,
"eval_loss": 1.180800199508667,
"eval_runtime": 11.185,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 69
},
{
"epoch": 24.0,
"step": 69,
"total_flos": 8.747351023812608e+16,
"train_loss": 1.1726603568464085,
"train_runtime": 3235.2716,
"train_samples_per_second": 0.711,
"train_steps_per_second": 0.031
}
],
"logging_steps": 2,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.747351023812608e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
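The object above is the standard `trainer_state.json` that the Hugging Face `transformers` `Trainer` checkpoints alongside the model: `log_history` interleaves training-loss entries (keyed by `loss`) with evaluation entries (keyed by `eval_loss`), and `best_metric` / `best_model_checkpoint` record the lowest eval loss seen. A minimal sketch of reading the two loss curves back out with the standard library, assuming the file has been saved locally as `trainer_state.json` (the local path is an assumption, not part of the state file):

```python
import json

# Assumption: the JSON above has been saved locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

train_curve = []  # (epoch, training loss) from logging steps
eval_curve = []   # (epoch, eval_loss) from evaluation steps

for entry in state["log_history"]:
    if "loss" in entry:
        train_curve.append((entry["epoch"], entry["loss"]))
    if "eval_loss" in entry:
        eval_curve.append((entry["epoch"], entry["eval_loss"]))

best_epoch, best_eval = min(eval_curve, key=lambda pair: pair[1])
print(f"best eval_loss {best_eval:.4f} at epoch {best_epoch:.2f}")
# Matches "best_metric" above: ~1.1641 at epoch ~16.70 (checkpoint-48).
```

Consistent with `early_stopping_patience: 7`, the seven evaluations after checkpoint-48 (epochs ~17.7 through 24.0) never improve on 1.1641, so the run stops at step 69 (`should_training_stop: true`) well short of the configured 50 epochs.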