{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6637554585152838,
"eval_steps": 500,
"global_step": 38,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.017467248908296942,
"grad_norm": 0.12968653440475464,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.3378,
"step": 1
},
{
"epoch": 0.034934497816593885,
"grad_norm": 0.1782296746969223,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.2777,
"step": 2
},
{
"epoch": 0.05240174672489083,
"grad_norm": 0.1216743066906929,
"learning_rate": 3e-06,
"loss": 1.2269,
"step": 3
},
{
"epoch": 0.06986899563318777,
"grad_norm": 0.13824662566184998,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2621,
"step": 4
},
{
"epoch": 0.08733624454148471,
"grad_norm": 0.15446175634860992,
"learning_rate": 5e-06,
"loss": 1.2518,
"step": 5
},
{
"epoch": 0.10480349344978165,
"grad_norm": 0.13843178749084473,
"learning_rate": 6e-06,
"loss": 1.2438,
"step": 6
},
{
"epoch": 0.1222707423580786,
"grad_norm": 0.15130339562892914,
"learning_rate": 7e-06,
"loss": 1.2331,
"step": 7
},
{
"epoch": 0.13973799126637554,
"grad_norm": 0.15838982164859772,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2703,
"step": 8
},
{
"epoch": 0.1572052401746725,
"grad_norm": 0.15755866467952728,
"learning_rate": 9e-06,
"loss": 1.2603,
"step": 9
},
{
"epoch": 0.17467248908296942,
"grad_norm": 0.16667789220809937,
"learning_rate": 1e-05,
"loss": 1.2428,
"step": 10
},
{
"epoch": 0.19213973799126638,
"grad_norm": 0.1794327050447464,
"learning_rate": 9.988834393115768e-06,
"loss": 1.2442,
"step": 11
},
{
"epoch": 0.2096069868995633,
"grad_norm": 0.18840420246124268,
"learning_rate": 9.955387440773902e-06,
"loss": 1.2562,
"step": 12
},
{
"epoch": 0.22707423580786026,
"grad_norm": 0.24771647155284882,
"learning_rate": 9.899808525182935e-06,
"loss": 1.2056,
"step": 13
},
{
"epoch": 0.2445414847161572,
"grad_norm": 0.2212584763765335,
"learning_rate": 9.822345875271884e-06,
"loss": 1.2462,
"step": 14
},
{
"epoch": 0.26200873362445415,
"grad_norm": 0.2274945080280304,
"learning_rate": 9.723345458039595e-06,
"loss": 1.262,
"step": 15
},
{
"epoch": 0.2794759825327511,
"grad_norm": 0.22981807589530945,
"learning_rate": 9.603249433382145e-06,
"loss": 1.2918,
"step": 16
},
{
"epoch": 0.29694323144104806,
"grad_norm": 0.27434590458869934,
"learning_rate": 9.462594179299408e-06,
"loss": 1.2858,
"step": 17
},
{
"epoch": 0.314410480349345,
"grad_norm": 0.23107963800430298,
"learning_rate": 9.302007896300697e-06,
"loss": 1.1679,
"step": 18
},
{
"epoch": 0.3318777292576419,
"grad_norm": 0.23975740373134613,
"learning_rate": 9.122207801708802e-06,
"loss": 1.2119,
"step": 19
},
{
"epoch": 0.34934497816593885,
"grad_norm": 0.21921472251415253,
"learning_rate": 8.923996926393306e-06,
"loss": 1.1735,
"step": 20
},
{
"epoch": 0.36681222707423583,
"grad_norm": 0.22271591424942017,
"learning_rate": 8.708260528239788e-06,
"loss": 1.2749,
"step": 21
},
{
"epoch": 0.38427947598253276,
"grad_norm": 0.20965439081192017,
"learning_rate": 8.475962138373212e-06,
"loss": 1.25,
"step": 22
},
{
"epoch": 0.4017467248908297,
"grad_norm": 0.17370395362377167,
"learning_rate": 8.228139257794012e-06,
"loss": 1.1968,
"step": 23
},
{
"epoch": 0.4192139737991266,
"grad_norm": 0.19179144501686096,
"learning_rate": 7.965898723646777e-06,
"loss": 1.169,
"step": 24
},
{
"epoch": 0.4366812227074236,
"grad_norm": 0.1869051307439804,
"learning_rate": 7.690411765816864e-06,
"loss": 1.199,
"step": 25
},
{
"epoch": 0.45414847161572053,
"grad_norm": 0.17581412196159363,
"learning_rate": 7.402908775933419e-06,
"loss": 1.126,
"step": 26
},
{
"epoch": 0.47161572052401746,
"grad_norm": 0.1623380184173584,
"learning_rate": 7.104673812141676e-06,
"loss": 1.1392,
"step": 27
},
{
"epoch": 0.4890829694323144,
"grad_norm": 0.19948747754096985,
"learning_rate": 6.797038864187564e-06,
"loss": 1.0956,
"step": 28
},
{
"epoch": 0.5065502183406113,
"grad_norm": 0.1480165272951126,
"learning_rate": 6.481377904428171e-06,
"loss": 1.1984,
"step": 29
},
{
"epoch": 0.5240174672489083,
"grad_norm": 0.15460729598999023,
"learning_rate": 6.1591007513376425e-06,
"loss": 1.1759,
"step": 30
},
{
"epoch": 0.5414847161572053,
"grad_norm": 0.13603241741657257,
"learning_rate": 5.831646772915651e-06,
"loss": 1.182,
"step": 31
},
{
"epoch": 0.5589519650655022,
"grad_norm": 0.15064558386802673,
"learning_rate": 5.500478458120493e-06,
"loss": 1.1191,
"step": 32
},
{
"epoch": 0.5764192139737991,
"grad_norm": 0.1394384205341339,
"learning_rate": 5.1670748850383734e-06,
"loss": 1.1359,
"step": 33
},
{
"epoch": 0.5938864628820961,
"grad_norm": 0.12493956089019775,
"learning_rate": 4.832925114961629e-06,
"loss": 1.1747,
"step": 34
},
{
"epoch": 0.611353711790393,
"grad_norm": 0.14052054286003113,
"learning_rate": 4.499521541879508e-06,
"loss": 1.1116,
"step": 35
},
{
"epoch": 0.62882096069869,
"grad_norm": 0.1254061758518219,
"learning_rate": 4.1683532270843505e-06,
"loss": 1.1836,
"step": 36
},
{
"epoch": 0.6462882096069869,
"grad_norm": 0.1256641447544098,
"learning_rate": 3.840899248662358e-06,
"loss": 1.1166,
"step": 37
},
{
"epoch": 0.6637554585152838,
"grad_norm": 0.22417481243610382,
"learning_rate": 3.518622095571831e-06,
"loss": 1.1714,
"step": 38
}
],
"logging_steps": 1,
"max_steps": 57,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 19,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.9396478814173266e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}