{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9981515711645101,
"eval_steps": 500,
"global_step": 270,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018484288354898338,
"grad_norm": 422.0,
"learning_rate": 3.7037037037037037e-06,
"loss": 20.7086,
"step": 5
},
{
"epoch": 0.036968576709796676,
"grad_norm": 185.0,
"learning_rate": 7.4074074074074075e-06,
"loss": 19.253,
"step": 10
},
{
"epoch": 0.05545286506469501,
"grad_norm": 147.0,
"learning_rate": 1.1111111111111113e-05,
"loss": 16.7138,
"step": 15
},
{
"epoch": 0.07393715341959335,
"grad_norm": 62.75,
"learning_rate": 1.4814814814814815e-05,
"loss": 14.0999,
"step": 20
},
{
"epoch": 0.09242144177449169,
"grad_norm": 46.25,
"learning_rate": 1.851851851851852e-05,
"loss": 11.9456,
"step": 25
},
{
"epoch": 0.11090573012939002,
"grad_norm": 49.75,
"learning_rate": 1.9992479525042305e-05,
"loss": 10.3044,
"step": 30
},
{
"epoch": 0.12939001848428835,
"grad_norm": 88.5,
"learning_rate": 1.9946562024066018e-05,
"loss": 9.602,
"step": 35
},
{
"epoch": 0.1478743068391867,
"grad_norm": 49.25,
"learning_rate": 1.9859096633447965e-05,
"loss": 9.1612,
"step": 40
},
{
"epoch": 0.16635859519408502,
"grad_norm": 114.0,
"learning_rate": 1.973044870579824e-05,
"loss": 8.5549,
"step": 45
},
{
"epoch": 0.18484288354898337,
"grad_norm": 54.75,
"learning_rate": 1.95611556177388e-05,
"loss": 8.3168,
"step": 50
},
{
"epoch": 0.2033271719038817,
"grad_norm": 16.875,
"learning_rate": 1.93519245252219e-05,
"loss": 7.8042,
"step": 55
},
{
"epoch": 0.22181146025878004,
"grad_norm": 55.75,
"learning_rate": 1.9103629409661468e-05,
"loss": 7.4335,
"step": 60
},
{
"epoch": 0.24029574861367836,
"grad_norm": 17.5,
"learning_rate": 1.881730742721608e-05,
"loss": 7.4047,
"step": 65
},
{
"epoch": 0.2587800369685767,
"grad_norm": 52.5,
"learning_rate": 1.8494154576472976e-05,
"loss": 7.0682,
"step": 70
},
{
"epoch": 0.27726432532347506,
"grad_norm": 69.5,
"learning_rate": 1.8135520702629677e-05,
"loss": 7.1519,
"step": 75
},
{
"epoch": 0.2957486136783734,
"grad_norm": 23.125,
"learning_rate": 1.7742903859041324e-05,
"loss": 6.9284,
"step": 80
},
{
"epoch": 0.3142329020332717,
"grad_norm": 17.25,
"learning_rate": 1.7317944049686125e-05,
"loss": 6.723,
"step": 85
},
{
"epoch": 0.33271719038817005,
"grad_norm": 103.5,
"learning_rate": 1.686241637868734e-05,
"loss": 6.6468,
"step": 90
},
{
"epoch": 0.3512014787430684,
"grad_norm": 17.625,
"learning_rate": 1.637822363550706e-05,
"loss": 6.5427,
"step": 95
},
{
"epoch": 0.36968576709796674,
"grad_norm": 15.6875,
"learning_rate": 1.586738834678418e-05,
"loss": 6.3795,
"step": 100
},
{
"epoch": 0.38817005545286504,
"grad_norm": 17.875,
"learning_rate": 1.5332044328016916e-05,
"loss": 6.4565,
"step": 105
},
{
"epoch": 0.4066543438077634,
"grad_norm": 68.0,
"learning_rate": 1.4774427770379492e-05,
"loss": 6.416,
"step": 110
},
{
"epoch": 0.42513863216266173,
"grad_norm": 71.5,
"learning_rate": 1.4196867899904292e-05,
"loss": 6.2522,
"step": 115
},
{
"epoch": 0.4436229205175601,
"grad_norm": 28.875,
"learning_rate": 1.3601777248047105e-05,
"loss": 6.0918,
"step": 120
},
{
"epoch": 0.46210720887245843,
"grad_norm": 25.75,
"learning_rate": 1.2991641574276419e-05,
"loss": 6.0943,
"step": 125
},
{
"epoch": 0.4805914972273567,
"grad_norm": 24.375,
"learning_rate": 1.2369009482781191e-05,
"loss": 6.0615,
"step": 130
},
{
"epoch": 0.49907578558225507,
"grad_norm": 53.0,
"learning_rate": 1.1736481776669307e-05,
"loss": 5.9591,
"step": 135
},
{
"epoch": 0.5175600739371534,
"grad_norm": 93.5,
"learning_rate": 1.1096700594125318e-05,
"loss": 6.0178,
"step": 140
},
{
"epoch": 0.5360443622920518,
"grad_norm": 14.0625,
"learning_rate": 1.0452338371907065e-05,
"loss": 5.8597,
"step": 145
},
{
"epoch": 0.5545286506469501,
"grad_norm": 102.0,
"learning_rate": 9.806086682281759e-06,
"loss": 5.8942,
"step": 150
},
{
"epoch": 0.5730129390018485,
"grad_norm": 30.125,
"learning_rate": 9.160644990030932e-06,
"loss": 5.9999,
"step": 155
},
{
"epoch": 0.5914972273567468,
"grad_norm": 23.25,
"learning_rate": 8.518709376487515e-06,
"loss": 5.9019,
"step": 160
},
{
"epoch": 0.609981515711645,
"grad_norm": 56.0,
"learning_rate": 7.882961277705897e-06,
"loss": 5.8274,
"step": 165
},
{
"epoch": 0.6284658040665434,
"grad_norm": 10.1875,
"learning_rate": 7.256056283806987e-06,
"loss": 5.9167,
"step": 170
},
{
"epoch": 0.6469500924214417,
"grad_norm": 26.75,
"learning_rate": 6.640613046284581e-06,
"loss": 5.9097,
"step": 175
},
{
"epoch": 0.6654343807763401,
"grad_norm": 11.5625,
"learning_rate": 6.039202339608432e-06,
"loss": 5.6327,
"step": 180
},
{
"epoch": 0.6839186691312384,
"grad_norm": 26.75,
"learning_rate": 5.454336322814995e-06,
"loss": 5.6804,
"step": 185
},
{
"epoch": 0.7024029574861368,
"grad_norm": 30.375,
"learning_rate": 4.888458045941269e-06,
"loss": 5.7316,
"step": 190
},
{
"epoch": 0.7208872458410351,
"grad_norm": 39.5,
"learning_rate": 4.343931245134616e-06,
"loss": 5.7299,
"step": 195
},
{
"epoch": 0.7393715341959335,
"grad_norm": 11.75,
"learning_rate": 3.823030469065431e-06,
"loss": 5.6982,
"step": 200
},
{
"epoch": 0.7578558225508318,
"grad_norm": 20.625,
"learning_rate": 3.3279315778858034e-06,
"loss": 5.862,
"step": 205
},
{
"epoch": 0.7763401109057301,
"grad_norm": 50.75,
"learning_rate": 2.8607026544210115e-06,
"loss": 5.7507,
"step": 210
},
{
"epoch": 0.7948243992606284,
"grad_norm": 49.75,
"learning_rate": 2.423295365558821e-06,
"loss": 5.7101,
"step": 215
},
{
"epoch": 0.8133086876155268,
"grad_norm": 14.4375,
"learning_rate": 2.01753680992107e-06,
"loss": 5.8428,
"step": 220
},
{
"epoch": 0.8317929759704251,
"grad_norm": 34.75,
"learning_rate": 1.6451218858706374e-06,
"loss": 5.7162,
"step": 225
},
{
"epoch": 0.8502772643253235,
"grad_norm": 47.0,
"learning_rate": 1.307606211733522e-06,
"loss": 5.6621,
"step": 230
},
{
"epoch": 0.8687615526802218,
"grad_norm": 12.875,
"learning_rate": 1.0063996278090704e-06,
"loss": 5.6616,
"step": 235
},
{
"epoch": 0.8872458410351202,
"grad_norm": 13.1875,
"learning_rate": 7.427603073110967e-07,
"loss": 5.8559,
"step": 240
},
{
"epoch": 0.9057301293900185,
"grad_norm": 7.96875,
"learning_rate": 5.177895008392353e-07,
"loss": 5.7149,
"step": 245
},
{
"epoch": 0.9242144177449169,
"grad_norm": 61.75,
"learning_rate": 3.3242693633337986e-07,
"loss": 5.9011,
"step": 250
},
{
"epoch": 0.9426987060998152,
"grad_norm": 13.625,
"learning_rate": 1.874468937261531e-07,
"loss": 5.685,
"step": 255
},
{
"epoch": 0.9611829944547134,
"grad_norm": 15.6875,
"learning_rate": 8.345497068998897e-08,
"loss": 5.7765,
"step": 260
},
{
"epoch": 0.9796672828096118,
"grad_norm": 18.5,
"learning_rate": 2.088555298867978e-08,
"loss": 5.734,
"step": 265
},
{
"epoch": 0.9981515711645101,
"grad_norm": 28.25,
"learning_rate": 0.0,
"loss": 5.7909,
"step": 270
},
{
"epoch": 0.9981515711645101,
"eval_loss": 5.75302791595459,
"eval_runtime": 376.2306,
"eval_samples_per_second": 2.581,
"eval_steps_per_second": 0.324,
"step": 270
},
{
"epoch": 0.9981515711645101,
"step": 270,
"total_flos": 5.260333472022528e+16,
"train_loss": 7.3803352285314485,
"train_runtime": 2516.7229,
"train_samples_per_second": 3.438,
"train_steps_per_second": 0.107
}
],
"logging_steps": 5,
"max_steps": 270,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.260333472022528e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}