{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 282,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.10638297872340426,
      "grad_norm": 3.6553943157196045,
      "learning_rate": 4.840425531914894e-05,
      "loss": 0.9456,
      "step": 10
    },
    {
      "epoch": 0.2127659574468085,
      "grad_norm": 3.9983913898468018,
      "learning_rate": 4.663120567375887e-05,
      "loss": 0.8552,
      "step": 20
    },
    {
      "epoch": 0.3191489361702128,
      "grad_norm": 3.7922072410583496,
      "learning_rate": 4.48581560283688e-05,
      "loss": 1.0052,
      "step": 30
    },
    {
      "epoch": 0.425531914893617,
      "grad_norm": 3.6140127182006836,
      "learning_rate": 4.3262411347517734e-05,
      "loss": 0.9121,
      "step": 40
    },
    {
      "epoch": 0.5319148936170213,
      "grad_norm": 3.507614850997925,
      "learning_rate": 4.148936170212766e-05,
      "loss": 0.8598,
      "step": 50
    },
    {
      "epoch": 0.6382978723404256,
      "grad_norm": 4.006348133087158,
      "learning_rate": 3.971631205673759e-05,
      "loss": 0.9708,
      "step": 60
    },
    {
      "epoch": 0.7446808510638298,
      "grad_norm": 4.814829349517822,
      "learning_rate": 3.794326241134752e-05,
      "loss": 0.8753,
      "step": 70
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": 4.398285388946533,
      "learning_rate": 3.617021276595745e-05,
      "loss": 0.9466,
      "step": 80
    },
    {
      "epoch": 0.9574468085106383,
      "grad_norm": 4.186042785644531,
      "learning_rate": 3.4397163120567377e-05,
      "loss": 0.9408,
      "step": 90
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.226564645767212,
      "eval_runtime": 9.5187,
      "eval_samples_per_second": 4.412,
      "eval_steps_per_second": 1.156,
      "step": 94
    },
    {
      "epoch": 1.0638297872340425,
      "grad_norm": 3.774606227874756,
      "learning_rate": 3.262411347517731e-05,
      "loss": 0.8182,
      "step": 100
    },
    {
      "epoch": 1.1702127659574468,
      "grad_norm": 3.641083240509033,
      "learning_rate": 3.085106382978723e-05,
      "loss": 0.7849,
      "step": 110
    },
    {
      "epoch": 1.2765957446808511,
      "grad_norm": 5.060397624969482,
      "learning_rate": 2.9078014184397162e-05,
      "loss": 0.7795,
      "step": 120
    },
    {
      "epoch": 1.3829787234042552,
      "grad_norm": 3.457118272781372,
      "learning_rate": 2.7304964539007094e-05,
      "loss": 0.723,
      "step": 130
    },
    {
      "epoch": 1.4893617021276595,
      "grad_norm": 3.421212673187256,
      "learning_rate": 2.5531914893617022e-05,
      "loss": 0.6819,
      "step": 140
    },
    {
      "epoch": 1.5957446808510638,
      "grad_norm": 3.1989083290100098,
      "learning_rate": 2.3758865248226954e-05,
      "loss": 0.6891,
      "step": 150
    },
    {
      "epoch": 1.702127659574468,
      "grad_norm": 3.486337661743164,
      "learning_rate": 2.1985815602836882e-05,
      "loss": 0.7369,
      "step": 160
    },
    {
      "epoch": 1.8085106382978724,
      "grad_norm": 3.4989173412323,
      "learning_rate": 2.0212765957446807e-05,
      "loss": 0.7042,
      "step": 170
    },
    {
      "epoch": 1.9148936170212765,
      "grad_norm": 2.965428590774536,
      "learning_rate": 1.8439716312056736e-05,
      "loss": 0.712,
      "step": 180
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.416543483734131,
      "eval_runtime": 9.5843,
      "eval_samples_per_second": 4.382,
      "eval_steps_per_second": 1.148,
      "step": 188
    },
    {
      "epoch": 2.021276595744681,
      "grad_norm": 4.359320640563965,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.7045,
      "step": 190
    },
    {
      "epoch": 2.127659574468085,
      "grad_norm": 3.513986825942993,
      "learning_rate": 1.4893617021276596e-05,
      "loss": 0.6259,
      "step": 200
    },
    {
      "epoch": 2.2340425531914896,
      "grad_norm": 4.127784252166748,
      "learning_rate": 1.3120567375886524e-05,
      "loss": 0.6355,
      "step": 210
    },
    {
      "epoch": 2.3404255319148937,
      "grad_norm": 3.798154592514038,
      "learning_rate": 1.1347517730496454e-05,
      "loss": 0.5645,
      "step": 220
    },
    {
      "epoch": 2.4468085106382977,
      "grad_norm": 3.0239367485046387,
      "learning_rate": 9.574468085106383e-06,
      "loss": 0.6694,
      "step": 230
    },
    {
      "epoch": 2.5531914893617023,
      "grad_norm": 3.148362398147583,
      "learning_rate": 7.801418439716313e-06,
      "loss": 0.6103,
      "step": 240
    },
    {
      "epoch": 2.6595744680851063,
      "grad_norm": 4.111635684967041,
      "learning_rate": 6.028368794326241e-06,
      "loss": 0.5299,
      "step": 250
    },
    {
      "epoch": 2.7659574468085104,
      "grad_norm": 3.042776107788086,
      "learning_rate": 4.255319148936171e-06,
      "loss": 0.5928,
      "step": 260
    },
    {
      "epoch": 2.872340425531915,
      "grad_norm": 3.5736513137817383,
      "learning_rate": 2.4822695035460995e-06,
      "loss": 0.6444,
      "step": 270
    },
    {
      "epoch": 2.978723404255319,
      "grad_norm": 3.4036762714385986,
      "learning_rate": 7.092198581560284e-07,
      "loss": 0.5531,
      "step": 280
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.504506826400757,
      "eval_runtime": 14.1693,
      "eval_samples_per_second": 2.964,
      "eval_steps_per_second": 0.776,
      "step": 282
    }
  ],
  "logging_steps": 10,
  "max_steps": 282,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1047574383427584.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}