{
  "best_metric": 0.6268936991691589,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.40424456796361796,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00808489135927236,
      "grad_norm": 1.6052894592285156,
      "learning_rate": 5e-05,
      "loss": 1.744,
      "step": 1
    },
    {
      "epoch": 0.00808489135927236,
      "eval_loss": 2.1085262298583984,
      "eval_runtime": 3.8396,
      "eval_samples_per_second": 13.022,
      "eval_steps_per_second": 3.386,
      "step": 1
    },
    {
      "epoch": 0.01616978271854472,
      "grad_norm": 1.779472827911377,
      "learning_rate": 0.0001,
      "loss": 1.9097,
      "step": 2
    },
    {
      "epoch": 0.02425467407781708,
      "grad_norm": 2.0423178672790527,
      "learning_rate": 9.990365154573717e-05,
      "loss": 1.8782,
      "step": 3
    },
    {
      "epoch": 0.03233956543708944,
      "grad_norm": 2.7541654109954834,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.709,
      "step": 4
    },
    {
      "epoch": 0.0404244567963618,
      "grad_norm": 2.0727179050445557,
      "learning_rate": 9.913533761814537e-05,
      "loss": 1.23,
      "step": 5
    },
    {
      "epoch": 0.04850934815563416,
      "grad_norm": 1.5737111568450928,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.1274,
      "step": 6
    },
    {
      "epoch": 0.05659423951490652,
      "grad_norm": 1.5571566820144653,
      "learning_rate": 9.761185582727977e-05,
      "loss": 1.0106,
      "step": 7
    },
    {
      "epoch": 0.06467913087417888,
      "grad_norm": 1.84938383102417,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.9096,
      "step": 8
    },
    {
      "epoch": 0.07276402223345124,
      "grad_norm": 0.7360393404960632,
      "learning_rate": 9.535927336897098e-05,
      "loss": 0.8576,
      "step": 9
    },
    {
      "epoch": 0.0808489135927236,
      "grad_norm": 0.7223778367042542,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.7962,
      "step": 10
    },
    {
      "epoch": 0.08893380495199596,
      "grad_norm": 0.6078234314918518,
      "learning_rate": 9.241613255361455e-05,
      "loss": 0.6922,
      "step": 11
    },
    {
      "epoch": 0.09701869631126832,
      "grad_norm": 0.6479470729827881,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.5929,
      "step": 12
    },
    {
      "epoch": 0.10510358767054068,
      "grad_norm": 0.4767465889453888,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.7316,
      "step": 13
    },
    {
      "epoch": 0.11318847902981304,
      "grad_norm": 0.49146518111228943,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.6829,
      "step": 14
    },
    {
      "epoch": 0.1212733703890854,
      "grad_norm": 0.49033933877944946,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.7022,
      "step": 15
    },
    {
      "epoch": 0.12935826174835777,
      "grad_norm": 0.4863513112068176,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.826,
      "step": 16
    },
    {
      "epoch": 0.1374431531076301,
      "grad_norm": 0.4957202970981598,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.8124,
      "step": 17
    },
    {
      "epoch": 0.14552804446690248,
      "grad_norm": 0.4850342571735382,
      "learning_rate": 7.75e-05,
      "loss": 0.77,
      "step": 18
    },
    {
      "epoch": 0.15361293582617483,
      "grad_norm": 0.47507721185684204,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.7409,
      "step": 19
    },
    {
      "epoch": 0.1616978271854472,
      "grad_norm": 0.4847748577594757,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.7319,
      "step": 20
    },
    {
      "epoch": 0.16978271854471955,
      "grad_norm": 0.46571776270866394,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.7065,
      "step": 21
    },
    {
      "epoch": 0.17786760990399192,
      "grad_norm": 0.46438243985176086,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.7466,
      "step": 22
    },
    {
      "epoch": 0.18595250126326426,
      "grad_norm": 0.43673038482666016,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.6795,
      "step": 23
    },
    {
      "epoch": 0.19403739262253664,
      "grad_norm": 0.40861475467681885,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.6411,
      "step": 24
    },
    {
      "epoch": 0.20212228398180898,
      "grad_norm": 0.4549863040447235,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.6347,
      "step": 25
    },
    {
      "epoch": 0.20212228398180898,
      "eval_loss": 0.6450268030166626,
      "eval_runtime": 3.4879,
      "eval_samples_per_second": 14.335,
      "eval_steps_per_second": 3.727,
      "step": 25
    },
    {
      "epoch": 0.21020717534108135,
      "grad_norm": 0.5683823823928833,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.7581,
      "step": 26
    },
    {
      "epoch": 0.21829206670035373,
      "grad_norm": 0.5637506246566772,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.7071,
      "step": 27
    },
    {
      "epoch": 0.22637695805962607,
      "grad_norm": 0.44266441464424133,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.7379,
      "step": 28
    },
    {
      "epoch": 0.23446184941889844,
      "grad_norm": 0.3773636519908905,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.7573,
      "step": 29
    },
    {
      "epoch": 0.2425467407781708,
      "grad_norm": 0.3675413131713867,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.6714,
      "step": 30
    },
    {
      "epoch": 0.25063163213744316,
      "grad_norm": 0.37877586483955383,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.7193,
      "step": 31
    },
    {
      "epoch": 0.25871652349671553,
      "grad_norm": 0.41423583030700684,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.6975,
      "step": 32
    },
    {
      "epoch": 0.26680141485598785,
      "grad_norm": 0.4509444832801819,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.7055,
      "step": 33
    },
    {
      "epoch": 0.2748863062152602,
      "grad_norm": 0.4025251567363739,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.6764,
      "step": 34
    },
    {
      "epoch": 0.2829711975745326,
      "grad_norm": 0.4443969428539276,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.6929,
      "step": 35
    },
    {
      "epoch": 0.29105608893380497,
      "grad_norm": 0.4087773859500885,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.6093,
      "step": 36
    },
    {
      "epoch": 0.2991409802930773,
      "grad_norm": 0.40454772114753723,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.6058,
      "step": 37
    },
    {
      "epoch": 0.30722587165234966,
      "grad_norm": 0.34317758679389954,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.5982,
      "step": 38
    },
    {
      "epoch": 0.31531076301162203,
      "grad_norm": 0.3430269658565521,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.6715,
      "step": 39
    },
    {
      "epoch": 0.3233956543708944,
      "grad_norm": 0.3907616138458252,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.6747,
      "step": 40
    },
    {
      "epoch": 0.3314805457301668,
      "grad_norm": 0.35974612832069397,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.6912,
      "step": 41
    },
    {
      "epoch": 0.3395654370894391,
      "grad_norm": 0.47870934009552,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.7493,
      "step": 42
    },
    {
      "epoch": 0.34765032844871147,
      "grad_norm": 0.4124900698661804,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.6813,
      "step": 43
    },
    {
      "epoch": 0.35573521980798384,
      "grad_norm": 0.3971357047557831,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.7382,
      "step": 44
    },
    {
      "epoch": 0.3638201111672562,
      "grad_norm": 0.4053785800933838,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.6815,
      "step": 45
    },
    {
      "epoch": 0.3719050025265285,
      "grad_norm": 0.42002072930336,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.6561,
      "step": 46
    },
    {
      "epoch": 0.3799898938858009,
      "grad_norm": 0.4654466211795807,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.6316,
      "step": 47
    },
    {
      "epoch": 0.3880747852450733,
      "grad_norm": 0.4234202802181244,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.6168,
      "step": 48
    },
    {
      "epoch": 0.39615967660434565,
      "grad_norm": 0.49169376492500305,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.5986,
      "step": 49
    },
    {
      "epoch": 0.40424456796361796,
      "grad_norm": 0.5422549843788147,
      "learning_rate": 1e-05,
      "loss": 0.5929,
      "step": 50
    },
    {
      "epoch": 0.40424456796361796,
      "eval_loss": 0.6268936991691589,
      "eval_runtime": 3.4738,
      "eval_samples_per_second": 14.393,
      "eval_steps_per_second": 3.742,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}