{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 2440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 4.480478763580322,
      "learning_rate": 4.75e-05,
      "loss": 0.5517,
      "step": 122
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7167919799498746,
      "eval_f1": 0.6423852090389206,
      "eval_loss": 0.5130985379219055,
      "eval_precision": 0.6513043478260869,
      "eval_recall": 0.6371158392434988,
      "eval_runtime": 1.7122,
      "eval_samples_per_second": 233.038,
      "eval_steps_per_second": 29.203,
      "step": 122
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.928369998931885,
      "learning_rate": 4.5e-05,
      "loss": 0.4833,
      "step": 244
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7518796992481203,
      "eval_f1": 0.7158957687522027,
      "eval_loss": 0.4657236337661743,
      "eval_precision": 0.7087837837837838,
      "eval_recall": 0.7294508092380433,
      "eval_runtime": 1.7104,
      "eval_samples_per_second": 233.285,
      "eval_steps_per_second": 29.234,
      "step": 244
    },
    {
      "epoch": 3.0,
      "grad_norm": 4.609431266784668,
      "learning_rate": 4.25e-05,
      "loss": 0.4318,
      "step": 366
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8120300751879699,
      "eval_f1": 0.7781114447781114,
      "eval_loss": 0.40558183193206787,
      "eval_precision": 0.7728937728937728,
      "eval_recall": 0.784506273867976,
      "eval_runtime": 1.7095,
      "eval_samples_per_second": 233.398,
      "eval_steps_per_second": 29.248,
      "step": 366
    },
    {
      "epoch": 4.0,
      "grad_norm": 5.862994194030762,
      "learning_rate": 4e-05,
      "loss": 0.3905,
      "step": 488
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8421052631578947,
      "eval_f1": 0.8100071803786705,
      "eval_loss": 0.3810623586177826,
      "eval_precision": 0.8092466373122624,
      "eval_recall": 0.8107837788688852,
      "eval_runtime": 1.71,
      "eval_samples_per_second": 233.338,
      "eval_steps_per_second": 29.24,
      "step": 488
    },
    {
      "epoch": 5.0,
      "grad_norm": 8.350956916809082,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.3626,
      "step": 610
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.849624060150376,
      "eval_f1": 0.8186033824331697,
      "eval_loss": 0.36519819498062134,
      "eval_precision": 0.8186033824331697,
      "eval_recall": 0.8186033824331697,
      "eval_runtime": 1.7101,
      "eval_samples_per_second": 233.324,
      "eval_steps_per_second": 29.239,
      "step": 610
    },
    {
      "epoch": 6.0,
      "grad_norm": 2.2842788696289062,
      "learning_rate": 3.5e-05,
      "loss": 0.3331,
      "step": 732
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8546365914786967,
      "eval_f1": 0.8325179481241316,
      "eval_loss": 0.3646066188812256,
      "eval_precision": 0.8214046915919095,
      "eval_recall": 0.849654482633206,
      "eval_runtime": 1.7082,
      "eval_samples_per_second": 233.581,
      "eval_steps_per_second": 29.271,
      "step": 732
    },
    {
      "epoch": 7.0,
      "grad_norm": 3.8412091732025146,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.3134,
      "step": 854
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8671679197994987,
      "eval_f1": 0.8385441718775052,
      "eval_loss": 0.34403207898139954,
      "eval_precision": 0.8412280701754387,
      "eval_recall": 0.8360156392071285,
      "eval_runtime": 1.7096,
      "eval_samples_per_second": 233.394,
      "eval_steps_per_second": 29.247,
      "step": 854
    },
    {
      "epoch": 8.0,
      "grad_norm": 5.936851501464844,
      "learning_rate": 3e-05,
      "loss": 0.2927,
      "step": 976
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8646616541353384,
      "eval_f1": 0.8375505157126486,
      "eval_loss": 0.3412378430366516,
      "eval_precision": 0.8359243697478991,
      "eval_recall": 0.8392434988179669,
      "eval_runtime": 1.7101,
      "eval_samples_per_second": 233.324,
      "eval_steps_per_second": 29.239,
      "step": 976
    },
    {
      "epoch": 9.0,
      "grad_norm": 10.900829315185547,
      "learning_rate": 2.7500000000000004e-05,
      "loss": 0.2833,
      "step": 1098
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8646616541353384,
      "eval_f1": 0.8383403361344538,
      "eval_loss": 0.3352757394313812,
      "eval_precision": 0.8352012604792199,
      "eval_recall": 0.8417439534460811,
      "eval_runtime": 1.709,
      "eval_samples_per_second": 233.472,
      "eval_steps_per_second": 29.257,
      "step": 1098
    },
    {
      "epoch": 10.0,
      "grad_norm": 5.930834770202637,
      "learning_rate": 2.5e-05,
      "loss": 0.2672,
      "step": 1220
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.8671679197994987,
      "eval_f1": 0.8431987543098654,
      "eval_loss": 0.3296276330947876,
      "eval_precision": 0.8366910866910866,
      "eval_recall": 0.8510183669758138,
      "eval_runtime": 1.7081,
      "eval_samples_per_second": 233.589,
      "eval_steps_per_second": 29.272,
      "step": 1220
    },
    {
      "epoch": 11.0,
      "grad_norm": 8.893163681030273,
      "learning_rate": 2.25e-05,
      "loss": 0.2641,
      "step": 1342
    },
    {
      "epoch": 11.0,
      "eval_accuracy": 0.8771929824561403,
      "eval_f1": 0.8483536940081443,
      "eval_loss": 0.3269977569580078,
      "eval_precision": 0.8575792287132493,
      "eval_recall": 0.8406073831605747,
      "eval_runtime": 1.7082,
      "eval_samples_per_second": 233.574,
      "eval_steps_per_second": 29.27,
      "step": 1342
    },
    {
      "epoch": 12.0,
      "grad_norm": 21.00832748413086,
      "learning_rate": 2e-05,
      "loss": 0.2549,
      "step": 1464
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8696741854636592,
      "eval_f1": 0.8349560901107294,
      "eval_loss": 0.33516642451286316,
      "eval_precision": 0.8558080808080808,
      "eval_recall": 0.8202855064557192,
      "eval_runtime": 1.708,
      "eval_samples_per_second": 233.601,
      "eval_steps_per_second": 29.273,
      "step": 1464
    },
    {
      "epoch": 13.0,
      "grad_norm": 2.7979023456573486,
      "learning_rate": 1.75e-05,
      "loss": 0.2534,
      "step": 1586
    },
    {
      "epoch": 13.0,
      "eval_accuracy": 0.8696741854636592,
      "eval_f1": 0.8329898898834439,
      "eval_loss": 0.34021762013435364,
      "eval_precision": 0.8601973684210527,
      "eval_recall": 0.8152845971994909,
      "eval_runtime": 1.7123,
      "eval_samples_per_second": 233.024,
      "eval_steps_per_second": 29.201,
      "step": 1586
    },
    {
      "epoch": 14.0,
      "grad_norm": 10.50624942779541,
      "learning_rate": 1.5e-05,
      "loss": 0.2389,
      "step": 1708
    },
    {
      "epoch": 14.0,
      "eval_accuracy": 0.8822055137844611,
      "eval_f1": 0.858259325044405,
      "eval_loss": 0.3207694888114929,
      "eval_precision": 0.8573798178418481,
      "eval_recall": 0.8591562102200401,
      "eval_runtime": 1.7089,
      "eval_samples_per_second": 233.48,
      "eval_steps_per_second": 29.258,
      "step": 1708
    },
    {
      "epoch": 15.0,
      "grad_norm": 3.9172396659851074,
      "learning_rate": 1.25e-05,
      "loss": 0.2203,
      "step": 1830
    },
    {
      "epoch": 15.0,
      "eval_accuracy": 0.87468671679198,
      "eval_f1": 0.8422176526415692,
      "eval_loss": 0.3279098570346832,
      "eval_precision": 0.8604724566416373,
      "eval_recall": 0.8288325150027278,
      "eval_runtime": 1.7092,
      "eval_samples_per_second": 233.448,
      "eval_steps_per_second": 29.254,
      "step": 1830
    },
    {
      "epoch": 16.0,
      "grad_norm": 3.250375986099243,
      "learning_rate": 1e-05,
      "loss": 0.2298,
      "step": 1952
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.87468671679198,
      "eval_f1": 0.8448388501742161,
      "eval_loss": 0.3175312280654907,
      "eval_precision": 0.8551721930610677,
      "eval_recall": 0.8363338788870704,
      "eval_runtime": 1.7091,
      "eval_samples_per_second": 233.452,
      "eval_steps_per_second": 29.255,
      "step": 1952
    },
    {
      "epoch": 17.0,
      "grad_norm": 0.9103662967681885,
      "learning_rate": 7.5e-06,
      "loss": 0.2227,
      "step": 2074
    },
    {
      "epoch": 17.0,
      "eval_accuracy": 0.87468671679198,
      "eval_f1": 0.843111041207927,
      "eval_loss": 0.32178837060928345,
      "eval_precision": 0.8585673051692468,
      "eval_recall": 0.831332969630842,
      "eval_runtime": 1.7084,
      "eval_samples_per_second": 233.545,
      "eval_steps_per_second": 29.266,
      "step": 2074
    },
    {
      "epoch": 18.0,
      "grad_norm": 7.885207176208496,
      "learning_rate": 5e-06,
      "loss": 0.2225,
      "step": 2196
    },
    {
      "epoch": 18.0,
      "eval_accuracy": 0.8771929824561403,
      "eval_f1": 0.8514869535493182,
      "eval_loss": 0.3177903890609741,
      "eval_precision": 0.8523821128305106,
      "eval_recall": 0.8506092016730314,
      "eval_runtime": 1.709,
      "eval_samples_per_second": 233.472,
      "eval_steps_per_second": 29.257,
      "step": 2196
    },
    {
      "epoch": 19.0,
      "grad_norm": 4.890872478485107,
      "learning_rate": 2.5e-06,
      "loss": 0.2192,
      "step": 2318
    },
    {
      "epoch": 19.0,
      "eval_accuracy": 0.8771929824561403,
      "eval_f1": 0.846679266293906,
      "eval_loss": 0.3198798894882202,
      "eval_precision": 0.8609191655801824,
      "eval_recall": 0.8356064739043463,
      "eval_runtime": 1.7082,
      "eval_samples_per_second": 233.574,
      "eval_steps_per_second": 29.27,
      "step": 2318
    },
    {
      "epoch": 20.0,
      "grad_norm": 4.476297378540039,
      "learning_rate": 0.0,
      "loss": 0.2229,
      "step": 2440
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.8771929824561403,
      "eval_f1": 0.846679266293906,
      "eval_loss": 0.3205019235610962,
      "eval_precision": 0.8609191655801824,
      "eval_recall": 0.8356064739043463,
      "eval_runtime": 1.7082,
      "eval_samples_per_second": 233.579,
      "eval_steps_per_second": 29.271,
      "step": 2440
    },
    {
      "epoch": 20.0,
      "step": 2440,
      "total_flos": 7662265464912000.0,
      "train_loss": 0.3029043838626049,
      "train_runtime": 615.7228,
      "train_samples_per_second": 118.17,
      "train_steps_per_second": 3.963
    }
  ],
  "logging_steps": 500,
  "max_steps": 2440,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "total_flos": 7662265464912000.0,
  "train_batch_size": 30,
  "trial_name": null,
  "trial_params": null
}