{
  "best_metric": 0.8006419539451599,
  "best_model_checkpoint": "data/Mistral-7B_task-1_120-samples_config-2_auto/checkpoint-22",
  "epoch": 10.909090909090908,
  "eval_steps": 500,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 2.5200440883636475,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.0757,
      "step": 1
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 2.74460768699646,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.0848,
      "step": 2
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 2.399035930633545,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.0732,
      "step": 4
    },
    {
      "epoch": 0.9090909090909091,
      "eval_loss": 1.0410027503967285,
      "eval_runtime": 9.9786,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 5
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 2.24735689163208,
      "learning_rate": 2.4e-05,
      "loss": 1.0927,
      "step": 6
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 1.4750593900680542,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.9017,
      "step": 8
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 1.4488487243652344,
      "learning_rate": 4e-05,
      "loss": 0.9533,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.8651717305183411,
      "eval_runtime": 9.9797,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 11
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 1.1370949745178223,
      "learning_rate": 4.8e-05,
      "loss": 0.7473,
      "step": 12
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 1.217821478843689,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.8209,
      "step": 14
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 1.2512751817703247,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.77,
      "step": 16
    },
    {
      "epoch": 2.909090909090909,
      "eval_loss": 0.8181025385856628,
      "eval_runtime": 9.9892,
      "eval_samples_per_second": 2.403,
      "eval_steps_per_second": 2.403,
      "step": 16
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.911823034286499,
      "learning_rate": 7.2e-05,
      "loss": 0.6774,
      "step": 18
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.9840978384017944,
      "learning_rate": 8e-05,
      "loss": 0.6603,
      "step": 20
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.0185283422470093,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.6091,
      "step": 22
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.8006419539451599,
      "eval_runtime": 9.9803,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 22
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 1.0211368799209595,
      "learning_rate": 9.6e-05,
      "loss": 0.5531,
      "step": 24
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 1.279502511024475,
      "learning_rate": 9.999512620046522e-05,
      "loss": 0.4937,
      "step": 26
    },
    {
      "epoch": 4.909090909090909,
      "eval_loss": 0.8277094960212708,
      "eval_runtime": 9.98,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 27
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 1.2927266359329224,
      "learning_rate": 9.995614150494293e-05,
      "loss": 0.4136,
      "step": 28
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 1.0783287286758423,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.3471,
      "step": 30
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 1.5167845487594604,
      "learning_rate": 9.976136999909156e-05,
      "loss": 0.3093,
      "step": 32
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.9963887333869934,
      "eval_runtime": 9.9808,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 33
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 1.3760337829589844,
      "learning_rate": 9.96057350657239e-05,
      "loss": 0.2358,
      "step": 34
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 1.3134976625442505,
      "learning_rate": 9.941141907232765e-05,
      "loss": 0.1933,
      "step": 36
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 1.4621546268463135,
      "learning_rate": 9.917857354066931e-05,
      "loss": 0.1731,
      "step": 38
    },
    {
      "epoch": 6.909090909090909,
      "eval_loss": 1.081476092338562,
      "eval_runtime": 9.9877,
      "eval_samples_per_second": 2.403,
      "eval_steps_per_second": 2.403,
      "step": 38
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 1.1559796333312988,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.1331,
      "step": 40
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 1.1186745166778564,
      "learning_rate": 9.859805002892732e-05,
      "loss": 0.0815,
      "step": 42
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.7643119096755981,
      "learning_rate": 9.825082472361557e-05,
      "loss": 0.1045,
      "step": 44
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.3043652772903442,
      "eval_runtime": 9.9847,
      "eval_samples_per_second": 2.404,
      "eval_steps_per_second": 2.404,
      "step": 44
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 1.0151166915893555,
      "learning_rate": 9.786597487660337e-05,
      "loss": 0.0489,
      "step": 46
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 2.2880821228027344,
      "learning_rate": 9.744380058222483e-05,
      "loss": 0.0642,
      "step": 48
    },
    {
      "epoch": 8.909090909090908,
      "eval_loss": 1.3456028699874878,
      "eval_runtime": 9.9818,
      "eval_samples_per_second": 2.404,
      "eval_steps_per_second": 2.404,
      "step": 49
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 1.4389766454696655,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0681,
      "step": 50
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 0.7696138620376587,
      "learning_rate": 9.648882429441257e-05,
      "loss": 0.0378,
      "step": 52
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 1.0077182054519653,
      "learning_rate": 9.595676696276172e-05,
      "loss": 0.0552,
      "step": 54
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.4000715017318726,
      "eval_runtime": 9.9796,
      "eval_samples_per_second": 2.405,
      "eval_steps_per_second": 2.405,
      "step": 55
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 1.0939630270004272,
      "learning_rate": 9.538887392664544e-05,
      "loss": 0.0368,
      "step": 56
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 1.0606611967086792,
      "learning_rate": 9.478558801197065e-05,
      "loss": 0.0318,
      "step": 58
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 0.8217382431030273,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0359,
      "step": 60
    },
    {
      "epoch": 10.909090909090908,
      "eval_loss": 1.489484429359436,
      "eval_runtime": 9.984,
      "eval_samples_per_second": 2.404,
      "eval_steps_per_second": 2.404,
      "step": 60
    },
    {
      "epoch": 10.909090909090908,
      "step": 60,
      "total_flos": 1.5952182167732224e+16,
      "train_loss": 0.42697442828988036,
      "train_runtime": 1180.2738,
      "train_samples_per_second": 3.728,
      "train_steps_per_second": 0.212
    }
  ],
  "logging_steps": 2,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5952182167732224e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}