{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 12120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.41254125412541254,
"grad_norm": 2.3108015060424805,
"learning_rate": 4.793729372937294e-05,
"loss": 2.2146,
"step": 500
},
{
"epoch": 0.8250825082508251,
"grad_norm": 2.0797488689422607,
"learning_rate": 4.5874587458745876e-05,
"loss": 2.1084,
"step": 1000
},
{
"epoch": 1.2376237623762376,
"grad_norm": 3.0814099311828613,
"learning_rate": 4.3811881188118816e-05,
"loss": 2.0322,
"step": 1500
},
{
"epoch": 1.6501650165016502,
"grad_norm": 2.495213508605957,
"learning_rate": 4.174917491749175e-05,
"loss": 2.0345,
"step": 2000
},
{
"epoch": 2.062706270627063,
"grad_norm": 2.1899046897888184,
"learning_rate": 3.968646864686469e-05,
"loss": 2.0033,
"step": 2500
},
{
"epoch": 2.4752475247524752,
"grad_norm": 1.9743722677230835,
"learning_rate": 3.762376237623763e-05,
"loss": 1.9696,
"step": 3000
},
{
"epoch": 2.887788778877888,
"grad_norm": 2.0741982460021973,
"learning_rate": 3.556105610561056e-05,
"loss": 1.9576,
"step": 3500
},
{
"epoch": 3.3003300330033003,
"grad_norm": 2.478909492492676,
"learning_rate": 3.34983498349835e-05,
"loss": 1.9328,
"step": 4000
},
{
"epoch": 3.7128712871287126,
"grad_norm": 1.9436851739883423,
"learning_rate": 3.1435643564356435e-05,
"loss": 1.9219,
"step": 4500
},
{
"epoch": 4.125412541254126,
"grad_norm": 2.5008606910705566,
"learning_rate": 2.9372937293729375e-05,
"loss": 1.9088,
"step": 5000
},
{
"epoch": 4.537953795379538,
"grad_norm": 2.509181499481201,
"learning_rate": 2.731023102310231e-05,
"loss": 1.9007,
"step": 5500
},
{
"epoch": 4.9504950495049505,
"grad_norm": 2.128865957260132,
"learning_rate": 2.5247524752475248e-05,
"loss": 1.8946,
"step": 6000
},
{
"epoch": 5.363036303630363,
"grad_norm": 2.622591972351074,
"learning_rate": 2.3184818481848185e-05,
"loss": 1.8833,
"step": 6500
},
{
"epoch": 5.775577557755776,
"grad_norm": 2.249598264694214,
"learning_rate": 2.1122112211221125e-05,
"loss": 1.8689,
"step": 7000
},
{
"epoch": 6.188118811881188,
"grad_norm": 2.382103681564331,
"learning_rate": 1.905940594059406e-05,
"loss": 1.8549,
"step": 7500
},
{
"epoch": 6.600660066006601,
"grad_norm": 2.8902101516723633,
"learning_rate": 1.6996699669966998e-05,
"loss": 1.8497,
"step": 8000
},
{
"epoch": 7.013201320132013,
"grad_norm": 3.372351884841919,
"learning_rate": 1.4933993399339935e-05,
"loss": 1.8578,
"step": 8500
},
{
"epoch": 7.425742574257426,
"grad_norm": 2.1681647300720215,
"learning_rate": 1.2871287128712873e-05,
"loss": 1.8376,
"step": 9000
},
{
"epoch": 7.838283828382838,
"grad_norm": 2.148872137069702,
"learning_rate": 1.080858085808581e-05,
"loss": 1.846,
"step": 9500
},
{
"epoch": 8.250825082508252,
"grad_norm": 2.4388763904571533,
"learning_rate": 8.745874587458746e-06,
"loss": 1.825,
"step": 10000
},
{
"epoch": 8.663366336633663,
"grad_norm": 3.16158127784729,
"learning_rate": 6.6831683168316835e-06,
"loss": 1.8288,
"step": 10500
},
{
"epoch": 9.075907590759076,
"grad_norm": 2.0919010639190674,
"learning_rate": 4.62046204620462e-06,
"loss": 1.8287,
"step": 11000
},
{
"epoch": 9.488448844884488,
"grad_norm": 2.2496068477630615,
"learning_rate": 2.557755775577558e-06,
"loss": 1.8293,
"step": 11500
},
{
"epoch": 9.900990099009901,
"grad_norm": 3.0332181453704834,
"learning_rate": 4.950495049504951e-07,
"loss": 1.8216,
"step": 12000
},
{
"epoch": 10.0,
"step": 12120,
"total_flos": 1.1470348787122176e+16,
"train_loss": 1.9159377371910775,
"train_runtime": 858.1835,
"train_samples_per_second": 112.948,
"train_steps_per_second": 14.123
}
],
"logging_steps": 500,
"max_steps": 12120,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1470348787122176e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}