{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1,
  "eval_steps": 2000,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 0.9697519540786743,
      "learning_rate": 1e-06,
      "loss": 0.1629,
      "step": 100
    },
    {
      "epoch": 0.01,
      "grad_norm": 1.3848124742507935,
      "learning_rate": 9.898989898989898e-07,
      "loss": 0.14,
      "step": 200
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.9986572265625,
      "learning_rate": 9.797979797979797e-07,
      "loss": 0.1354,
      "step": 300
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.11438798904418945,
      "learning_rate": 9.696969696969698e-07,
      "loss": 0.1182,
      "step": 400
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.8548241257667542,
      "learning_rate": 9.595959595959596e-07,
      "loss": 0.1192,
      "step": 500
    },
    {
      "epoch": 0.03,
      "grad_norm": 1.5312464237213135,
      "learning_rate": 9.494949494949495e-07,
      "loss": 0.0997,
      "step": 600
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.9692059755325317,
      "learning_rate": 9.393939393939395e-07,
      "loss": 0.102,
      "step": 700
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.42864611744880676,
      "learning_rate": 9.292929292929292e-07,
      "loss": 0.0901,
      "step": 800
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.852543830871582,
      "learning_rate": 9.191919191919192e-07,
      "loss": 0.0937,
      "step": 900
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.5718303322792053,
      "learning_rate": 9.09090909090909e-07,
      "loss": 0.093,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.9396565556526184,
      "learning_rate": 8.98989898989899e-07,
      "loss": 0.0892,
      "step": 1100
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.08157779276371002,
      "learning_rate": 8.888888888888888e-07,
      "loss": 0.0934,
      "step": 1200
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.8076322078704834,
      "learning_rate": 8.787878787878787e-07,
      "loss": 0.0725,
      "step": 1300
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.5076119899749756,
      "learning_rate": 8.686868686868687e-07,
      "loss": 0.0835,
      "step": 1400
    },
    {
      "epoch": 0.07,
      "grad_norm": 1.1567238569259644,
      "learning_rate": 8.585858585858586e-07,
      "loss": 0.0747,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.6817927956581116,
      "learning_rate": 8.484848484848484e-07,
      "loss": 0.0903,
      "step": 1600
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.6467050313949585,
      "learning_rate": 8.383838383838383e-07,
      "loss": 0.0721,
      "step": 1700
    },
    {
      "epoch": 0.09,
      "grad_norm": 1.8435570001602173,
      "learning_rate": 8.282828282828283e-07,
      "loss": 0.0847,
      "step": 1800
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.6265794634819031,
      "learning_rate": 8.181818181818182e-07,
      "loss": 0.0687,
      "step": 1900
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.360060453414917,
      "learning_rate": 8.08080808080808e-07,
      "loss": 0.0748,
      "step": 2000
    },
    {
      "epoch": 0.1,
      "eval_loss": 0.06745574623346329,
      "eval_runtime": 304.7718,
      "eval_samples_per_second": 3.281,
      "eval_steps_per_second": 0.82,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "total_flos": 1.63205502468096e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}