shibajustfor committed (verified)
Commit 8f81749 · Parent(s): ba59a5e

Training in progress, step 200, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f837d82ed2b2defd2560d8c98441c3a9410bc768a6e064973ead006620ff99d
+oid sha256:cba490cb9b38476f187580c19ba3d3dcd1c994e7222fd5ca4e940fa049f2fe96
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9972c762ec828e05403ecdd6d242414b2d78f140667bef16cd872278569c81e6
+oid sha256:85c89331e1b020217513a572b7945e492a44458106c6a1a748d49eb87932d036
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b489c13e2d776471e9a48162938854a24466ce6713df9c0fb59abd9a09fb226b
+oid sha256:ca9a25c72339c898b564e0c464a3f6fc75bbeec408008928b7ed05533156b98c
 size 1064
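
Each of the .pt/.pth files above is tracked with Git LFS, so the diff only touches the three-line pointer (version, oid sha256, size); the new optimizer, RNG-state, and scheduler blobs themselves live in LFS storage. As a minimal sketch (not part of this repo), a locally fetched blob could be checked against its pointer like this, assuming only the Python standard library; the file paths in the usage comment are hypothetical.

import hashlib

def read_lfs_pointer(pointer_path):
    # Parse the "key value" lines of a Git LFS pointer file into a dict.
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

def matches_pointer(pointer_path, blob_path, chunk_size=1 << 20):
    # Compare the blob's SHA-256 digest and byte size to the pointer's oid/size.
    fields = read_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    total = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == expected_oid and total == expected_size

# Hypothetical usage: the pointer text as committed vs. the resolved file
# downloaded from the Hub.
# print(matches_pointer("optimizer.pt.pointer", "optimizer.pt"))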
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5660377358490566,
+  "epoch": 0.7547169811320755,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -144,6 +144,49 @@
       "eval_samples_per_second": 18.019,
       "eval_steps_per_second": 9.009,
       "step": 150
+    },
+    {
+      "epoch": 0.6037735849056604,
+      "grad_norm": NaN,
+      "learning_rate": 2.0055723659649904e-05,
+      "loss": 0.0,
+      "step": 160
+    },
+    {
+      "epoch": 0.6415094339622641,
+      "grad_norm": NaN,
+      "learning_rate": 1.1454397434679021e-05,
+      "loss": 0.1717,
+      "step": 170
+    },
+    {
+      "epoch": 0.6792452830188679,
+      "grad_norm": NaN,
+      "learning_rate": 5.146355805285452e-06,
+      "loss": 0.0,
+      "step": 180
+    },
+    {
+      "epoch": 0.7169811320754716,
+      "grad_norm": NaN,
+      "learning_rate": 1.2949737362087156e-06,
+      "loss": 0.0,
+      "step": 190
+    },
+    {
+      "epoch": 0.7547169811320755,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0941,
+      "step": 200
+    },
+    {
+      "epoch": 0.7547169811320755,
+      "eval_loss": NaN,
+      "eval_runtime": 6.2151,
+      "eval_samples_per_second": 18.021,
+      "eval_steps_per_second": 9.01,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -158,12 +201,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 5.24925203864617e+16,
+  "total_flos": 6.993160478864179e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null