Femboyuwu2000 committed
Commit 0b363d0
1 Parent(s): 6b5d826

Training in progress, step 6340, checkpoint

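For context: the files below are what the Hugging Face Trainer pushes when checkpointing an adapter run (adapter_model.safetensors suggests a PEFT/LoRA adapter rather than full model weights). The following is a minimal sketch of training arguments reconstructed only from the values visible in trainer_state.json further down; output_dir, the model, and the dataset are placeholders, not taken from this repository.

from transformers import TrainingArguments

# Hypothetical reconstruction: only the numeric values below come from
# trainer_state.json in this commit; everything else is an assumption.
args = TrainingArguments(
    output_dir="outputs",            # placeholder
    per_device_train_batch_size=8,   # matches "train_batch_size": 8
    num_train_epochs=2,              # matches "num_train_epochs": 2
    logging_steps=20,                # matches "logging_steps": 20
    save_steps=20,                   # matches "save_steps": 20
    push_to_hub=True,
    hub_strategy="checkpoint",       # likely; this strategy pushes the latest
                                     # checkpoint to a "last-checkpoint/" folder
)

# trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
# trainer.train()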
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58c64b2afa3075c63c8b9f594cccb3ee92261843651be4ce842e6f492eeda183
+oid sha256:665884f5884803b9a5bbea0709b82dd7fbb647b93fad77b359888693e9cff352
 size 13982248
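Each of the pointer files in this commit follows the Git LFS pointer format: the repository tracks only the sha256 oid and byte size, while the binary itself lives in LFS storage. A small sketch for checking a downloaded file against its pointer (the local path is a placeholder):

import hashlib
import os

def verify_lfs_object(path, expected_sha256, expected_size):
    # Hash the file in 1 MiB chunks and compare against the pointer's oid/size.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256 and os.path.getsize(path) == expected_size

# Example with the new adapter weights from this commit:
# verify_lfs_object("last-checkpoint/adapter_model.safetensors",
#                   "665884f5884803b9a5bbea0709b82dd7fbb647b93fad77b359888693e9cff352",
#                   13982248)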
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d07424ebe3809b342981b7f4601654f56d1e92c48e452c0691c294c5cd8fd1e
+oid sha256:f7ce0ad1ff0c9d0b20bcddceaaa44934a70735779e9f943f3d8229f9c73ef187
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ec163df978966f1649bfa7de360f9e162803dced01b76f578e0c72ae9cc60d8
+oid sha256:d629e4cc722d8e87bb022ff0d9602b06aa0cd7b201a1717d04fe682ff20396b5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27c21c5aa08a74ea5fd586db5a5e4a5db803cbdef83255f0ea8bbf1d608282c0
+oid sha256:1fcd4e201825a35fb3ea0cae13bedc3b38b96d5f8699bf82f679e567b3a62a37
 size 1064
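optimizer.pt, rng_state.pth, and scheduler.pt are plain torch serializations of the optimizer state, the random-number-generator state, and the learning-rate scheduler state. A quick inspection sketch, assuming the checkpoint folder has been downloaded locally (exact dictionary keys can vary between transformers versions):

import torch

# weights_only=False because these files contain pickled Python objects,
# not just tensors; recent torch versions reject them otherwise.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(type(optimizer_state), type(scheduler_state), type(rng_state))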
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5056,
+  "epoch": 0.5072,
   "eval_steps": 500,
-  "global_step": 6320,
+  "global_step": 6340,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2219,6 +2219,13 @@
       "learning_rate": 2.4012129778456556e-05,
       "loss": 3.5062,
       "step": 6320
+    },
+    {
+      "epoch": 0.51,
+      "grad_norm": 36.066741943359375,
+      "learning_rate": 2.397324701119233e-05,
+      "loss": 3.6017,
+      "step": 6340
     }
   ],
   "logging_steps": 20,
@@ -2226,7 +2233,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.4934662058835968e+16,
+  "total_flos": 1.4971240651358208e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null