Femboyuwu2000 committed on
Commit 793ffbf
1 Parent(s): b164306

Training in progress, step 6580, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:af05e0a6c9ba308c4aa54b622a45d3520ca159d9d263a4a9c8055c1826a20938
+oid sha256:c448acb197cac2ff41883b95dd2638d4520107e6fabdd779c26a2104c239dbec
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90d032f3a40082b9d9daff6363158374b72c5065b002445b695a99ca47398242
+oid sha256:6bea9366110eb126918d6bd54a6076b009e1e42b45b792b4257de387cca7a230
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:723ef5e592813da00df6025c04119524c1f1ef85c5f1fe9c93dc4858a8d9cc57
+oid sha256:db3f270725a091664f0e5136926832fa25bc9d5c6fd1757199539906c68a4edf
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8119438268b977f27b827c8867c2f6dee7675b5e1267fc478a5f6ac6f0b45e53
+oid sha256:47fd382c1759e8992bf827fcae1a0f8564edc018afc6fd46493c07694bb97f98
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5248,
+  "epoch": 0.5264,
   "eval_steps": 500,
-  "global_step": 6560,
+  "global_step": 6580,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2303,6 +2303,13 @@
       "learning_rate": 2.353941474369544e-05,
       "loss": 3.4933,
       "step": 6560
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 33.990203857421875,
+      "learning_rate": 2.3499429673227224e-05,
+      "loss": 3.4745,
+      "step": 6580
     }
   ],
   "logging_steps": 20,
@@ -2310,7 +2317,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5485571748724736e+16,
+  "total_flos": 1.553491184123904e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null