Femboyuwu2000 committed on
Commit 22b02f0
1 Parent(s): dd627b1

Training in progress, step 6600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c448acb197cac2ff41883b95dd2638d4520107e6fabdd779c26a2104c239dbec
+ oid sha256:ae6463dbdc2d91e9b1322891346f3833afe4775061c6f0d3b48157d6dc75931b
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6bea9366110eb126918d6bd54a6076b009e1e42b45b792b4257de387cca7a230
+ oid sha256:15999c7ecd152d668237b91758fdf2cc3d249213db421bac4f67f330f90f0aed
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:db3f270725a091664f0e5136926832fa25bc9d5c6fd1757199539906c68a4edf
+ oid sha256:c054359dfd0c186601f9d93f0acef8011e2b1aa8a1492ec2b57ae90a1bc5794b
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:47fd382c1759e8992bf827fcae1a0f8564edc018afc6fd46493c07694bb97f98
+ oid sha256:9cc67bbe12a73a6303ede190a9fcdf4c085df79ad7d2b182ce6b8dfd5dcdfecd
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5264,
+ "epoch": 0.528,
  "eval_steps": 500,
- "global_step": 6580,
+ "global_step": 6600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2310,6 +2310,13 @@
  "learning_rate": 2.3499429673227224e-05,
  "loss": 3.4745,
  "step": 6580
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 39.59190368652344,
+ "learning_rate": 2.3459355447763596e-05,
+ "loss": 3.4875,
+ "step": 6600
  }
  ],
  "logging_steps": 20,
@@ -2317,7 +2324,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1.553491184123904e+16,
+ "total_flos": 1.5577756363063296e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null