Femboyuwu2000 committed on
Commit 7e3aa77
1 Parent(s): 2abae2e

Training in progress, step 5300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f5bfad4f42d19e9da696cbdb43e4409d0f807b2b877c6a089cfd0a74bf2771f
+oid sha256:20cdefd57805f803ae76e655f7dbbf814dfbdec3e1c1f7257e45fb2a6c06eab5
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc987e09473dd7b3d086e9ed9bc1748d7d1b3108088d88eddfdaf6972cbf4189
+oid sha256:2db2c533e49d05568b09bae363fc3e81a696c252f9dfc839b36f0463a25f0e02
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1dbdd57232626eddcfa10c081f5c18623de1c4663a61f927134c8ad55d47712
+oid sha256:49f030e6373edf769dccea9c3f55568d6feebbe751b5ccaf17c5dd48945e66be
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b342905fe7ee06e2011340421596fa9b2d4facdf3e6fe1f5ce5617922a76da7c
+oid sha256:f889ec4d2e9c550aeb9719ae7a9587413e8a6ae17e4db0aeba0b5fdab4e7fc8f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4224,
+  "epoch": 0.424,
   "eval_steps": 500,
-  "global_step": 5280,
+  "global_step": 5300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1855,6 +1855,13 @@
       "learning_rate": 2.589453947885745e-05,
       "loss": 3.5018,
       "step": 5280
+    },
+    {
+      "epoch": 0.42,
+      "grad_norm": 34.955291748046875,
+      "learning_rate": 2.5861088803056324e-05,
+      "loss": 3.4988,
+      "step": 5300
     }
   ],
   "logging_steps": 20,
@@ -1862,7 +1869,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.2497937290428416e+16,
+  "total_flos": 1.2540420947214336e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null