Femboyuwu2000 committed
Commit df4652c
1 Parent(s): 13e0163

Training in progress, step 5320, checkpoint

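This commit stores a periodic training checkpoint (step 5320) under last-checkpoint/. A minimal sketch for checking how far the run has progressed, assuming the repository has been cloned and the script is run from its root; it only reads the trainer_state.json whose diff appears below:

    import json
    from pathlib import Path

    # trainer_state.json is committed as plain JSON (not an LFS pointer), so it can be read directly.
    state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

    print("global_step:", state["global_step"])              # 5320 in this commit
    print("epoch:      ", state["epoch"])                    # 0.4256
    print("last loss:  ", state["log_history"][-1]["loss"])  # 3.475, logged at step 5320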
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:20cdefd57805f803ae76e655f7dbbf814dfbdec3e1c1f7257e45fb2a6c06eab5
+ oid sha256:b3a737d68a4827402068459de6efdbc7478a5a7fd2fe7bd0de466d15928fc4c2
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2db2c533e49d05568b09bae363fc3e81a696c252f9dfc839b36f0463a25f0e02
+ oid sha256:400d300ab5c6f0031e3d0ba8d37ecabfc57b9e43f048b577c44acb4c00908460
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49f030e6373edf769dccea9c3f55568d6feebbe751b5ccaf17c5dd48945e66be
+ oid sha256:7ae9f372ef03becebb97898a812ec56da297ccd7cd57a3d1c178ee8c3636ac73
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f889ec4d2e9c550aeb9719ae7a9587413e8a6ae17e4db0aeba0b5fdab4e7fc8f
+ oid sha256:215c966d5ce6af96785202b075ac3637e563938102fe487ca5657a08dce17dcb
  size 1064
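The four binary files above are Git LFS pointers: each pointer records the SHA-256 (oid) and byte size of the real payload. A small sketch for verifying a downloaded payload against the pointer shown in the diff, assuming `git lfs pull` has already replaced the pointer with the actual file:

    import hashlib
    from pathlib import Path

    def lfs_digest(path: str) -> tuple[str, int]:
        # Per the git-lfs spec v1 pointer format, oid is the SHA-256 of the
        # payload and size is its length in bytes.
        data = Path(path).read_bytes()
        return hashlib.sha256(data).hexdigest(), len(data)

    oid, size = lfs_digest("last-checkpoint/adapter_model.safetensors")
    # Expected values from this commit's pointer file:
    #   oid  b3a737d68a4827402068459de6efdbc7478a5a7fd2fe7bd0de466d15928fc4c2
    #   size 13982248
    print(oid, size)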
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.424,
+   "epoch": 0.4256,
    "eval_steps": 500,
-   "global_step": 5300,
+   "global_step": 5320,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1862,6 +1862,13 @@
    "learning_rate": 2.5861088803056324e-05,
    "loss": 3.4988,
    "step": 5300
+   },
+   {
+     "epoch": 0.43,
+     "grad_norm": 29.175064086914062,
+     "learning_rate": 2.5827524199573033e-05,
+     "loss": 3.475,
+     "step": 5320
    }
  ],
  "logging_steps": 20,
@@ -1869,7 +1876,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1.2540420947214336e+16,
+ "total_flos": 1.2585955553869824e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null