Femboyuwu2000 committed on
Commit cd1e6f2
1 Parent(s): 0df8803

Training in progress, step 6300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4952d07d35404125ac82a69ff1d2b691b555ce36f8f5e2491332621aa761003d
+oid sha256:44d9867527ada8f06b3d5b4a3b1a084c5587bf5d24b8f6abb3243ac9e5ecc1bb
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8dfa581ec2b543e12a4d9e7daa6a74d185441160dd1412bf1875fce58548b340
+oid sha256:664614e14cc5551b8f4f79a692c8c7ab8b97cdcef757ae57c283fec6b60fca8b
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c20249241b6c1615d63c46164c68a0aefa907c40365a5dc082bbe417fdd12bf6
+oid sha256:fe631caac525a44feb552772252434ef5806653f82e9cba96375efa0ad983cd3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ac175a77f9ab524825431337f25ea7bdd98b98a7ae16ff30a1d8c19f59c4b3f
+oid sha256:5b8ab3807f200a849a139d1332c1073c145c9a442b57cd4342cee505d555158f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5024,
+  "epoch": 0.504,
   "eval_steps": 500,
-  "global_step": 6280,
+  "global_step": 6300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2205,6 +2205,13 @@
       "learning_rate": 2.408961130718405e-05,
       "loss": 3.4637,
       "step": 6280
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 35.380767822265625,
+      "learning_rate": 2.405091801274387e-05,
+      "loss": 3.4403,
+      "step": 6300
     }
   ],
   "logging_steps": 20,
@@ -2212,7 +2219,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.4845134723416064e+16,
+  "total_flos": 1.4891030195109888e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null