Femboyuwu2000 committed
Commit c87114f
1 Parent(s): f6faa0d

Training in progress, step 660, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7b35ddd4427844661dfab289de8478e50dd50f1beeb81b60b938f6decab96be8
+ oid sha256:be0bca22803c9150df03e6466773cd219b4542d072342c711ec03c97247eb64c
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:213ebcbe3bb1067adfedc8d41c2d0ccdc1e057c3d9dd18c30ed17bdbd4df6c1f
+ oid sha256:835a7525c06da7ed873e20c691c9e2e2c793823b899c56c236618edeabdee1e4
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:001288f4eaf188f446d2ca5cd5f672ec468ff9152761daa8c936edbc48ef4642
+ oid sha256:084dccba20efd6b3e5dec3fea90abdf37dfbc8e08f2e8403d8ff8352b73dbaf5
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:71b63c6d0a28783ef8450676a810d5195abcf4c2886ef29e04e2c6964bcd124c
+ oid sha256:9d360c1de8e8e92eec017cc71d37bb9f185d942b9274c99e38578ff0397698ae
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0512,
+ "epoch": 0.0528,
  "eval_steps": 500,
- "global_step": 640,
+ "global_step": 660,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -231,6 +231,13 @@
  "learning_rate": 2.999968531502098e-05,
  "loss": 3.7374,
  "step": 640
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 72.25853729248047,
+ "learning_rate": 2.99992919618918e-05,
+ "loss": 3.7735,
+ "step": 660
  }
  ],
  "logging_steps": 20,
@@ -238,7 +245,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1526262276685824.0,
+ "total_flos": 1566580743241728.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null