Femboyuwu2000 committed on
Commit 0b2494c
1 Parent(s): 630cb28

Training in progress, step 3880, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcf9e22c88e0fb0603b9cd82abbc6b98b49f7ad3ed9dc4c142244b973e2719d2
+oid sha256:cde6f153e1e8b9a9e7c00323ae4d7db7386328f2d24b06facfc13a8ee245b3fe
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4e0a82f4d4487e9ab1022d1b2edd903560854e8fb2a0c9abf71fe128a2a2b94
+oid sha256:124b7ab2647d2a34f2d76c3fe8f556de61ab2239e56a049ecc9fdd7887cdf35e
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:877a724df59c57e1a2f0f22ee80507c6df287299fe61cc0042cb8694e46f52f4
+oid sha256:9cbc3f7eb6570c006a685ab90bed6095506dc3296d40e802ec891bd2b2edc3fe
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:861a7b99be1280c1ac6dbca0c55884807e0dfa7f21363b76c38ae1c977205711
+oid sha256:073260aeda648df98a231950123c1b6238009c89906d5834aa58374e8953701f
 size 1064
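
Each checkpoint file above is stored through Git LFS, so the commit only rewrites the pointer file: the oid is the SHA-256 of the new object and the size is its byte count. Below is a minimal sketch of how a locally materialized file could be checked against the new oid; the path and expected hash are taken from the adapter_model.safetensors pointer in this commit, while the helper function itself is an illustrative assumption, not part of the repository.

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so large checkpoint blobs need not fit in memory.
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid from the updated adapter_model.safetensors pointer above.
expected = "cde6f153e1e8b9a9e7c00323ae4d7db7386328f2d24b06facfc13a8ee245b3fe"
actual = sha256_of(Path("last-checkpoint/adapter_model.safetensors"))
print("match" if actual == expected else "mismatch")
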
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3088,
+  "epoch": 0.3104,
   "eval_steps": 500,
-  "global_step": 3860,
+  "global_step": 3880,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1358,6 +1358,13 @@
       "learning_rate": 2.7957872481757377e-05,
       "loss": 3.5455,
       "step": 3860
+    },
+    {
+      "epoch": 0.31,
+      "grad_norm": 23.779598236083984,
+      "learning_rate": 2.793333244167681e-05,
+      "loss": 3.5345,
+      "step": 3880
     }
   ],
   "logging_steps": 20,
@@ -1365,7 +1372,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 9104592111304704.0,
+  "total_flos": 9159181149831168.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null