Femboyuwu2000 committed
Commit ee0d6ef
1 Parent(s): a944fd7

Training in progress, step 3680, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbcfaec3e348c7d2277c6b26e72513d8f233c50f6153d77f79fb16607816e019
+oid sha256:aef5ea8b79738fa0b1afc11d4b2107af60cdd8cb8f1d9e425e3dbf9f968e4c88
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e9a7c0408bb21c6add531c8ecf332ee65e8da095897c11526a8a71159a612bd
+oid sha256:cde1575d56a414a2045e8dbdaca0d2f623d7e43dfc4fc696b88d5f3e863f7170
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbdcee77be9827de8d12c3593b0e445ae8e58c98fda9d45171de520fb50310c0
+oid sha256:a5ad82d08ebc1302cfaf16005dda10850453ff7fda97e1c6c54c3d6aaca32ef8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75387a128862b4898057b986470075974f53b66436a53a12635d2da9900d5d04
+oid sha256:a07985d480aea598ad66efeed57f22e38607f1040692f61ddafbb1ee20e27674
 size 1064
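
All four checkpoint files above are tracked with Git LFS, so only their three-line pointer files change in this commit: the `oid sha256:` line is the SHA-256 digest of the stored blob and `size` is its length in bytes. As a minimal sketch (the helper name and file paths below are illustrative assumptions, not part of this repository), a downloaded blob can be checked against its pointer like this:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a Git LFS pointer file against the blob it references."""
    fields = {}
    with open(pointer_path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value  # e.g. {"oid": "sha256:...", "size": "7062522", ...}

    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    # Hash the blob in chunks so large checkpoint files need not fit in memory.
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    return digest.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Hypothetical usage:
# verify_lfs_pointer("optimizer.pt.pointer", "last-checkpoint/optimizer.pt")
```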
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2928,
+  "epoch": 0.2944,
   "eval_steps": 500,
-  "global_step": 3660,
+  "global_step": 3680,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1288,6 +1288,13 @@
       "learning_rate": 2.8195755409920584e-05,
       "loss": 3.4387,
       "step": 3660
+    },
+    {
+      "epoch": 0.29,
+      "grad_norm": 43.26001739501953,
+      "learning_rate": 2.8172585936822056e-05,
+      "loss": 3.5127,
+      "step": 3680
     }
   ],
   "logging_steps": 20,
@@ -1295,7 +1302,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8653740454772736.0,
+  "total_flos": 8695239934181376.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null