Femboyuwu2000 committed
Commit 37ac56f
Parent: ffa9eeb

Training in progress, step 3520, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d574a9cfe44b6a9f1d49919f1d1075af72d4ebae831ed5168457960a110cf96
+oid sha256:86d755fdbe6f94a9a2692d37f476a7f3f294adf90ab7d7dfd64705c07fa4d33a
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43bdae8cb558222fb1e4b8ab4a8921afca3a24f6302f6ab4eb57e840888a343d
+oid sha256:9c6b821c1771e6fd3ffaa6fc89b2818ba9fe473e836a944a154ed7ef1fb7364a
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fd37529ec6267cade7e3edbd0752da2dc70537a327ba7205cdcfdf07532b1f14
+oid sha256:aa8265710fb321d1c26a08015211fa45d8789b6c86382ab93455535b66e21e3b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c9cf56fcd440484564e22fe18b4a309200b45f2e6cb790d1d19c81f6ae46057
+oid sha256:d4c2f733828b47d09685b4b92680b3f65e8207ee9b83d81c0150205a4a65cf60
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.28,
+  "epoch": 0.2816,
   "eval_steps": 500,
-  "global_step": 3500,
+  "global_step": 3520,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1232,6 +1232,13 @@
       "learning_rate": 2.8376108064873216e-05,
       "loss": 3.5228,
       "step": 3500
+    },
+    {
+      "epoch": 0.28,
+      "grad_norm": 32.50440216064453,
+      "learning_rate": 2.835405261236676e-05,
+      "loss": 3.479,
+      "step": 3520
     }
   ],
   "logging_steps": 20,
@@ -1239,7 +1246,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8289562019266560.0,
+  "total_flos": 8338245993529344.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
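The updated counters in trainer_state.json are internally consistent; a minimal sanity-check sketch, assuming single-process training with no gradient accumulation (neither setting is recorded in this file):

```python
# Sanity check of the counters recorded in the trainer_state.json diff above.
# Assumption: one optimizer step consumes train_batch_size samples
# (single process, no gradient accumulation -- not stored in this file).
global_step = 3520        # "global_step" after this commit
epoch = 0.2816            # "epoch" after this commit
train_batch_size = 8      # "train_batch_size"
save_steps = 20           # "save_steps"

steps_per_epoch = global_step / epoch                    # 12500.0 optimizer steps per epoch
samples_per_epoch = steps_per_epoch * train_batch_size   # 100000.0 samples per epoch (under the assumption above)
is_save_step = global_step % save_steps == 0             # True: step 3520 is a scheduled checkpoint
print(steps_per_epoch, samples_per_epoch, is_save_step)
```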