Femboyuwu2000 committed
Commit 6418675
1 Parent(s): c76ce61

Training in progress, step 6540, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ab44c0650fc35117528c103709aeefeda1ba5a0c1b201a042c443fced883ac06
+ oid sha256:d30cf0270ab7eb3471153a28d31eec5ab87afc18c35880c0689d7b33e9ccec01
  size 13982248
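The weight and state files in this commit are Git LFS pointers: each records only the sha256 (`oid`) and byte size of the real blob, not the blob itself. Below is a minimal sketch for checking a locally downloaded copy of the adapter against the new pointer above; the local path is an assumption, not part of the commit.

```python
# Sketch: verify a downloaded checkpoint blob against its Git LFS pointer.
# The path below assumes the checkpoint directory was fetched locally.
import hashlib
from pathlib import Path

path = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "d30cf0270ab7eb3471153a28d31eec5ab87afc18c35880c0689d7b33e9ccec01"
expected_size = 13982248

data = path.read_bytes()
assert len(data) == expected_size, f"size mismatch: {len(data)} bytes"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.safetensors matches its LFS pointer")
```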
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8f8827e725054a7d3aa47b66a00e417f27bf58195e5d53d80134e98a97b930d
+ oid sha256:15270cb1bec565741ee825e92d321439b45a02901ba1581ae9722a89f84ea419
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:966b1d0c91c0efe2171d6c761e8fb3dd8dd23b43cb7c345758210f8c5726e8a5
+ oid sha256:3334f41707da2f1f3b6ff0afd38e2f3ea20e604b3409a23e240e2d1711a472d3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9b9c2686d4c99776f5194741a1ee501ae1a6b12f42eb0cf041ec1c8a8c483c5a
+ oid sha256:5d3da2849decc3d12a1c67a143710f26acee85daa0e821364144220b5cf57d3c
  size 1064
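Besides the adapter weights, the checkpoint carries `optimizer.pt`, `scheduler.pt`, and `rng_state.pth`, which hold the optimizer moments, the learning-rate schedule position, and the random-number-generator state needed to resume training deterministically. A rough sketch of inspecting them with PyTorch, assuming a local copy of the checkpoint directory; the exact contents can vary with the transformers/torch versions used for this run:

```python
# Sketch: peek inside the auxiliary checkpoint files. Paths assume a
# local copy of the checkpoint directory from this commit.
import torch

opt = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(opt.keys())    # typically an optimizer state_dict: "state", "param_groups"
print(sched)         # LR scheduler state_dict (step count, last LR, ...)
print(rng.keys())    # typically "python", "numpy", "cpu" (and "cuda") RNG states
```

When the training script resumes with `Trainer.train(resume_from_checkpoint="last-checkpoint")`, these files, together with `trainer_state.json` below, are restored so the run continues from step 6540 instead of restarting.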
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5216,
+ "epoch": 0.5232,
  "eval_steps": 500,
- "global_step": 6520,
+ "global_step": 6540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2289,6 +2289,13 @@
  "learning_rate": 2.361911574288736e-05,
  "loss": 3.5952,
  "step": 6520
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 41.03891372680664,
+ "learning_rate": 2.3579310239743776e-05,
+ "loss": 3.5194,
+ "step": 6540
  }
  ],
  "logging_steps": 20,
@@ -2296,7 +2303,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1.539204209197056e+16,
+ "total_flos": 1.5443350539239424e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null