Femboyuwu2000 committed
Commit 8f770c2
1 Parent(s): fce0d99

Training in progress, step 9380, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce3133fc8023dc9fec5e655ff4095b17d357c0b7879a0cb32cee9112f5995b98
+oid sha256:0bec6bca3c48b3e9f7ec1a052500985b06b331ed7b76a2c7795a8ebc958fc6fb
 size 13982248
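
The file above is a Git LFS pointer, so only the oid (the SHA-256 of the adapter weights) changes between checkpoints while the payload size stays at 13982248 bytes. As a minimal sketch of how a LoRA adapter checkpoint like this is typically loaded with PEFT (the base model id is a placeholder assumption, not taken from this repo):

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder base model id; the actual base model is not recorded in this diff.
base = AutoModelForCausalLM.from_pretrained("base-model-id")
# Attach the LoRA weights stored in last-checkpoint/adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "last-checkpoint")
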
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a8cc9cb8b2654a9d8d631a10c10e390e4fcaec9c1afb800e42a2d87bc9203baf
+oid sha256:1ca5553eb1deb65392427bfb5f3e004d9236ac9eeebf32208464d046a530ffce
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:326613a61f7ba42eb9028eedd563cedc07a0da8a535fbe448e54c2eb7d1c1b65
+oid sha256:f652f4e3db4d7a26e9dba2e7386e5cf6c51fb000752a737298ff24d1508a8460
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:719ba58f8d34bc5a5298f55e15bc61b57310339d8425df02e38a130cb9f3b6fa
+oid sha256:ca42f36f25082c2beb3fc00186e8ed8c12110e85da52d110e31ee1b1dc6e0295
 size 1064
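
Each of the binary files above (adapter weights, optimizer state, RNG state, LR scheduler state) is stored through Git LFS, so the diff only shows pointer hashes and sizes rather than tensors. A minimal sketch of fetching one of these checkpoint files from the Hub, assuming a placeholder repository id since the repo name is not shown in this diff:

from huggingface_hub import hf_hub_download

# repo_id is a placeholder assumption; pin revision to a commit hash to get exactly this checkpoint.
path = hf_hub_download(
    repo_id="Femboyuwu2000/placeholder-repo",
    filename="last-checkpoint/optimizer.pt",
    revision="main",
)
print(path)  # local cache path of the downloaded optimizer state
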
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7488,
+  "epoch": 0.7504,
   "eval_steps": 500,
-  "global_step": 9360,
+  "global_step": 9380,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3283,6 +3283,13 @@
       "learning_rate": 1.7274514748802418e-05,
       "loss": 3.4322,
       "step": 9360
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 29.3850040435791,
+      "learning_rate": 1.7226483336120707e-05,
+      "loss": 3.443,
+      "step": 9380
     }
   ],
   "logging_steps": 20,
@@ -3290,7 +3297,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.212768645024973e+16,
+  "total_flos": 2.217039974842368e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null