Femboyuwu2000 committed on
Commit 6be7edc
1 Parent(s): 21164ed

Training in progress, step 9320, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85beb4e63f2c5b59a9fb44c1941465e25467b6f51e0595870a1ed098da9eb3a9
+oid sha256:4e12c37b04f848c57b1fbaa1586497570ce04b4c474a41d82ca6927d46cfafc8
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bfa466e47344fdb4b82ac2e27349b5aa4a9780daa2a9dd909396e056672be06
+oid sha256:be6249a73b51acc1e605e0c726f6b0c6b5b71b95e8e1b3bd37e1fd6605c078f1
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3cda8d8dfc43c49473f34af5d9dbd59701d7b0b55fa4d024a4a95d2b0775e60
+oid sha256:4030094ed5282ad80d870d012a583b682ade163de00f842f2ba8ebc8caafb7bc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e194f1dd757c906a836decafb805f1dccfe64bb4c1bfe3207704010eaaa32fd
+oid sha256:ab2f7e65908b7a6affd31bf3469c124def8863b28efc3fb8855df9f9b8567a7c
 size 1064
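Each of the four binary files above is stored through Git LFS, so the diff only shows the updated pointer text: the "oid sha256:" line is the SHA-256 digest of the real file and "size" is its byte count. As a minimal sketch (assuming the actual weights have been fetched locally, e.g. with `git lfs pull`), the new adapter file can be checked against its pointer like this:

import hashlib

# Sketch only: assumes a local copy of the checkpoint directory from this commit,
# with the real file contents pulled (not just the LFS pointer).
path = "last-checkpoint/adapter_model.safetensors"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Should print the new oid from the pointer above:
# 4e12c37b04f848c57b1fbaa1586497570ce04b4c474a41d82ca6927d46cfafc8
print(digest.hexdigest())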
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.744,
+  "epoch": 0.7456,
   "eval_steps": 500,
-  "global_step": 9300,
+  "global_step": 9320,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3262,6 +3262,13 @@
       "learning_rate": 1.7418463821296398e-05,
       "loss": 3.4075,
       "step": 9300
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 52.64670944213867,
+      "learning_rate": 1.7370505494839012e-05,
+      "loss": 3.4542,
+      "step": 9320
     }
   ],
   "logging_steps": 20,
@@ -3269,7 +3276,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.199570826395648e+16,
+  "total_flos": 2.203989782819635e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null