Femboyuwu2000 committed
Commit 36bffab
1 Parent(s): d665756

Training in progress, step 540, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a5bbafb92274fe73c656e68ae815c74026d14d6c752004391586ec62c0617ca
+oid sha256:7042f9ed08ad702b4807e2082f62b7c77edad4356283c5cfef4c2ac8b3b06034
 size 13982248
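
This file and the three checkpoint files below are stored through Git LFS, so the commit only rewrites each pointer's oid sha256: line; the blob size is unchanged (13,982,248 bytes for the adapter). A minimal verification sketch in Python, assuming the new blob has already been pulled to last-checkpoint/adapter_model.safetensors (the oid and size are copied from the diff above):

# Sketch only, not part of the commit: check a pulled LFS blob against its pointer.
import hashlib
import os

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse a git-lfs pointer ("key value" per line) into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(blob_path: str, pointer: dict) -> bool:
    """Compare the blob's byte size and SHA-256 digest with the pointer fields."""
    if os.path.getsize(blob_path) != int(pointer["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == pointer["oid"].removeprefix("sha256:")

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7042f9ed08ad702b4807e2082f62b7c77edad4356283c5cfef4c2ac8b3b06034\n"
    "size 13982248\n"
)
print(verify_blob("last-checkpoint/adapter_model.safetensors", pointer))
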
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a7f4e516bbee85e2efd4ebaf8cbcad8f26f7ac5fd68acc0e1b8d6076617c481
+oid sha256:ea2f7979011099fe7dff43f9cdaa7c8c71ecebd10fcfa3e69aea88e5fe4630df
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f841c8c705374da539163e3f8b292003c9f867c36cc43bc718b2a7996192a2c0
+oid sha256:cad5804f88e56590ede594cde634ed7edf004b94b62834acab77d105f3c4294b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ddb11d96a8b6173e10af5190423ae3f7ff04e92efd4a5a946800f7520b5b8483
+oid sha256:8954cfae79acdcb584a4c2168efc0e60ef154434502bd57627e490c360dd8a90
 size 1064
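
Together, these four files are part of what a transformers Trainer saves per checkpoint: adapter_model.safetensors holds the PEFT/LoRA adapter weights, optimizer.pt and scheduler.pt the optimizer and learning-rate-scheduler state, and rng_state.pth the random-number-generator state used for deterministic resumption. Only the adapter file is needed for inference. A hedged loading sketch; it assumes last-checkpoint also contains an adapter_config.json (not touched by this commit) and uses gpt2 purely as a stand-in, since the real base model is not identified here:

# Hedged sketch: load the adapter saved in this checkpoint for inference.
# Assumptions: last-checkpoint also contains adapter_config.json (not shown in
# this diff), and "gpt2" is only a placeholder -- the actual base model is not
# identified by this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("gpt2")          # placeholder base model
model = PeftModel.from_pretrained(base, "last-checkpoint")   # reads adapter_model.safetensors
model.eval()
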
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0416,
+  "epoch": 0.0432,
   "eval_steps": 500,
-  "global_step": 520,
+  "global_step": 540,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -189,6 +189,13 @@
       "learning_rate": 2.6000000000000002e-05,
       "loss": 3.8144,
       "step": 520
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 24.934555053710938,
+      "learning_rate": 2.7000000000000002e-05,
+      "loss": 3.9166,
+      "step": 540
     }
   ],
   "logging_steps": 20,
@@ -196,7 +203,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1248724256292864.0,
+  "total_flos": 1299081332097024.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null