Femboyuwu2000 committed
Commit 4174742
1 Parent(s): fe56fd8

Training in progress, step 640, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c92ea041d259fd0f1f97d3d99cc8dcb4a35f0729c4f9cfe1e03af37c28316782
+ oid sha256:7b35ddd4427844661dfab289de8478e50dd50f1beeb81b60b938f6decab96be8
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:78a98df909e786b9af2ba1de67522448ec74261759633c6997092bdf4beaab7f
+ oid sha256:213ebcbe3bb1067adfedc8d41c2d0ccdc1e057c3d9dd18c30ed17bdbd4df6c1f
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c65cd55e2fff96c97e67ca068ed8c714e7c66984dea1333f2e03593c396075d
+ oid sha256:001288f4eaf188f446d2ca5cd5f672ec468ff9152761daa8c936edbc48ef4642
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1ce72457ecdb61a4ff660064d82592f9b16269351060cb691068fea59fdedc2
+ oid sha256:71b63c6d0a28783ef8450676a810d5195abcf4c2886ef29e04e2c6964bcd124c
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0496,
+ "epoch": 0.0512,
  "eval_steps": 500,
- "global_step": 620,
+ "global_step": 640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -224,6 +224,13 @@
  "learning_rate": 2.999992132854894e-05,
  "loss": 3.8692,
  "step": 620
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 45.1494026184082,
+ "learning_rate": 2.999968531502098e-05,
+ "loss": 3.7374,
+ "step": 640
  }
  ],
  "logging_steps": 20,
@@ -231,7 +238,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1478300032499712.0,
+ "total_flos": 1526262276685824.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null