Femboyuwu2000 committed on
Commit 6d0ddf1
1 Parent(s): 17214a2

Training in progress, step 900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:122f732830a82f1a7a3a7425537e862e7ee1d697cd8187d0044646345b434143
+oid sha256:bacbcdeb772271ae4a73b553452403dd574bd8d136434ea8e8a57d0663616033
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa28364534d96be46cefdbe1b76cf7a40940e1cf04c4f5a6cf3ccbb915f922bc
+oid sha256:7bb997ee1b9cc31b655c80659a8be14291f7c87e0520c3fe55461fe9ea80dc5a
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dea0e7f5f0c188c2ee6e729843388c15e47a905456029356105d7ca29f2d0fef
+oid sha256:40fb52d754072900f939e68c95258ed986e02ba250a7a6a809e0b910ea21aa4b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:52aa474262a07e7da5493acb71323699d3a95e420cf1220730538dffbf9e1ae4
+oid sha256:26e4e7bb751929813cc507d4c008baeb62df515276167a2e28f789b23a39f84f
 size 1064
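
The four files above are stored via Git LFS, so each diff only touches the three-line pointer file (spec version, sha256 oid, byte size); the binary payload itself is fetched by hash. A minimal sketch for checking a downloaded file against its updated pointer, assuming the checkpoint has been pulled into a local last-checkpoint/ directory (the path and helper name are illustrative):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so large checkpoint shards need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid taken from the updated adapter_model.safetensors pointer above.
expected = "bacbcdeb772271ae4a73b553452403dd574bd8d136434ea8e8a57d0663616033"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected
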
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0704,
+  "epoch": 0.072,
   "eval_steps": 500,
-  "global_step": 880,
+  "global_step": 900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -315,6 +315,13 @@
       "learning_rate": 2.9984583023761318e-05,
       "loss": 3.7271,
       "step": 880
+    },
+    {
+      "epoch": 0.07,
+      "grad_norm": 26.61457061767578,
+      "learning_rate": 2.998230238919386e-05,
+      "loss": 3.7376,
+      "step": 900
     }
   ],
   "logging_steps": 20,
@@ -322,7 +329,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2079501186367488.0,
+  "total_flos": 2126840118214656.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null