Femboyuwu2000 committed
Commit 16ff74a
1 Parent(s): f860b7a

Training in progress, step 6700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c13f2cc73f2b50eb3054c4f9f34e24e00ccbb2ee12f40760019fdb6ba4a7264
+oid sha256:baab6ce921f1613f657e84693cedae976231955db151735d337f863663c7e95d
 size 13982248
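The ~14 MB adapter_model.safetensors pointer above suggests this checkpoint stores a PEFT/LoRA adapter rather than full model weights. A minimal sketch of loading it from the checkpoint directory, assuming peft is in use; the base model id is a placeholder, since the commit does not name it:

from peft import PeftModel
from transformers import AutoModelForCausalLM

# Placeholder: the commit does not say which base model the adapter was trained on.
BASE_MODEL = "your-org/your-base-model"

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
# Attach the LoRA adapter weights stored in last-checkpoint/adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "last-checkpoint")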
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dc0a407b3cd6fb8c2009a5a2e28b1a647ee883170696f020ccd1bf2d47f70d5
+oid sha256:1b5cfe34653a49f261818a83aeb56f415a305909d4642a477a11be9eec2dd304
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6dc43f4f08c4cb18483c623c72390b89d9e5c0e070695c8546fdb6db415c2659
+oid sha256:496c0941a8254c7f1b866b7962463a46dae8fc62e73271e65fb12a07ffd6629e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64e6df07c2fbddd7c7262a897eedc5919a19077162b1c7b5831b4ba1cffd7cac
+oid sha256:fab7e6b1d1ff4eafb72e67dcebe45ceb949977389095195924aab3d52c70cb9d
 size 1064
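Each of the four files above is stored as a Git LFS pointer: a three-line text stub carrying the spec version, the sha256 oid of the real blob, and its size in bytes; only the oid changes between checkpoints. A minimal sketch, using only the standard library, for checking a downloaded blob against the oid and size recorded in one of these pointers:

import hashlib
from pathlib import Path

def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the sha256 oid and size from its LFS pointer."""
    data = Path(blob_path).read_bytes()
    return (
        len(data) == expected_size
        and hashlib.sha256(data).hexdigest() == expected_oid
    )

# Values taken from the scheduler.pt pointer in this commit.
print(verify_lfs_blob(
    "last-checkpoint/scheduler.pt",
    "fab7e6b1d1ff4eafb72e67dcebe45ceb949977389095195924aab3d52c70cb9d",
    1064,
))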
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5344,
+  "epoch": 0.536,
   "eval_steps": 500,
-  "global_step": 6680,
+  "global_step": 6700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2345,6 +2345,13 @@
       "learning_rate": 2.329817541708346e-05,
       "loss": 3.4163,
       "step": 6680
+    },
+    {
+      "epoch": 0.54,
+      "grad_norm": 21.337862014770508,
+      "learning_rate": 2.3257661740587055e-05,
+      "loss": 3.3443,
+      "step": 6700
     }
   ],
   "logging_steps": 20,
@@ -2352,7 +2359,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5763273798680576e+16,
+  "total_flos": 1.5820716951601152e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null