Femboyuwu2000 committed
Commit 88db6dd
1 parent: 9110a6c

Training in progress, step 840, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c685aafc65181370b17db36243c0d171817f938725ed77fa0a1e03b53e030014
+oid sha256:110e40ef4e3b35e43c4b8e29cc4d8dacba6d280f69bc8a29bf2ea57136d99510
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:036773597bfd897fa517e6ad196b274fca905a2d4b3b02935e43e14210ec8f79
+oid sha256:f714a7f3ccb21a9df52dd325a7b732a1a6695dc5d620393c7f1ac8fb9f416e45
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ad9df02f521bb1e4003457689d3b3dba0522b12dae9613bb1e420ae84cb87c5
+oid sha256:f65c5ab9da6e46829bd259bc0530be4e36f97ecb7b4f497275a1a99eb3a6c3bc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e7d3370081bd8faebed56ed9d5e5383e284bccc346e41b56e56c69c853ca9ca1
+oid sha256:8606193994762bc6fbc4224c0c881800eb5d3299a457924ce427b06d77b4771d
 size 1064
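
Each of the four files above is a Git LFS pointer: a three-line stub recording the spec version, the SHA-256 object ID, and the byte size of the real artifact, which is why only the oid lines change between checkpoints of identical size. As a minimal sketch (the local path is illustrative, not part of this commit), the following Python checks a downloaded file against the oid and size recorded in its pointer:

import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the adapter_model.safetensors pointer above;
# the local path is an assumption for illustration.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "110e40ef4e3b35e43c4b8e29cc4d8dacba6d280f69bc8a29bf2ea57136d99510",
    13982248,
))
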
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0656,
+  "epoch": 0.0672,
   "eval_steps": 500,
-  "global_step": 820,
+  "global_step": 840,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -294,6 +294,13 @@
       "learning_rate": 2.9990481752904566e-05,
       "loss": 3.6855,
       "step": 820
+    },
+    {
+      "epoch": 0.07,
+      "grad_norm": 46.554874420166016,
+      "learning_rate": 2.9988672727066197e-05,
+      "loss": 3.7201,
+      "step": 840
     }
   ],
   "logging_steps": 20,
@@ -301,7 +308,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1942798948663296.0,
+  "total_flos": 1985676276400128.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null