Femboyuwu2000 committed
Commit 3003d8f
1 Parent(s): f21684b

Training in progress, step 80, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c13c1fb05d548b619e4ea3b6565286621267c5c8ce62890c4968c123b771070
+oid sha256:e72960fe1f5461766c235a01313cd773da95227563f32936ac3a015ff0459139
 size 12803224
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68903178a78f75a3d0256b3cd6634ad0163adbc4d74cda491a06a4482ccf1d07
+oid sha256:9382d52e6821be11c14df84b7893ef587ba8bd5a15f4fe4d93d554e30030c8d6
 size 6472698
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd37aad04b91620e0c3483fdbb04450826c4bfc1adad9bf38a9c80870ce97622
+oid sha256:80ebca932a50bd9cfabfd23733c5f77d34c6627e8990d72f32cbb244a43bf7bf
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b207be31c66d2c120e6fb6b0a2aa4e289fca3c6de3dc048cad8998306d1d4ca
+oid sha256:a0533425b70142c89e8cb8876e2451acb905d0925372b0f8e777ed24de781cce
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 4.669515070859891e-05,
+  "epoch": 6.226020094479855e-05,
   "eval_steps": 500,
-  "global_step": 60,
+  "global_step": 80,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -28,6 +28,13 @@
       "learning_rate": 8.666666666666668e-06,
       "loss": 3.5164,
       "step": 60
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 109.34711456298828,
+      "learning_rate": 1.2e-05,
+      "loss": 4.0846,
+      "step": 80
     }
   ],
   "logging_steps": 20,
@@ -35,7 +42,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 253168758374400.0,
+  "total_flos": 337115655782400.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null