Femboyuwu2000 committed 18682e5 (1 parent: 3fb266e)

Training in progress, step 6380, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:469c9ce6820a85a006cd1be1ac3e4bc00d3ff72e99862c889a25470f7a425355
+oid sha256:6d0dc7626ddde701087507da47f6483bfaa8f5f8e188458a83f2674165d89960
 size 13982248
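
At roughly 14 MB, adapter_model.safetensors is the size of a typical PEFT/LoRA adapter rather than full model weights, and the filename matches PEFT's usual layout. Under that assumption, a checkpoint like this can be attached to its base model with peft; the base model is not named anywhere in this commit, so it is left as a parameter in the sketch below.

# Minimal sketch, assuming adapter_model.safetensors is a PEFT/LoRA adapter
# saved in the usual layout (adapter_model.safetensors + adapter_config.json).
# The base model is not stated in this commit, so it is a parameter here.
from transformers import AutoModelForCausalLM
from peft import PeftModel


def load_adapter(base_model_name: str, checkpoint_dir: str = "last-checkpoint"):
    base = AutoModelForCausalLM.from_pretrained(base_model_name)
    model = PeftModel.from_pretrained(base, checkpoint_dir)  # attaches the ~14 MB adapter
    return model.eval()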
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f28a62275cf693c348113c53b24032412deef50018fc781c228dc840c1a2bf8
+oid sha256:96d7a1d37a2127f086863b0d86e41d38ac45c83459eba5f392016c96b8b91645
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e4c351cd5bf9010bba98ddebc6172fea02d4349c1d4d85d7125985b1f4486fff
+oid sha256:f44a842856192a95499780f57278298938a0b08d05196d1a0077617782d19298
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a09e65d4f379cdacac12c0eb8ac2f7aee229cba4545b6e0dd83cf4c5c09dabe
+oid sha256:d00d77c9b01f729ad41511fced2214cb644688b952a92d0006d23c69218c0530
 size 1064
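
The four files above are stored through Git LFS, so the diff only touches each pointer: the oid sha256 line and the byte size. A generic way to check that a locally downloaded file matches its pointer is to recompute both values; the sketch below uses the new adapter pointer from this commit, and the path is an assumption about where the checkout lives.

# Sketch: verify a downloaded LFS object against its pointer (oid + size).
# Expected values are copied from this commit's new pointer lines; adjust
# the path to wherever your local checkout actually lives.
import hashlib
import os


def verify(path: str, expected_oid: str, expected_size: int) -> bool:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid and os.path.getsize(path) == expected_size


print(verify(
    "last-checkpoint/adapter_model.safetensors",
    "6d0dc7626ddde701087507da47f6483bfaa8f5f8e188458a83f2674165d89960",
    13982248,
))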
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5088,
+  "epoch": 0.5104,
   "eval_steps": 500,
-  "global_step": 6360,
+  "global_step": 6380,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2233,6 +2233,13 @@
       "learning_rate": 2.3934270118813024e-05,
       "loss": 3.4738,
       "step": 6360
+    },
+    {
+      "epoch": 0.51,
+      "grad_norm": 24.581098556518555,
+      "learning_rate": 2.3895199510167793e-05,
+      "loss": 3.4775,
+      "step": 6380
     }
   ],
   "logging_steps": 20,
@@ -2240,7 +2247,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5018678000943104e+16,
+  "total_flos": 1.5064671890374656e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null