Femboyuwu2000 committed on
Commit b5196a2
1 Parent(s): 15297b3

Training in progress, step 3980, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7889d14a2444a830b872f5e86d561143f2cbab0ea5ba81dbb788ecd497b6dc12
+ oid sha256:6fed8b037293eea965a891ec6faa4a8c5f09e500208437b6e0939687c63268f1
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ef7e9ed4a471e46548d0b25542268f6f7e42b296dc835d21a4629297b643f8a
+ oid sha256:2ec148549087fad601f78d26348f8c3cd657d46a252e419103abbe89a0a1b1b2
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94bb4d16678255f69acd3c6ad1f7e28dae58deeccef6c73a9ed3860161dd1747
+ oid sha256:51370fa33b9c13d5f80ae7fa8db4167f4b87b80a692d734ed71e92d3211caace
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4aa728ff8dd68ad393219e062ef8c6a1fc7dc031ee553ad652965f2249179521
+ oid sha256:9f79a2f74b522fa6033abd487865a5913aed0c5f19b8f644e33fa9a5d01b02ca
  size 1064
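
Each of the four files above is stored through Git LFS, so only its pointer changes in the diff: per the spec referenced in the pointer itself, the oid is the SHA-256 digest of the stored blob and the size is its length in bytes. A minimal sketch of how those two fields are derived for a local file (the helper name and chunk size are illustrative, not part of this repo):

    import hashlib
    import os

    def lfs_pointer(path: str) -> str:
        """Build the Git LFS pointer text for a local file."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            # Hash in 1 MiB chunks so large checkpoint blobs do not load into memory.
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return (
            "version https://git-lfs.github.com/spec/v1\n"
            f"oid sha256:{digest.hexdigest()}\n"
            f"size {os.path.getsize(path)}\n"
        )

    # e.g. lfs_pointer("last-checkpoint/adapter_model.safetensors")
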
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.3168,
+   "epoch": 0.3184,
    "eval_steps": 500,
-   "global_step": 3960,
+   "global_step": 3980,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1393,6 +1393,13 @@
      "learning_rate": 2.783381823144452e-05,
      "loss": 3.6398,
      "step": 3960
+     },
+     {
+      "epoch": 0.32,
+      "grad_norm": 28.959075927734375,
+      "learning_rate": 2.780860247123153e-05,
+      "loss": 3.4484,
+      "step": 3980
      }
    ],
    "logging_steps": 20,
@@ -1400,7 +1407,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 2,
    "save_steps": 20,
-   "total_flos": 9350734873362432.0,
+   "total_flos": 9403552392609792.0,
    "train_batch_size": 8,
    "trial_name": null,
    "trial_params": null