Femboyuwu2000 committed on
Commit
f5e5cd6
1 Parent(s): 6b2b8bb

Training in progress, step 4000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6fed8b037293eea965a891ec6faa4a8c5f09e500208437b6e0939687c63268f1
+oid sha256:ebfe5edf81138c978229a3a68b0fe6644c67b98edc7c5fe61004927e35302f75
 size 13982248
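
adapter_model.safetensors is tracked through Git LFS, so the repository stores only the small pointer above (version, sha256 oid, size); the new checkpoint shows up as a changed oid while the size stays the same, as expected for a fixed-shape adapter (presumably a PEFT adapter, judging by the filename). A minimal sketch, assuming the LFS object has been pulled locally (e.g. with git lfs pull; the path below mirrors the repo layout), for checking that the downloaded file matches the pointer's oid:

import hashlib

# Path and expected oid taken from the pointer diff above; adjust the path to
# wherever the checkpoint was downloaded.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "ebfe5edf81138c978229a3a68b0fe6644c67b98edc7c5fe61004927e35302f75"

h = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoint files never load fully into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("oid match" if h.hexdigest() == expected_oid else "oid mismatch")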
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ec148549087fad601f78d26348f8c3cd657d46a252e419103abbe89a0a1b1b2
+oid sha256:9fb53f00fdd7fbcafad95bc9586286b21025aebe6a7a756c6a906a18324316bc
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:51370fa33b9c13d5f80ae7fa8db4167f4b87b80a692d734ed71e92d3211caace
+oid sha256:43c690d898c7ec435e0a3e95049a4459dd3ca635c017bbe0a97e3303b2d233ca
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f79a2f74b522fa6033abd487865a5913aed0c5f19b8f644e33fa9a5d01b02ca
+oid sha256:ddd4c9ae93fd7d164e48abdd1f7e950a314350432c3244939b41b87291d2074a
 size 1064
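
optimizer.pt, rng_state.pth and scheduler.pt follow the same pointer pattern; together with the adapter weights they are what allows the run to resume exactly where it stopped (e.g. trainer.train(resume_from_checkpoint="last-checkpoint")). A minimal sketch, assuming a local copy of the checkpoint and that these are the ordinary torch-pickled state dicts the Hugging Face Trainer writes, for inspecting what each file holds:

import torch

ckpt = "last-checkpoint"  # hypothetical local path to the pulled checkpoint

# These files contain more than plain tensors, so recent PyTorch versions need
# weights_only=False to unpickle them.
optimizer_state = torch.load(f"{ckpt}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt}/rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state.keys())  # typically dict_keys(['state', 'param_groups'])
print(scheduler_state)         # LR scheduler internals, e.g. last_epoch and _last_lr
print(type(rng_state))         # RNG snapshot used to keep resumption deterministic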
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3184,
+  "epoch": 0.32,
   "eval_steps": 500,
-  "global_step": 3980,
+  "global_step": 4000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1400,6 +1400,13 @@
       "learning_rate": 2.780860247123153e-05,
       "loss": 3.4484,
       "step": 3980
+    },
+    {
+      "epoch": 0.32,
+      "grad_norm": 30.727558135986328,
+      "learning_rate": 2.778325235483954e-05,
+      "loss": 3.5112,
+      "step": 4000
     }
   ],
   "logging_steps": 20,
@@ -1407,7 +1414,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 9403552392609792.0,
+  "total_flos": 9444034888728576.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null