Femboyuwu2000 committed on
Commit dab66d6
1 Parent(s): 27084a9

Training in progress, step 5940, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70e0ffa6186a8d19a1c6a1af2754af73bbde6f3d4b17d0eaa6d63f56b1dcc237
+oid sha256:608a6b07ab17338bd6282af158218a38aaf660c765e9d6ab6b3901cfdb0f98b7
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9f6b01c389b88e9bf0fa9b136dc9465beaeaf31a2e306aab9eded42fe708a84
+oid sha256:21a7d513c9296103d22d55f32962fde622c0d973a7f373aa4a9c0fe2c3f05177
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:715d2d6b3ea46b462c5a3d4523172f732bb653b4084281a8a43ddfdca55ee170
+oid sha256:e0127f56f128ab49d16717c9bb322e33079e0546dc1e679e0ba07a312af3b19a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a79a21920ea4884fef0686ddc829dd24e9c53857b4d31a716734744f6af5ee77
+oid sha256:14f5d4d06571f17a13a7fb5f8c4d4c26d3ffcd4700c7abd904f5f1753c1fd7ec
 size 1064
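
Each of the four checkpoint artifacts above is stored as a git-lfs pointer: the spec version, a sha256 object id, and the byte size. As a minimal sketch (the local path and helper name are illustrative assumptions, not part of the commit), a downloaded object can be checked against the oid recorded in the updated pointer:

```python
# Minimal sketch: verify a downloaded checkpoint file against the sha256
# oid recorded in its LFS pointer. The local path and helper name below
# are illustrative assumptions, not part of the commit.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in 1 MiB chunks so large checkpoints are not loaded at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Updated oid for last-checkpoint/adapter_model.safetensors from this commit.
expected = "608a6b07ab17338bd6282af158218a38aaf660c765e9d6ab6b3901cfdb0f98b7"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected
```

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt with their respective oids.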
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4736,
+  "epoch": 0.4752,
   "eval_steps": 500,
-  "global_step": 5920,
+  "global_step": 5940,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2079,6 +2079,13 @@
       "learning_rate": 2.476939810755923e-05,
       "loss": 3.4988,
       "step": 5920
+    },
+    {
+      "epoch": 0.48,
+      "grad_norm": 27.388093948364258,
+      "learning_rate": 2.4732482149416325e-05,
+      "loss": 3.4577,
+      "step": 5940
     }
   ],
   "logging_steps": 20,
@@ -2086,7 +2093,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.4003597454016512e+16,
+  "total_flos": 1.4047360541392896e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null