Femboyuwu2000 committed
Commit cfa69b4
1 Parent(s): 4b8c357

Training in progress, step 5760, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85dd23053478779ccf46e0ca2c0ab595376b10a00281986da38738dcf4c5a6f2
+oid sha256:106e0a8e71cc55180d6c0190fdb06747b7348afc51dc8c66dd21361bdf370b7c
 size 13982248
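
adapter_model.safetensors is the adapter weight file that PEFT writes for a LoRA-style fine-tune; between checkpoints only its LFS pointer (the sha256 oid) changes, while the size stays at 13,982,248 bytes. A minimal loading sketch, assuming the checkpoint directory has been downloaded locally; the base model id is not recorded in this diff and is a placeholder here:

```python
# Sketch only: "base-model-id" and the local path are assumptions;
# this commit does not identify the base model.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")  # hypothetical base model
model = PeftModel.from_pretrained(base, "last-checkpoint")    # reads adapter_config.json + adapter_model.safetensors
model.eval()
```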
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e68508aeacddbbed435dffee6ab56d14ad8cc7af1af1a4a1cbd0e8c21cccf52c
+oid sha256:9c4d8d6970ba09dd48969d99468a522c66cd6d17c14302ed117d1905247ae7ab
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3568a47a934d5a4cfad1f4c6204e4bef8d710f3fea7631bb7a3e787aba3f0c2
+oid sha256:328225a5ada1e91a7a041c82bd448f246b9de4bba61f01f80576b43e250cc0a2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db005cc740de60b97936932099d8a692b0015d48f80c2b3cda30ecdcf7f9644e
+oid sha256:90472b611ffb952578703b9c8bc11e7e2553f2156ebce3593e42f79723ecc9cf
 size 1064
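
All four binaries above are stored through Git LFS, so each diff touches only the three-line pointer file: the spec version, the sha256 oid of the payload, and its size in bytes. A small verification sketch, assuming the actual files have already been pulled (e.g. with `git lfs pull`); the expected oid and size are the ones from the adapter diff above:

```python
# Sketch: recompute a local file's sha256/size and compare with its LFS pointer.
# The path is an assumption; the expected values are copied from this commit's diff.
import hashlib
from pathlib import Path

def lfs_oid_and_size(path: str) -> tuple[str, int]:
    """Return the (sha256 hex digest, byte size) that an LFS pointer would record."""
    digest = hashlib.sha256()
    p = Path(path)
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest(), p.stat().st_size

oid, size = lfs_oid_and_size("last-checkpoint/adapter_model.safetensors")
assert oid == "106e0a8e71cc55180d6c0190fdb06747b7348afc51dc8c66dd21361bdf370b7c"
assert size == 13982248
```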
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4592,
+  "epoch": 0.4608,
   "eval_steps": 500,
-  "global_step": 5740,
+  "global_step": 5760,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2016,6 +2016,13 @@
       "learning_rate": 2.509698418337534e-05,
       "loss": 3.4938,
       "step": 5740
+    },
+    {
+      "epoch": 0.46,
+      "grad_norm": 32.694766998291016,
+      "learning_rate": 2.5061004322976953e-05,
+      "loss": 3.4351,
+      "step": 5760
     }
   ],
   "logging_steps": 20,
@@ -2023,7 +2030,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.3598247598227456e+16,
+  "total_flos": 1.364496321773568e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null