Femboyuwu2000 committed on
Commit 7f07066
1 Parent(s): 0d4cc95

Training in progress, step 5780, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:106e0a8e71cc55180d6c0190fdb06747b7348afc51dc8c66dd21361bdf370b7c
+oid sha256:283dd7cd18659a3c9d3097777e6a3109d1270cd6b8e6372800d079fa0c6abec5
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c4d8d6970ba09dd48969d99468a522c66cd6d17c14302ed117d1905247ae7ab
+oid sha256:d6b30e324e0c0ca3d4e69841f3e1c91025824da6588bf53e3d9fec35c7904fe9
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:328225a5ada1e91a7a041c82bd448f246b9de4bba61f01f80576b43e250cc0a2
+oid sha256:1680fcc8fd6c2f3a95198a6b5dcd06c9a4a57f3225e8f95006ad95075adefeb1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90472b611ffb952578703b9c8bc11e7e2553f2156ebce3593e42f79723ecc9cf
+oid sha256:447f6f7a97384eaf154d22cf9cf079d353a317926c5cc0034d003c8c0323b94e
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4608,
+  "epoch": 0.4624,
   "eval_steps": 500,
-  "global_step": 5760,
+  "global_step": 5780,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2023,6 +2023,13 @@
       "learning_rate": 2.5061004322976953e-05,
       "loss": 3.4351,
       "step": 5760
+    },
+    {
+      "epoch": 0.46,
+      "grad_norm": 92.71965789794922,
+      "learning_rate": 2.5024918927404005e-05,
+      "loss": 3.5194,
+      "step": 5780
     }
   ],
   "logging_steps": 20,
@@ -2030,7 +2037,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.364496321773568e+16,
+  "total_flos": 1.3688234216423424e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null