Femboyuwu2000 committed
Commit 1579729
1 Parent(s): 626651d

Training in progress, step 5980, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:010f676ba5602979d0c7829e0358b3d388c49d2f6fee5237a36a28449c16b492
+oid sha256:a0feb6f64ea3e3b92cc96080a409e4641eb264d332cc20250df4e39a05e662c7
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:102af6e58d98e65b6dcfb4906ef10405c96eb13273b480ae020aefeb341b6282
+oid sha256:a970c4eb86fbaca60a94a3c845063771d7d39e9087249e399eb948470aed22d1
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d6347c5bcc78e95a65da46d270d8df1f2b00359d0f1d79efc0275b98ad678c0a
+oid sha256:3a0fb4f18c5202fb0a998c484ac19f5b59c60044cb935356391104a5e3a4b564
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a94fd165a88f9a6615f126687e3aefb9cf9a572c3aa79a5a0f1147469e4c177d
+oid sha256:afe52a7672d7fe624a4913a1f41e35efbe969e8d4b2f9dcc25e7efb0a77e2b02
 size 1064
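The four files above are git-lfs pointer files: only the `oid sha256:` line changes between checkpoints, while `version` and `size` stay the same. As a minimal sketch (not part of this commit), the snippet below checks that a downloaded blob matches a pointer's hash and size; the pointer format follows the spec URL shown in the diff, and the file paths are hypothetical placeholders.

```python
# Sketch: verify a downloaded checkpoint file against its git-lfs pointer.
# Pointer lines look like:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Return True if blob_path matches the oid and size recorded in pointer_path."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:").strip()
    expected_size = int(fields["size"])

    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Example with hypothetical local paths:
# verify_lfs_pointer("adapter_model.safetensors.pointer",
#                    "last-checkpoint/adapter_model.safetensors")
```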
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4768,
+  "epoch": 0.4784,
   "eval_steps": 500,
-  "global_step": 5960,
+  "global_step": 5980,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2093,6 +2093,13 @@
       "learning_rate": 2.4695464102141002e-05,
       "loss": 3.4816,
       "step": 5960
+    },
+    {
+      "epoch": 0.48,
+      "grad_norm": 33.66743469238281,
+      "learning_rate": 2.4658344354035063e-05,
+      "loss": 3.5148,
+      "step": 5980
     }
   ],
   "logging_steps": 20,
@@ -2100,7 +2107,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.4099423524651008e+16,
+  "total_flos": 1.41590318678016e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null