Femboyuwu2000 committed
Commit b6adbbb
1 Parent(s): c082e0a

Training in progress, step 9180, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20f1a714eb67c73b6569a5f7e5dedf54237b0ad2716eb3b0475c33a5c2632e6b
+oid sha256:02c9055a738b9a9de575613a1a5580636f05db397ca77f53fdf338e8870afd3d
 size 13982248
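
The file above is a Git LFS pointer: only the object id (sha256) and byte size are versioned in git, while the actual adapter weights live in LFS storage. A minimal sketch for checking a downloaded copy against the new pointer follows; the local path is an assumption for illustration, not something stated in this commit.

# Minimal sketch: verify a downloaded LFS object against the pointer's oid and size.
import hashlib
import os

path = "last-checkpoint/adapter_model.safetensors"  # hypothetical local copy
expected_oid = "02c9055a738b9a9de575613a1a5580636f05db397ca77f53fdf338e8870afd3d"
expected_size = 13982248

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches downloaded file")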
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9730bd1fd19af1573bb86be461d8d2283246a50b424518f179c57f93b8e62ac4
+oid sha256:147d3fa875f4ec20c612e78f064b3f808bdb35a6690922445f61b72afc32af1d
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:626b6a087fd94d790b874f54e98174d3e59d634f407178aef84daa0bee2f4f38
+oid sha256:39bf8edf039f594951035ab5637656bd85129b21b01c61e9ca4cb4fb949b5b72
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff87507bf7d087bfecc1378d173f4afe91a57804efc7c9f721f922c781ee2ed5
+oid sha256:ffeb4e9a55740f59ac2f34810ad1181abbaa2cb8f6beeff3a39a90a1c460800d
 size 1064
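
Together, optimizer.pt, scheduler.pt and rng_state.pth are the extra state the transformers Trainer writes alongside the adapter weights so that training can resume exactly where it stopped (for example via Trainer.train(resume_from_checkpoint="last-checkpoint")). A small sketch for peeking at these files locally follows; it assumes the checkpoint directory has been downloaded and that the objects were saved with torch.save, which is the Trainer default.

# Minimal sketch: inspect the small checkpoint files with torch.load.
import torch

sched = torch.load("last-checkpoint/scheduler.pt", weights_only=False)   # LR scheduler state_dict
rng = torch.load("last-checkpoint/rng_state.pth", weights_only=False)    # python/numpy/torch RNG states
print(sched.get("last_epoch"), list(rng.keys()))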
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7328,
+  "epoch": 0.7344,
   "eval_steps": 500,
-  "global_step": 9160,
+  "global_step": 9180,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3213,6 +3213,13 @@
       "learning_rate": 1.775343365017254e-05,
       "loss": 3.5308,
       "step": 9160
+    },
+    {
+      "epoch": 0.73,
+      "grad_norm": 23.338972091674805,
+      "learning_rate": 1.77056634524951e-05,
+      "loss": 3.5206,
+      "step": 9180
     }
   ],
   "logging_steps": 20,
@@ -3220,7 +3227,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.166026780786688e+16,
+  "total_flos": 2.1709115811692544e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null