Femboyuwu2000 committed
Commit 3867242
1 Parent(s): ecfc46c

Training in progress, step 9220, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1191e15f1f075f04e1758ba87cec35bdffa2da050944d1bbd85df57885868d03
+ oid sha256:32794401cb308b267d0bd79030d66e61135ddaadf4fd04203a94ef1dc084f749
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1a010ce7c172b0d1642d9d0d0c2e4c836b9098269d1f3df199282ff380452442
+ oid sha256:806be36c7d4247910d10d0a3d4e85abf9ecb886dba015205196e017aa6a69b3d
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:267bfcd733ebdb94275f330516fcaee122db5c84d3b8dd0fa12ab600e130ce3a
+ oid sha256:7d0e1721e39570a369ee3212183976850af3d42923a35fa475e14d07fb127160
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd1f9483d4afb3a72629e531d61449865b645ed3eea3223a993e49f084490f43
+ oid sha256:99eb28a147ff3b3e70b87395088aa61d9ebc6ba66456bca527ae02b29c9a018a
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.736,
+   "epoch": 0.7376,
    "eval_steps": 500,
-   "global_step": 9200,
+   "global_step": 9220,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -3227,6 +3227,13 @@
    "learning_rate": 1.7657864873688343e-05,
    "loss": 3.4515,
    "step": 9200
+   },
+   {
+    "epoch": 0.74,
+    "grad_norm": 41.386260986328125,
+    "learning_rate": 1.761003841513674e-05,
+    "loss": 3.4684,
+    "step": 9220
    }
   ],
   "logging_steps": 20,
@@ -3234,7 +3241,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.1755437760249856e+16,
+  "total_flos": 2.1795953062281216e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null