Femboyuwu2000 committed
Commit f3409dd
1 Parent(s): 88f27d7

Training in progress, step 9360, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:751f9f960dca5d1bc5b5f0841d1b2cd22831c5a9dd0e4a5ca9f8a71c9bacbd5a
+oid sha256:ce3133fc8023dc9fec5e655ff4095b17d357c0b7879a0cb32cee9112f5995b98
 size 13982248
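
Judging by its name and ~14 MB size, the updated adapter_model.safetensors is a PEFT (LoRA-style) adapter rather than full model weights. A minimal, hedged sketch of loading such a checkpoint directory, assuming a causal-LM task and that the directory also contains the usual adapter_config.json (not shown in this diff):

# Hypothetical usage; the base model is resolved from adapter_config.json,
# which this diff does not include.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("last-checkpoint")
model.eval()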
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:568119d120e0792ab2007ac43295445f846bf7d46a353f3c9997e7874c17c32e
+oid sha256:a8cc9cb8b2654a9d8d631a10c10e390e4fcaec9c1afb800e42a2d87bc9203baf
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9a0d75844d6619f1d43c17c2161774384d43f6fff4ebf0f3423379b5d726d2c
+oid sha256:326613a61f7ba42eb9028eedd563cedc07a0da8a535fbe448e54c2eb7d1c1b65
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c57b84b3c01996ecacc9d6df616fa036107288915c2593ed0d7afa9b69854186
+oid sha256:719ba58f8d34bc5a5298f55e15bc61b57310339d8425df02e38a130cb9f3b6fa
 size 1064
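
Each of the four entries above is a Git LFS pointer file: the repository itself stores only a version line, a sha256 oid, and the payload size, while the actual bytes live in LFS storage. A small self-contained sketch (paths are placeholders, not from this commit) of checking a resolved payload against such a pointer:

import hashlib
import os

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Return True if payload_path matches the oid and size recorded in the pointer."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    if os.path.getsize(payload_path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# e.g. verify_lfs_pointer("scheduler.pt.pointer", "last-checkpoint/scheduler.pt")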
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7472,
+  "epoch": 0.7488,
   "eval_steps": 500,
-  "global_step": 9340,
+  "global_step": 9360,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3276,6 +3276,13 @@
       "learning_rate": 1.73225223029007e-05,
       "loss": 3.5017,
       "step": 9340
+    },
+    {
+      "epoch": 0.75,
+      "grad_norm": 33.9267578125,
+      "learning_rate": 1.7274514748802418e-05,
+      "loss": 3.4322,
+      "step": 9360
     }
   ],
   "logging_steps": 20,
@@ -3283,7 +3290,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 2.208257832045773e+16,
+  "total_flos": 2.212768645024973e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null