Femboyuwu2000 committed
Commit
98e34dc
1 Parent(s): b3890fd

Training in progress, step 3380, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1698d0b9850198369e888fcfaf13d37be26de99003ada18e391207f1b5ff228
+oid sha256:69537f9ed2ed65ee60acaac389fcecc462f045dfeb53ece12f3946f8b602575d
 size 13982248
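
adapter_model.safetensors is the file name PEFT writes for saved adapter weights, and the ~14 MB size is consistent with a LoRA-style adapter rather than full model weights. A minimal sketch of reloading such a checkpoint, assuming a PEFT adapter and a user-supplied base model (the base model id below is a placeholder, not taken from this repo):

# Minimal sketch: reload the adapter saved in last-checkpoint/.
# Assumptions: the adapter was produced by PEFT (e.g. LoRA) and the base model
# id is known to the user; "base-model-name" below is a placeholder.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-name")   # placeholder base model id
model = PeftModel.from_pretrained(base, "last-checkpoint")       # reads adapter_model.safetensors + adapter_config.json
model.eval()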
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8fe17bfb83911cb927d541d4a34d80b626c32a1c080df866cc26b5c30ec4ff1
+oid sha256:5eddb8d0942fc0d2111a643fd0b3d55490262a609cbee970570ebbd5ab507d8c
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7db2bdbe63109eacd97a36be96f1c1369969403fe38eb6536eaee017f63bad8f
+oid sha256:2972df69961514cb9cbbc80a0a964c5c02518f3c97507a0353d5aad14f50d0a4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d7c2d8201e8b73967831cd725d7a9e1ee970c8f0d997a055abf4549ddfa7435
+oid sha256:05b6fd6b8b38a7df2dd205c561b40fe74b7f319a01589a7a4797e47201a7e9a9
 size 1064
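
All four weight files above are stored through Git LFS, so each diff only swaps the oid line (the SHA-256 of the new blob) while the pointer's version and size lines stay the same. A small self-contained sketch of parsing the three-line pointer format shown above (plain string handling, nothing specific to this repo):

# Parse a Git LFS pointer file of the form shown in the diffs above:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<64 hex chars>
#   size <bytes>
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    algo, _, digest = fields["oid"].partition(":")
    return {
        "version": fields["version"],
        "oid_algorithm": algo,          # e.g. "sha256"
        "oid": digest,                  # hex digest of the stored blob
        "size": int(fields["size"]),    # size of the real file in bytes
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:69537f9ed2ed65ee60acaac389fcecc462f045dfeb53ece12f3946f8b602575d
size 13982248"""
print(parse_lfs_pointer(pointer))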
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2688,
+  "epoch": 0.2704,
   "eval_steps": 500,
-  "global_step": 3360,
+  "global_step": 3380,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1183,6 +1183,13 @@
       "learning_rate": 2.852655480925828e-05,
       "loss": 3.5195,
       "step": 3360
+    },
+    {
+      "epoch": 0.27,
+      "grad_norm": 36.46931076049805,
+      "learning_rate": 2.8505486295849884e-05,
+      "loss": 3.5451,
+      "step": 3380
     }
   ],
   "logging_steps": 20,
@@ -1190,7 +1197,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 7983843719970816.0,
+  "total_flos": 8025605646680064.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null