Femboyuwu2000 committed
Commit 2a4f74a
1 Parent(s): bd67fa1

Training in progress, step 3420, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cd865daf8559d59e29839bc441ae79f1e2737ae9485572c595f64db334fed61
+oid sha256:65f8e5f02732fdec9957da384752cf78dcb48b177a3d9870168cb62a3d63c083
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69d9e559aaab66e7869c4c787aeee42b2f6a56fd645707d2b87003a3dc5385b0
+oid sha256:502ba21a6999a73b4500e2f1bcd6bfd5f9ffe609b9ca658097637c18e5f7934c
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a2b272dc83cb7d749874b2399c80d27af338c5717f334d8e94a65cda4901704
+oid sha256:32157d9312da78c31c7eb9da54d0e4180014deb5a808f327c04702a69d5ee885
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29aee8cd0d410b2add351b6ed06e07a7408b0ac663b0315841fa1e8b8bd51e25
+oid sha256:5c1ae01ab3056f0c3cd70e7c180f3c03d506b883504c2660390de9d287862bd8
 size 1064
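Each of the checkpoint files above is stored through Git LFS, so the diff only touches the pointer file: a `version` line, the object's `oid sha256:...`, and its `size` in bytes. As an illustrative check (not part of this commit), a locally downloaded object can be compared against the oid and size recorded in its pointer; the local path is an assumption:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(object_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size from its Git LFS pointer."""
    data = Path(object_path).read_bytes()
    oid_ok = hashlib.sha256(data).hexdigest() == expected_oid
    size_ok = len(data) == expected_size
    return oid_ok and size_ok

# Values taken from the adapter_model.safetensors pointer in this commit;
# the local path assumes the checkpoint has been downloaded next to this script.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "65f8e5f02732fdec9957da384752cf78dcb48b177a3d9870168cb62a3d63c083",
    13982248,
))
```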
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.272,
+  "epoch": 0.2736,
   "eval_steps": 500,
-  "global_step": 3400,
+  "global_step": 3420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1197,6 +1197,13 @@
       "learning_rate": 2.848427611628093e-05,
       "loss": 3.4868,
       "step": 3400
+    },
+    {
+      "epoch": 0.27,
+      "grad_norm": 28.35199546813965,
+      "learning_rate": 2.8462924493036168e-05,
+      "loss": 3.4984,
+      "step": 3420
     }
   ],
   "logging_steps": 20,
@@ -1204,7 +1211,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8068909451280384.0,
+  "total_flos": 8112574120919040.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null