Femboyuwu2000 committed on
Commit 240ebb1
1 Parent(s): 1a4ab99

Training in progress, step 5840, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f2d12c00c3910f5c8aadf1c3043d575f58f0e94d4e1904af9295d91088e5ba8
+oid sha256:9a05d66290a553ce7e5848692f87b4d47dd70832cd730d3e060d68efb842c89b
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f018a91942bfac6141fce26126b81cc10d8b6d03258988a8ed48ad7a147fb9c
+oid sha256:c55dfcbb3adece00c0495fe03185267c02807bfaa50cb1106c5f7e6137980a8d
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:73ca75c8c20354aa6121b583b0e2d1196548a9e950e5c54e2824364c1617a488
+oid sha256:d37eee1f275542e53d152c87d42b36d0933fa3b25d32e8265c1afb8163e08bdb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ffeee657eab485848cc5e4a398e8908b8f07d67a077ba1a196eb593774ff8630
+oid sha256:b5ff425dd13fc2cddef013a46a6108a94fb4fb857872d9633c68069377e39e8a
 size 1064
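
The four files above are Git LFS pointer files: each checkpoint save only swaps the SHA-256 OID that names the new binary blob, while the declared byte size stays the same. A minimal sketch of reading such a pointer, assuming a checkout where the pointer text has not yet been smudged into the real file (the parse_lfs_pointer helper and the path are illustrative, not part of this repo):

# Minimal sketch: parse a Git LFS pointer file of the form shown above
# (version / oid sha256:<hex> / size <bytes>).
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Return the version, oid and size recorded in an LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }


if __name__ == "__main__":
    # Hypothetical local path; adjust to your checkout.
    info = parse_lfs_pointer("last-checkpoint/adapter_model.safetensors")
    print(info["oid"], info["size"])
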
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.4656,
+  "epoch": 0.4672,
   "eval_steps": 500,
-  "global_step": 5820,
+  "global_step": 5840,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2044,6 +2044,13 @@
       "learning_rate": 2.495243304591236e-05,
       "loss": 3.561,
       "step": 5820
+    },
+    {
+      "epoch": 0.47,
+      "grad_norm": 43.67660140991211,
+      "learning_rate": 2.4916033320336263e-05,
+      "loss": 3.3979,
+      "step": 5840
     }
   ],
   "logging_steps": 20,
@@ -2051,7 +2058,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.3779205011996672e+16,
+  "total_flos": 1.3829463670063104e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null