Commit 4f80bcf by Hanzalwi
1 Parent(s): 604ea80

Training in progress, step 1100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6d4fff8398a005291af4bcafd5071060d689832ee1e2e0d16ca43f7ddcefa34
+oid sha256:209a24c1da8a93ac16367365eec1fae0adaa3a62bfd35bbeaed228d1e940dcba
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f0e68c6f34dbbb9ee51991b9cc9eb88fc6a38115b951ae1314afef0e0449d853
+oid sha256:f90ff78eb336ca2a0d175b16a248ed1a5ba3c8c8e3686c58f7b1676d16c99c58
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf4a977350df40c9b5afb65eb8e48a767be847c486a19a831f37b3267caf5f9f
+oid sha256:20c40dab02973b1cc46cf9629cd02e8f44b9b493929030f20bf23390e0e7e4d8
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3bb7339d1d2da342a44f3ba5e076fb6e5d93aee86f890c002703d2b3a0750820
+oid sha256:1f5f3a2d7191245a70e1410d8951fd2f5c94b0b91f13bf39c3f76fe8aaf57fd4
 size 627
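
The binary checkpoint files above are stored through Git LFS, so the repository only tracks small pointer files (version, sha256 oid, size); this commit swaps each old oid for the new one, while the recorded sizes stay unchanged. As a minimal sketch of how such a pointer can be checked against a downloaded blob (Python, stdlib only; parse_lfs_pointer and verify_blob are illustrative helper names, not part of this repository):

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Split a Git LFS pointer ('key value' per line) into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded file against the oid/size recorded in its pointer."""
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_oid = fields["oid"].split(":", 1)[1]   # drop the 'sha256:' prefix
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

For example, a fetched adapter_model.safetensors from this commit should hash to the new oid 209a24c1... and weigh exactly 9444296 bytes.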
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.1354840993881226,
-  "best_model_checkpoint": "./outputs/checkpoint-1000",
-  "epoch": 1.3333333333333333,
+  "best_metric": 1.1310428380966187,
+  "best_model_checkpoint": "./outputs/checkpoint-1100",
+  "epoch": 1.4666666666666668,
   "eval_steps": 100,
-  "global_step": 1000,
+  "global_step": 1100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -147,6 +147,20 @@
       "eval_samples_per_second": 10.873,
       "eval_steps_per_second": 1.363,
       "step": 1000
+    },
+    {
+      "epoch": 1.47,
+      "learning_rate": 0.0002,
+      "loss": 0.9393,
+      "step": 1100
+    },
+    {
+      "epoch": 1.47,
+      "eval_loss": 1.1310428380966187,
+      "eval_runtime": 177.407,
+      "eval_samples_per_second": 10.879,
+      "eval_steps_per_second": 1.364,
+      "step": 1100
     }
   ],
   "logging_steps": 100,
@@ -154,7 +168,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 7.474685431062528e+16,
+  "total_flos": 8.219848108179456e+16,
   "trial_name": null,
   "trial_params": null
 }
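
The trainer_state.json diff carries the step-1100 bookkeeping: global_step and epoch advance, two log_history entries are appended (the step-1100 training loss of 0.9393 and the step-1100 eval_loss of 1.1310, which improves on the previous best of 1.1355, so best_model_checkpoint moves to ./outputs/checkpoint-1100), and total_flos grows accordingly. A minimal sketch of inspecting this state file, assuming the checkpoint folder from this commit has been downloaded locally (illustrative script, not shipped with the repository):

import json
from pathlib import Path

# Assumed local copy of the checkpoint directory from this commit.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("global step:     ", state["global_step"])            # 1100 after this commit
print("epoch:           ", state["epoch"])                   # ~1.4667
print("best metric:     ", state["best_metric"])             # eval_loss 1.1310...
print("best checkpoint: ", state["best_model_checkpoint"])   # ./outputs/checkpoint-1100

# The last two log_history entries are the ones this commit appends:
# the step-1100 training log and the step-1100 evaluation.
for entry in state["log_history"][-2:]:
    print(entry)

When continuing the run, the transformers Trainer can pick this state up again via trainer.train(resume_from_checkpoint="last-checkpoint"), which also restores the optimizer.pt, scheduler.pt, and rng_state.pth saved alongside it.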