ben81828 committed (verified)
Commit c39b8ce · 1 Parent(s): a0e3713

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7dcd3cc1733beba0d2cfe024b5800ccbcc1ab26ea9926cf8c4f3b88a8198614e
+ oid sha256:9f299ed065d076e986e51213d7bf889152797a26591dce7e23c56b2cecc05e88
  size 29034840
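
Each entry in this diff is a Git LFS pointer, not the weight file itself: the `oid sha256:` line records the digest of the real blob and `size` its byte count. Below is a minimal verification sketch (not part of this commit) that checks a locally fetched file against the new pointer above; it assumes the blob has already been materialized, e.g. with `git lfs pull`.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the updated pointer in this commit.
expected = "9f299ed065d076e986e51213d7bf889152797a26591dce7e23c56b2cecc05e88"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
```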
last-checkpoint/global_step150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9afea4c76f157e2608c1f997a5ad60002c72ae851b59e9bde62ae07a14d9d80a
+ size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52305a9611c890a539013783f0f1598817c011d88270349b046c32183a126dfc
+ size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a844fb8b31d206016bdbf8e85c119d82e6d584dd28c40668d58d6d6c2b324370
+ size 43429616
last-checkpoint/global_step150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2609b1ebf886b70182af89e62775c237894994283827b25c6af3569d1a850da7
+ size 43429616
last-checkpoint/global_step150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34f144f33b7ac763950c684990157dad4d1ee523ae43797b9a6b90a998a9d314
+ size 637299
last-checkpoint/global_step150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3e0249ea361e86670960dca0ca756a903dbb19568eef1a99e26ae12785a1d69
+ size 637171
last-checkpoint/global_step150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82afbf3944b11f68d9499cbaa773c1fcc29e1a0e9b4eb333a671ffa88a5222fe
+ size 637171
last-checkpoint/global_step150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a0caccf9e31a439cb573c5600a0e4625f1ff654d897f43ccfa9da9d8591fe0e
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step100
+ global_step150
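
The `global_step150/` shards added above form a DeepSpeed ZeRO checkpoint (four data-parallel ranks, no model parallelism), and `latest` is the tag file DeepSpeed reads to locate the newest step. The sketch below shows one way to consolidate the per-rank shards into a single fp32 state dict of the trainable parameters; it assumes DeepSpeed is installed and `last-checkpoint/` has been downloaded with exactly this layout.

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

checkpoint_dir = "last-checkpoint"  # contains global_step150/ and the `latest` tag file
with open(f"{checkpoint_dir}/latest") as fh:
    tag = fh.read().strip()         # -> "global_step150"

# Reconstructs full fp32 copies of the trainable parameters from the
# bf16_zero_pp_rank_*_optim_states.pt and zero_pp_rank_*_model_states.pt shards.
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=tag)
torch.save(state_dict, "consolidated_fp32_state_dict.pt")
print(f"{len(state_dict)} tensors consolidated")
```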
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:308f94f9a5c24e1bad5c393d56ae7af7782600f4e791d9c6ac35b22fff2105b6
+ oid sha256:70cc56408014c410353d4dd58ae9b03f4be043f5f800324f66fd8e20e99b840e
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b056f3c23cb32dc77a2ec9e7651e0b64e4440e21f0fdf969b86bfc56a1cbdf06
+ oid sha256:49d1438e98cc9c53a6852464635ce62e9788e61eb3646b73e33813f487c4b6ae
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3f8a05714bc528f4885a2816181652f2303b3e8150f89b56aaee6bec56aa520
+ oid sha256:4388add9cec90932f8ff0100d27a0574d98e1bad52ff89d44e31967d2b4fbfde
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4f755bd3c330281961e5c03af9d10ce8c1e1678619d384f6f1fd5fd7dce2ff50
+ oid sha256:a705d6dfaae4f2c1b4b2be6b25a6eb521ffae6fcba21cc1531e97b60037ed079
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4e451a9e086b06d7c667be8442b2115f5c088953bade0b625e61f2ce5c7fd404
+ oid sha256:c587ef55825cdfdbac47c29c3bd8a2996263c16833a5bd6aaa2fb014bea1e9d1
  size 1064
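
The `rng_state_{rank}.pth` files above snapshot each rank's RNG state and `scheduler.pt` the learning-rate scheduler state, which is what lets a resume from step 150 continue deterministically. A quick inspection sketch (illustrative only; `weights_only=False` may be required on newer PyTorch because these files contain plain Python/NumPy objects rather than just tensors):

```python
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
scheduler = torch.load("last-checkpoint/scheduler.pt", weights_only=False)

print("rng_state_0 keys:", sorted(rng_state.keys()))  # per-rank RNG snapshot
print("scheduler keys: ", sorted(scheduler.keys()))   # LR scheduler state_dict
```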
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7517351508140564,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-100",
- "epoch": 0.025753283543651816,
+ "best_metric": 0.7309949994087219,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-150",
+ "epoch": 0.03862992531547772,
  "eval_steps": 50,
- "global_step": 100,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -185,11 +185,100 @@
  "eval_steps_per_second": 0.774,
  "num_input_tokens_seen": 1049816,
  "step": 100
+ },
+ {
+ "epoch": 0.027040947720834405,
+ "grad_norm": 0.7251208491150668,
+ "learning_rate": 6.176470588235295e-05,
+ "loss": 0.7579,
+ "num_input_tokens_seen": 1102584,
+ "step": 105
+ },
+ {
+ "epoch": 0.028328611898016998,
+ "grad_norm": 0.8217419839297042,
+ "learning_rate": 6.470588235294118e-05,
+ "loss": 0.7659,
+ "num_input_tokens_seen": 1155512,
+ "step": 110
+ },
+ {
+ "epoch": 0.029616276075199587,
+ "grad_norm": 0.6768053879888967,
+ "learning_rate": 6.764705882352942e-05,
+ "loss": 0.7469,
+ "num_input_tokens_seen": 1207976,
+ "step": 115
+ },
+ {
+ "epoch": 0.03090394025238218,
+ "grad_norm": 1.9562630849642013,
+ "learning_rate": 7.058823529411765e-05,
+ "loss": 0.7353,
+ "num_input_tokens_seen": 1259776,
+ "step": 120
+ },
+ {
+ "epoch": 0.03219160442956477,
+ "grad_norm": 0.6439041597153087,
+ "learning_rate": 7.352941176470589e-05,
+ "loss": 0.7537,
+ "num_input_tokens_seen": 1312760,
+ "step": 125
+ },
+ {
+ "epoch": 0.03347926860674736,
+ "grad_norm": 0.6124318582166212,
+ "learning_rate": 7.647058823529411e-05,
+ "loss": 0.7669,
+ "num_input_tokens_seen": 1365616,
+ "step": 130
+ },
+ {
+ "epoch": 0.03476693278392995,
+ "grad_norm": 0.7593534002488418,
+ "learning_rate": 7.941176470588235e-05,
+ "loss": 0.722,
+ "num_input_tokens_seen": 1417544,
+ "step": 135
+ },
+ {
+ "epoch": 0.036054596961112545,
+ "grad_norm": 0.7827834651032061,
+ "learning_rate": 8.23529411764706e-05,
+ "loss": 0.7502,
+ "num_input_tokens_seen": 1469856,
+ "step": 140
+ },
+ {
+ "epoch": 0.037342261138295134,
+ "grad_norm": 0.5444126155596626,
+ "learning_rate": 8.529411764705883e-05,
+ "loss": 0.7174,
+ "num_input_tokens_seen": 1521496,
+ "step": 145
+ },
+ {
+ "epoch": 0.03862992531547772,
+ "grad_norm": 0.40878703812837747,
+ "learning_rate": 8.823529411764706e-05,
+ "loss": 0.7018,
+ "num_input_tokens_seen": 1573376,
+ "step": 150
+ },
+ {
+ "epoch": 0.03862992531547772,
+ "eval_loss": 0.7309949994087219,
+ "eval_runtime": 38.2005,
+ "eval_samples_per_second": 3.141,
+ "eval_steps_per_second": 0.785,
+ "num_input_tokens_seen": 1573376,
+ "step": 150
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 1049816,
+ "num_input_tokens_seen": 1573376,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -204,7 +293,7 @@
  "attributes": {}
  }
  },
- "total_flos": 69233446486016.0,
+ "total_flos": 103777530806272.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null