ben81828 committed
Commit 2c2790a · verified · 1 Parent(s): feceb2d

Training in progress, step 700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a040fdacc28b9de2977636cd1c1956bffb7f97df44e5bd2640662f6a98fa674b
+ oid sha256:2cc88a9133bd30e9f761aeca11737097aa2bc75e291ca7a8f1b1b21cc4ee3a5a
  size 29034840
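
Every binary file in this commit is stored as a Git LFS pointer like the one above: a `version` line, the `oid sha256` of the real object, and its `size` in bytes. As a minimal sketch (file paths are illustrative, not part of this repository), a downloaded object can be checked against its pointer like so:

```python
# Minimal sketch: check a downloaded file against its Git LFS pointer.
# Paths are illustrative; adjust to wherever the pointer and object live.
import hashlib

def parse_lfs_pointer(pointer_path):
    """Parse the key/value lines of a git-lfs pointer file into a dict."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

def matches_pointer(pointer_path, object_path):
    """Return True if the object's SHA-256 and byte size match the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with open(object_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. matches_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")
```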
last-checkpoint/global_step700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d985b6f15413cc381cd06576e8a189c829649d7182bc4c870a1ece58675f343
+ size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1659d3855a9ef0e3b05343b8679ed9b6213f31c669db88b09899390e3b0ee20
+ size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9b7c38ead9e590ae0766ffa9f93ce15c90caaf9de2544f3e12b04522c5c187c
+ size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1a5a1f7d2bc3c5fb7d4df15ae88afc8579c8af634c0897a4d2f635ceeeb9459
+ size 43429616
last-checkpoint/global_step700/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1016e4296788968087a2a2e95f3a62ee2c6085b5340c3e92171dae571481ea55
+ size 637299
last-checkpoint/global_step700/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb87f12b7ce572b38ad895ddc218de834da4bf60ec37d070e1017300431fe756
+ size 637171
last-checkpoint/global_step700/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19d458531685f08f79fecf5600b5b32a91ce3e7e9e5c6bf39ed77dc64a6777d6
+ size 637171
last-checkpoint/global_step700/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6548dde312f71fd6a7a282d97e0df54d5ebeb16b695ea5d8f6c774bbc1ca3bb
+ size 637171
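
The eight files added under global_step700/ are DeepSpeed ZeRO shards: per-rank bf16 optimizer states plus per-rank model states for a 4-rank run. As a sketch, assuming DeepSpeed is installed and last-checkpoint/ has been downloaded locally, the shards can be consolidated into a single fp32 state dict with DeepSpeed's zero_to_fp32 helper (the tag below mirrors the `latest` file updated in this commit):

```python
# Sketch, assuming a local copy of last-checkpoint/ and an installed DeepSpeed.
# get_fp32_state_dict_from_zero_checkpoint merges the per-rank ZeRO shards
# stored under the given tag into one fp32 state dict on CPU.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",      # directory that contains global_step700/ and `latest`
    tag="global_step700",   # optional; defaults to whatever `latest` points at
)
print(f"consolidated {sum(v.numel() for v in state_dict.values()):,} parameters")
```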
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step650
+ global_step700
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
+ oid sha256:e7d74de51245105e1fbf57a6707ef3538b353952485508f6e2f8f74dc5d479d4
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
+ oid sha256:0617c9eb6cf7df57b2e0bb53cfe17c05f0910de56fe5b14427fe39ab54a44782
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
+ oid sha256:ed68a365057022897d9645ee60902a77102f43215dcdf2ddd5d3842b6a8446d8
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
+ oid sha256:63ebaa0c302cadbdfcd9f8ee2289e35ecf9c9fc8c9968fc0c05f100dac20c6b9
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8cd94ecf5c982ee0e060d3e07a575ce03dc3b0f289b5e32a1f65d3b6366a8a0e
+ oid sha256:9089a2e73aa73e2c09752311bdfa67c1ed286ab83bb4bc61dbf851e8193bb593
  size 1064
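
The trainer_state.json diff that follows moves the best checkpoint from step 650 (eval_loss 0.5542) to step 700 (eval_loss 0.5440) and appends the log entries for steps 655 through 700. A minimal sketch for reading those fields back from a local copy (path illustrative):

```python
# Minimal sketch: inspect the updated trainer_state.json from a local checkout.
import json

with open("last-checkpoint/trainer_state.json", "r", encoding="utf-8") as fh:
    state = json.load(fh)

print(state["best_metric"])            # 0.5440100431442261 (eval_loss at step 700)
print(state["best_model_checkpoint"])  # .../checkpoint-700
print(state["global_step"], "of", state["max_steps"], "steps")  # 700 of 3400

# Last evaluation record appended by this commit.
last_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print(last_eval["step"], last_eval["eval_loss"])
```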
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
-   "best_metric": 0.5542036890983582,
-   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-650",
-   "epoch": 0.1673963430337368,
+   "best_metric": 0.5440100431442261,
+   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-700",
+   "epoch": 0.1802729848055627,
    "eval_steps": 50,
-   "global_step": 650,
+   "global_step": 700,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1164,11 +1164,100 @@
    "eval_steps_per_second": 0.435,
    "num_input_tokens_seen": 6824008,
    "step": 650
+   },
+   {
+    "epoch": 0.1686840072109194,
+    "grad_norm": 3.428410481447858,
+    "learning_rate": 9.453928183013385e-05,
+    "loss": 0.5344,
+    "num_input_tokens_seen": 6875432,
+    "step": 655
+   },
+   {
+    "epoch": 0.16997167138810199,
+    "grad_norm": 2.9137495299009846,
+    "learning_rate": 9.442825912005202e-05,
+    "loss": 0.56,
+    "num_input_tokens_seen": 6927768,
+    "step": 660
+   },
+   {
+    "epoch": 0.17125933556528458,
+    "grad_norm": 4.2956604210715925,
+    "learning_rate": 9.431618567508933e-05,
+    "loss": 0.5701,
+    "num_input_tokens_seen": 6980544,
+    "step": 665
+   },
+   {
+    "epoch": 0.17254699974246718,
+    "grad_norm": 4.3977584083656405,
+    "learning_rate": 9.420306414579925e-05,
+    "loss": 0.5604,
+    "num_input_tokens_seen": 7032584,
+    "step": 670
+   },
+   {
+    "epoch": 0.17383466391964975,
+    "grad_norm": 4.48381006313936,
+    "learning_rate": 9.408889720752266e-05,
+    "loss": 0.5763,
+    "num_input_tokens_seen": 7085048,
+    "step": 675
+   },
+   {
+    "epoch": 0.17512232809683234,
+    "grad_norm": 2.189534287393346,
+    "learning_rate": 9.397368756032445e-05,
+    "loss": 0.5962,
+    "num_input_tokens_seen": 7137952,
+    "step": 680
+   },
+   {
+    "epoch": 0.17640999227401494,
+    "grad_norm": 3.34591241093722,
+    "learning_rate": 9.385743792892982e-05,
+    "loss": 0.5935,
+    "num_input_tokens_seen": 7190584,
+    "step": 685
+   },
+   {
+    "epoch": 0.17769765645119753,
+    "grad_norm": 2.7509902524242507,
+    "learning_rate": 9.374015106265968e-05,
+    "loss": 0.5267,
+    "num_input_tokens_seen": 7243440,
+    "step": 690
+   },
+   {
+    "epoch": 0.17898532062838013,
+    "grad_norm": 2.322454948468365,
+    "learning_rate": 9.362182973536569e-05,
+    "loss": 0.5351,
+    "num_input_tokens_seen": 7295568,
+    "step": 695
+   },
+   {
+    "epoch": 0.1802729848055627,
+    "grad_norm": 3.4615171229405046,
+    "learning_rate": 9.35024767453647e-05,
+    "loss": 0.5014,
+    "num_input_tokens_seen": 7347040,
+    "step": 700
+   },
+   {
+    "epoch": 0.1802729848055627,
+    "eval_loss": 0.5440100431442261,
+    "eval_runtime": 39.1181,
+    "eval_samples_per_second": 3.068,
+    "eval_steps_per_second": 0.767,
+    "num_input_tokens_seen": 7347040,
+    "step": 700
    }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 6824008,
+ "num_input_tokens_seen": 7347040,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1183,7 +1272,7 @@
    "attributes": {}
    }
  },
- "total_flos": 450087645085696.0,
+ "total_flos": 484589024051200.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null