ben81828 committed
Commit 3e58f21 · verified · 1 Parent(s): 600b787

Training in progress, step 2400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8464937776286fdba4fee3aa59e0e0cefe29cc3b83812b6842ff0a56228048f2
+ oid sha256:2874dfb474adef2e86193b1ab18660860287a5e9abf08e0b2d06a88e2eefcb7a
  size 29034840
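
The adapter weights themselves live in Git LFS; the diff above only updates the pointer file (its `oid sha256` and `size`). As a minimal sketch of checking a pulled LFS object against its pointer, assuming the repository has been cloned and `git lfs pull` has replaced the pointer with the real binary (the path, digest, and size below are copied from the new pointer; everything else is illustrative):

```python
import hashlib

# Expected values copied from the new LFS pointer in the diff above.
EXPECTED_OID = "2874dfb474adef2e86193b1ab18660860287a5e9abf08e0b2d06a88e2eefcb7a"
EXPECTED_SIZE = 29034840
PATH = "last-checkpoint/adapter_model.safetensors"  # assumes the LFS object was pulled locally


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's sha256 digest and byte size match the LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size


if __name__ == "__main__":
    print(verify_lfs_object(PATH, EXPECTED_OID, EXPECTED_SIZE))
```
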
last-checkpoint/global_step2400/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9317d695d9f53cd93973509664a22e899f24b338919af23a6bf6915df7d3a35
+ size 43429616
last-checkpoint/global_step2400/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eaa3db7e6d09a6adb220216d1b11d80f65308138095cae32a7906d8071e772d
+ size 43429616
last-checkpoint/global_step2400/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0998d8084f4151b2c8ead5dcaf9e1293b892ae5907f8819b94aa41a84519964
+ size 43429616
last-checkpoint/global_step2400/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bee44e2d0ba29219b9a7e0f0192ec3f7c2375000da164d8a81a247f9a00b087
+ size 43429616
last-checkpoint/global_step2400/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb28cb80645e4e5b08d929feb6cb284b80b9374d0abb5cac7a034e04e045bf67
+ size 637299
last-checkpoint/global_step2400/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e18a2b0b8ad83bd9cd5698849f6aafc46f23435824e7200c2e85a9eb4e492a1
+ size 637171
last-checkpoint/global_step2400/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc63c2624e3a26f46cb4e76250e1daf40e4de23f6d0694e319d1c3d09e6246cc
+ size 637171
last-checkpoint/global_step2400/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9469a2da3dcd4ac7538c85ff2efc3e7edb38b7bb9ddacc4ebbcca32aabfa5fe
+ size 637171
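
The eight files added above are a DeepSpeed ZeRO shard set for global step 2400: one bf16 optimizer-state shard and one model-states file per data-parallel rank (ranks 0–3). Checkpoints saved this way typically also include a zero_to_fp32.py helper for consolidating the shards into a single fp32 state dict, though that script is not part of this diff. A minimal sketch for peeking at one shard, assuming the LFS objects have been pulled and that unpickling the shard works in your environment (it may require deepspeed to be installed); no particular key names are assumed:

```python
import torch

# Path taken from the diff above; assumes the LFS object was pulled locally.
SHARD = "last-checkpoint/global_step2400/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt"

# Load one ZeRO shard on CPU and list its top-level entries. weights_only=False
# is needed because DeepSpeed shards contain more than plain tensors.
state = torch.load(SHARD, map_location="cpu", weights_only=False)
if isinstance(state, dict):
    for key, value in state.items():
        print(key, type(value).__name__)
else:
    print(type(state).__name__)
```
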
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2350
+ global_step2400
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f12bf3da75454e5aae4644f2a1d46fdf90f68e680dbf5bdaa86861f825d32d80
+ oid sha256:681faefe4cf303ca7f9bc3073b09b166da4f558d55bee3d5eee90ba5d83159bb
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2faef1b8798e7516fd96ee7b3363866a8f97ca2d0ec5a8dd27bbfe70b0c6a733
+ oid sha256:c0fb250f1d137fb55cef85743a342508178b4fe3a20c6793c82e279730ea280b
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f35f6d27fbd414dd4285d91816b37fb6b97ff10fbba4c074d56ad2a7f723033
+ oid sha256:34a0d7cfaa34f7e3738b4ef4989d693ed7864fed3b2a44ef1b6892fdcf026bb9
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ea5c64dfc7e3b2729065483dfef8e4bb0af0d9bae32df888d258ee3c2859d676
+ oid sha256:2a82aabe23bc62e289ef7d075c79f353bbc81286ec0f8964eabda4209d630e10
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d71a7ba4d3f18f2d8b75b4d8773a5d2a13dce7aa34d7060fa3e4c5c241599a29
+ oid sha256:33f198cd3e0b21f350098b16849fc1ced7c1d5261e89a1c503f4a0d3ce688f30
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4145541489124298,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
- "epoch": 0.6052021632758177,
+ "epoch": 0.6180788050476436,
  "eval_steps": 50,
- "global_step": 2350,
+ "global_step": 2400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4190,11 +4190,100 @@
  "eval_steps_per_second": 0.786,
  "num_input_tokens_seen": 24678768,
  "step": 2350
+ },
+ {
+ "epoch": 0.6064898274530003,
+ "grad_norm": 2.7740413001573057,
+ "learning_rate": 2.3678391856132204e-05,
+ "loss": 0.3229,
+ "num_input_tokens_seen": 24730528,
+ "step": 2355
+ },
+ {
+ "epoch": 0.6077774916301828,
+ "grad_norm": 6.766132750655314,
+ "learning_rate": 2.3471967400628513e-05,
+ "loss": 0.3308,
+ "num_input_tokens_seen": 24784472,
+ "step": 2360
+ },
+ {
+ "epoch": 0.6090651558073654,
+ "grad_norm": 6.8653733639649515,
+ "learning_rate": 2.3266170337008398e-05,
+ "loss": 0.356,
+ "num_input_tokens_seen": 24838168,
+ "step": 2365
+ },
+ {
+ "epoch": 0.610352819984548,
+ "grad_norm": 2.3990294356922615,
+ "learning_rate": 2.306100553240274e-05,
+ "loss": 0.2784,
+ "num_input_tokens_seen": 24890552,
+ "step": 2370
+ },
+ {
+ "epoch": 0.6116404841617307,
+ "grad_norm": 2.929256632803373,
+ "learning_rate": 2.2856477838989456e-05,
+ "loss": 0.2859,
+ "num_input_tokens_seen": 24942904,
+ "step": 2375
+ },
+ {
+ "epoch": 0.6129281483389132,
+ "grad_norm": 3.932586185965905,
+ "learning_rate": 2.2652592093878666e-05,
+ "loss": 0.3107,
+ "num_input_tokens_seen": 24995776,
+ "step": 2380
+ },
+ {
+ "epoch": 0.6142158125160958,
+ "grad_norm": 3.999914127947348,
+ "learning_rate": 2.244935311899829e-05,
+ "loss": 0.3131,
+ "num_input_tokens_seen": 25047848,
+ "step": 2385
+ },
+ {
+ "epoch": 0.6155034766932784,
+ "grad_norm": 3.803358403729212,
+ "learning_rate": 2.224676572098007e-05,
+ "loss": 0.3175,
+ "num_input_tokens_seen": 25100896,
+ "step": 2390
+ },
+ {
+ "epoch": 0.6167911408704609,
+ "grad_norm": 10.600664919848047,
+ "learning_rate": 2.2044834691045873e-05,
+ "loss": 0.3482,
+ "num_input_tokens_seen": 25153912,
+ "step": 2395
+ },
+ {
+ "epoch": 0.6180788050476436,
+ "grad_norm": 5.122783317200166,
+ "learning_rate": 2.184356480489432e-05,
+ "loss": 0.3445,
+ "num_input_tokens_seen": 25206168,
+ "step": 2400
+ },
+ {
+ "epoch": 0.6180788050476436,
+ "eval_loss": 0.42807063460350037,
+ "eval_runtime": 38.2326,
+ "eval_samples_per_second": 3.139,
+ "eval_steps_per_second": 0.785,
+ "num_input_tokens_seen": 25206168,
+ "step": 2400
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 24678768,
+ "num_input_tokens_seen": 25206168,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4209,7 +4298,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1628169367453696.0,
+ "total_flos": 1663008045268992.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null