ben81828 committed on
Commit 3ab2467 · verified · 1 Parent(s): 36afac4

Training in progress, step 2450, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2874dfb474adef2e86193b1ab18660860287a5e9abf08e0b2d06a88e2eefcb7a
+ oid sha256:76ea3377531bb34d473bfe1ac7fb7435074a39aec464fce193b5f47faf92de11
  size 29034840
last-checkpoint/global_step2450/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86c991cb2b0c6fce97f4086578fe94d5c23c70f076fb52a48baa7b11bb09d498
+ size 43429616
last-checkpoint/global_step2450/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3984a1bec2c2e9e3da8d70da197414e9b2122f690c41781d08adee936459d118
+ size 43429616
last-checkpoint/global_step2450/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:405fb66f00ad893a4539afd425a0d8c524d25fea660dbac42f31168c44fe86e2
+ size 43429616
last-checkpoint/global_step2450/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17fd8d9f5eaf834a0f5c3b85d6f12abeaa7aca49b42993cb71a3233cec52c024
+ size 43429616
last-checkpoint/global_step2450/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7bb58eb71a4b6e22de5fbcac0e3524600ccd3b9e748afdf98ae5444c4762c21
+ size 637299
last-checkpoint/global_step2450/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1b730284eeffe8a55bc9b46bf9d83671b158f9632e1a667dce54c8c675dc34c
+ size 637171
last-checkpoint/global_step2450/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3d5d72e19d9b42ecf282357f58153c490d57412b040b45b0c8c2357329e8de7
+ size 637171
last-checkpoint/global_step2450/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2a68efce44c4f1c6e156c0a70c7e22e0768504f6d042427b7d15db49da0f9fb
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2400
+ global_step2450
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:681faefe4cf303ca7f9bc3073b09b166da4f558d55bee3d5eee90ba5d83159bb
+ oid sha256:6b8aff7a1897a7eaf48c78ea1f8115c061edfa2b6fa42280e2c1c58fe66b1f8a
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c0fb250f1d137fb55cef85743a342508178b4fe3a20c6793c82e279730ea280b
+ oid sha256:1d73a7524f07999ef35d5d9b107dcc1678eae2ada841644e1bd00ec0734368c2
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:34a0d7cfaa34f7e3738b4ef4989d693ed7864fed3b2a44ef1b6892fdcf026bb9
+ oid sha256:149a2ed30e88bf94d622f8d7693f382286a49ac536a3f63efc50cab63f6b9f39
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2a82aabe23bc62e289ef7d075c79f353bbc81286ec0f8964eabda4209d630e10
+ oid sha256:18f12c1b5aae2b7d4bb968649839fc7ff1ce6131508baad4b633693b04cee910
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:33f198cd3e0b21f350098b16849fc1ced7c1d5261e89a1c503f4a0d3ce688f30
+ oid sha256:bf7df6dd05dd94a7aa9222f36c7d07fc54cb5fa02909dfb44c3de4cef2e774f9
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4145541489124298,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
- "epoch": 0.6180788050476436,
+ "epoch": 0.6309554468194695,
  "eval_steps": 50,
- "global_step": 2400,
+ "global_step": 2450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4279,11 +4279,100 @@
  "eval_steps_per_second": 0.785,
  "num_input_tokens_seen": 25206168,
  "step": 2400
+ },
+ {
+ "epoch": 0.6193664692248262,
+ "grad_norm": 9.751299586219911,
+ "learning_rate": 2.1642960822587878e-05,
+ "loss": 0.3147,
+ "num_input_tokens_seen": 25258880,
+ "step": 2405
+ },
+ {
+ "epoch": 0.6206541334020087,
+ "grad_norm": 1.3518511785939038,
+ "learning_rate": 2.1443027488440338e-05,
+ "loss": 0.3467,
+ "num_input_tokens_seen": 25310976,
+ "step": 2410
+ },
+ {
+ "epoch": 0.6219417975791913,
+ "grad_norm": 4.735151631850019,
+ "learning_rate": 2.124376953090456e-05,
+ "loss": 0.3085,
+ "num_input_tokens_seen": 25363520,
+ "step": 2415
+ },
+ {
+ "epoch": 0.623229461756374,
+ "grad_norm": 6.109775613794598,
+ "learning_rate": 2.104519166246059e-05,
+ "loss": 0.3376,
+ "num_input_tokens_seen": 25415400,
+ "step": 2420
+ },
+ {
+ "epoch": 0.6245171259335566,
+ "grad_norm": 1.896185856776787,
+ "learning_rate": 2.0847298579504344e-05,
+ "loss": 0.3312,
+ "num_input_tokens_seen": 25468296,
+ "step": 2425
+ },
+ {
+ "epoch": 0.6258047901107391,
+ "grad_norm": 3.9069124431889932,
+ "learning_rate": 2.065009496223638e-05,
+ "loss": 0.3282,
+ "num_input_tokens_seen": 25520816,
+ "step": 2430
+ },
+ {
+ "epoch": 0.6270924542879217,
+ "grad_norm": 4.998056844440976,
+ "learning_rate": 2.045358547455138e-05,
+ "loss": 0.321,
+ "num_input_tokens_seen": 25573416,
+ "step": 2435
+ },
+ {
+ "epoch": 0.6283801184651043,
+ "grad_norm": 4.15352407282998,
+ "learning_rate": 2.0257774763927655e-05,
+ "loss": 0.33,
+ "num_input_tokens_seen": 25626536,
+ "step": 2440
+ },
+ {
+ "epoch": 0.6296677826422868,
+ "grad_norm": 7.962840580433044,
+ "learning_rate": 2.0062667461317426e-05,
+ "loss": 0.2833,
+ "num_input_tokens_seen": 25679208,
+ "step": 2445
+ },
+ {
+ "epoch": 0.6309554468194695,
+ "grad_norm": 3.645397800601146,
+ "learning_rate": 1.9868268181037185e-05,
+ "loss": 0.3413,
+ "num_input_tokens_seen": 25730432,
+ "step": 2450
+ },
+ {
+ "epoch": 0.6309554468194695,
+ "eval_loss": 0.46914541721343994,
+ "eval_runtime": 38.1221,
+ "eval_samples_per_second": 3.148,
+ "eval_steps_per_second": 0.787,
+ "num_input_tokens_seen": 25730432,
+ "step": 2450
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 25206168,
+ "num_input_tokens_seen": 25730432,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4298,7 +4387,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1663008045268992.0,
+ "total_flos": 1697604769677312.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null