ben81828 committed (verified)
Commit dc33c5a · 1 Parent(s): fee63c2

Training in progress, step 2550, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0cd56f5dfc9655c1cce72c58e7171b43d809ae6db173fb5bf3e8fc8c8fe2e604
+ oid sha256:171dfcb8bb8ac5a7df90ac9ee419e7a8a301f24e9f99f3e8664ab7c1ada5f55f
  size 29034840
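The adapter weights are stored through Git LFS, so the diff above only touches the pointer file (spec version, sha256 oid, and byte size); the 29 MB binary itself is swapped out of band. A minimal sketch of reading such a pointer from a local clone where the LFS objects have not been pulled yet (the helper name is illustrative, not part of the repo):

```python
# Sketch: parse a Git LFS pointer file (key/value lines: version, oid, size)
# such as last-checkpoint/adapter_model.safetensors before `git lfs pull`.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Illustrative helper: split each 'key value' line of an LFS pointer."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"], pointer["size"])  # sha256:171dfc...  29034840
```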
last-checkpoint/global_step2550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7306fbf7c90e0959db1277b3c0b7e5b92ea93823605298953ad51fa7fbd9a197
+ size 43429616
last-checkpoint/global_step2550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1df1034db16dd3aa7480d586822b2c0fff91111cb4b6dfdae59f54b5cbe915f2
+ size 43429616
last-checkpoint/global_step2550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62b83d05c2cfecfdfd49127decbdcbc0520d46ee34a6e406adc9e6bdfb17fc8d
+ size 43429616
last-checkpoint/global_step2550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ecefff3e20c3e6e020ef8f7fd37175a04b52fb99b6ec3604862d40100759a7c
+ size 43429616
last-checkpoint/global_step2550/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:459c71b1156c55fcd73acb65442792d22282e3260b89f66cdf8ad0815d56f457
+ size 637299
last-checkpoint/global_step2550/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c42effbfd8441de135408f95c55462d6c1c4c8dd96b6417b67de471d2e95afcb
+ size 637171
last-checkpoint/global_step2550/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:304c5ba32c89fec95d937699d77819f8e533af674d93476e22a507dac625cebf
+ size 637171
last-checkpoint/global_step2550/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcaea5da1d1730a97458c88513e37f66edcf7420f14ca71eb82cab77a1de4424
+ size 637171
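The new global_step2550 directory carries the DeepSpeed ZeRO shards for four data-parallel ranks: per-rank bf16 optimizer states (~43 MB each) and per-rank model states (~0.6 MB each). If full-precision trained weights ever needed to be rebuilt from these shards rather than taken from the saved adapter, DeepSpeed ships a consolidation utility; a hedged sketch, assuming DeepSpeed is installed and the checkpoint layout is exactly as shown above:

```python
# Sketch: consolidate the per-rank ZeRO shards under
# last-checkpoint/global_step2550 into a single fp32 state dict.
# Assumes DeepSpeed is installed and this checkpoint was written by ZeRO,
# as the file names above suggest.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",       # directory that also holds the `latest` tag file
    tag="global_step2550",   # which global step to consolidate
)
print(sum(t.numel() for t in state_dict.values()), "trained parameters recovered")
```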
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2500
+ global_step2550
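`latest` is DeepSpeed's tag file: on resume, the loader reads it to decide which global_step* directory to restore, which is why this commit bumps it from global_step2500 to global_step2550 (frameworks such as the Hugging Face Trainer trigger this implicitly when passed resume_from_checkpoint). A small sketch of that lookup against a local clone of the repo:

```python
# Sketch: resolve the shard directory the way a resume would, by reading the
# `latest` tag file inside the checkpoint folder from this commit.
from pathlib import Path

ckpt_dir = Path("last-checkpoint")
tag = (ckpt_dir / "latest").read_text().strip()  # "global_step2550"
shard_dir = ckpt_dir / tag                       # holds the ZeRO *_states.pt shards
print(tag, sorted(p.name for p in shard_dir.iterdir()))
```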
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a97c73c15a2a5b2de7dc426a700b2053aee43809425431c513cc5e3aab6c2107
+ oid sha256:0362dfd92e8da01e4a0deedcbd1c493b8162d5d1d84d5a4c1cd210c556f2cf9b
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1296b339c1b16ab7e14352a269004d20ede428aef748283fb0a6650d62f58129
+ oid sha256:e65c5adee1a22c5343e38495a6905880496fb22d5e3ec5b16b87aadb731969d2
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:000b1637f5e73170f2337500a6a083df3a43d967d642b6c3a68f60deb6c3b960
+ oid sha256:f8d8858483b0c6944d55621cc2633469e3e0d04c48b6671eee92d4abab2352c2
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8bcb6e7802f6d888bc099642911087298cfb1adf7053a2d43a67192a53404ef
+ oid sha256:ca42ef4f7a2f8c2285c4cf6cef585dcc0b132b21e8bb33d96d53b6db837f5e54
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be1e9cd300c4f4c1fc9be3848ef7e995abd4a81c17c7a3b103813aaad4725565
+ oid sha256:eb561d82386bf4b227a10b6e6e08effab17d0e684e1cd302e30cfc0a843bd1df
  size 1064
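The per-rank rng_state_*.pth files and scheduler.pt are small torch pickles refreshed on every save so that data shuffling and the learning-rate schedule resume exactly where they left off. A quick sketch for inspecting one locally; the weights_only=False argument is an assumption needed on recent PyTorch because these files store RNG state objects rather than plain tensors:

```python
# Sketch: peek at one of the saved RNG state files from this checkpoint.
import torch

rng = torch.load(
    "last-checkpoint/rng_state_0.pth",
    map_location="cpu",
    weights_only=False,  # the file holds RNG state objects, not just tensors
)
print(type(rng), list(rng.keys()) if isinstance(rng, dict) else rng)
```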
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4145541489124298,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
- "epoch": 0.6438320885912954,
+ "epoch": 0.6567087303631213,
  "eval_steps": 50,
- "global_step": 2500,
+ "global_step": 2550,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4457,11 +4457,100 @@
  "eval_steps_per_second": 0.785,
  "num_input_tokens_seen": 26252584,
  "step": 2500
+ },
+ {
+ "epoch": 0.645119752768478,
+ "grad_norm": 12.715005532803087,
+ "learning_rate": 1.7777908288191176e-05,
+ "loss": 0.3113,
+ "num_input_tokens_seen": 26304800,
+ "step": 2505
+ },
+ {
+ "epoch": 0.6464074169456606,
+ "grad_norm": 4.389623559119695,
+ "learning_rate": 1.7592358989400883e-05,
+ "loss": 0.3581,
+ "num_input_tokens_seen": 26357680,
+ "step": 2510
+ },
+ {
+ "epoch": 0.6476950811228431,
+ "grad_norm": 4.708341940810254,
+ "learning_rate": 1.740757613610028e-05,
+ "loss": 0.3353,
+ "num_input_tokens_seen": 26410432,
+ "step": 2515
+ },
+ {
+ "epoch": 0.6489827453000258,
+ "grad_norm": 2.698266437964572,
+ "learning_rate": 1.7223564098431067e-05,
+ "loss": 0.2796,
+ "num_input_tokens_seen": 26463016,
+ "step": 2520
+ },
+ {
+ "epoch": 0.6502704094772084,
+ "grad_norm": 2.4430847474817843,
+ "learning_rate": 1.704032722830512e-05,
+ "loss": 0.3197,
+ "num_input_tokens_seen": 26515408,
+ "step": 2525
+ },
+ {
+ "epoch": 0.6515580736543909,
+ "grad_norm": 2.729151807047382,
+ "learning_rate": 1.68578698593014e-05,
+ "loss": 0.3182,
+ "num_input_tokens_seen": 26567024,
+ "step": 2530
+ },
+ {
+ "epoch": 0.6528457378315735,
+ "grad_norm": 12.016926866019531,
+ "learning_rate": 1.6676196306563613e-05,
+ "loss": 0.3822,
+ "num_input_tokens_seen": 26619744,
+ "step": 2535
+ },
+ {
+ "epoch": 0.6541334020087561,
+ "grad_norm": 3.7284612790252294,
+ "learning_rate": 1.6495310866698093e-05,
+ "loss": 0.2853,
+ "num_input_tokens_seen": 26672408,
+ "step": 2540
+ },
+ {
+ "epoch": 0.6554210661859388,
+ "grad_norm": 4.562253048250174,
+ "learning_rate": 1.631521781767214e-05,
+ "loss": 0.3622,
+ "num_input_tokens_seen": 26724488,
+ "step": 2545
+ },
+ {
+ "epoch": 0.6567087303631213,
+ "grad_norm": 9.803435725573266,
+ "learning_rate": 1.6135921418712956e-05,
+ "loss": 0.3195,
+ "num_input_tokens_seen": 26776816,
+ "step": 2550
+ },
+ {
+ "epoch": 0.6567087303631213,
+ "eval_loss": 0.43731561303138733,
+ "eval_runtime": 38.3205,
+ "eval_samples_per_second": 3.131,
+ "eval_steps_per_second": 0.783,
+ "num_input_tokens_seen": 26776816,
+ "step": 2550
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 26252584,
+ "num_input_tokens_seen": 26776816,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4476,7 +4565,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1732044831850496.0,
+ "total_flos": 1766674781700096.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null