ben81828 committed (verified)
Commit 04f708a · Parent(s): 446622b

Training in progress, step 3100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c0919a08fffa1587acbe31835d5a8f8669c94a21edd38c6366ed4e1822b6b15
+oid sha256:006fbddb28ccd3f24fce77178564cf32b9a0d6002aa0e149d2a632f6bc384cad
 size 29034840
last-checkpoint/global_step3100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:459ce3ca820ba1c67329dd8fd2836b0a7285fc19f9ba64456fa90126662cbcfb
+size 43429616
last-checkpoint/global_step3100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:695d20e01e322e423f3b86d3e1d2ee57cc166f9666f03049c87386da42551953
+size 43429616
last-checkpoint/global_step3100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf081ec30b6ae77d1be0a545e0b545f01df47721153713c632ad4ebc0a3966d5
+size 43429616
last-checkpoint/global_step3100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfcee3947b0e659ea58510a73a6c88b19f179fca4ba79b51a6cd4597d9505b40
+size 43429616
last-checkpoint/global_step3100/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:769a527c4e6f510809f5b2b13928892b9b7024384bc00772661a2cccde5e4fa4
+size 637299
last-checkpoint/global_step3100/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d05589d1f79cab339f6f64907a5dbf1bd44b1e8d65a9a1f019d2ec8e2b3cd2a6
+size 637171
last-checkpoint/global_step3100/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e28ad7a834b66d1ee2fe12359362f2fdb764eaf9b451b44e1d6df40abfd8079c
+size 637171
last-checkpoint/global_step3100/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:226452aea7fb29b0cd6c8e9e340122bb65353ec4a0df00019fe9614dc4195bc6
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step3050
+global_step3100
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f30f7f3c3620ccd30020e1ea4b81d1a56ee511b742c91370577d55399c14412
+oid sha256:381f090b12cbb3fcce976bd2e72d07a7786154c2848cc881d75715648e7c4dc2
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:440cc1f8e138e1a90606722bd350460b4460991a2f3671f46f880f5743522dca
+oid sha256:4321dc1beb6c245d4a16a8fb2bed2ce2a40d89e5a9c611c4572f63a09523846c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d78d5df466f5b34e85649b90825d4b168464f6d49c668313415473184409b799
+oid sha256:5b79020117e301cd96518c9d1e3eb43a609bc85799f14bac63d9c572fc04cb89
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d8be6df9e66c8bba407df289d2aa6b4422668727a1e2419037de032213393a4
+oid sha256:5429623c7323b4a820ea3d76194bc456c2affeeb1f3af8978aec5aee11b2d1ef
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65132092aa084390575a45bea99d0dcabb8005a8fae760edf38c6251571f2afa
+oid sha256:d44cc93262eaa36be09e5c05cb4a41bcbfdd9c2c86797c6c680d23dd8657e6ca
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4145541489124298,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
-  "epoch": 0.7854751480813804,
+  "epoch": 0.7983517898532063,
   "eval_steps": 50,
-  "global_step": 3050,
+  "global_step": 3100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5436,11 +5436,100 @@
       "eval_steps_per_second": 0.789,
       "num_input_tokens_seen": 32030016,
       "step": 3050
+    },
+    {
+      "epoch": 0.7867628122585629,
+      "grad_norm": 1.6401540308745686,
+      "learning_rate": 2.7886498764184588e-06,
+      "loss": 0.3247,
+      "num_input_tokens_seen": 32082256,
+      "step": 3055
+    },
+    {
+      "epoch": 0.7880504764357456,
+      "grad_norm": 2.305310160123287,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 0.2895,
+      "num_input_tokens_seen": 32134592,
+      "step": 3060
+    },
+    {
+      "epoch": 0.7893381406129282,
+      "grad_norm": 7.79411199672483,
+      "learning_rate": 2.6307443893812843e-06,
+      "loss": 0.294,
+      "num_input_tokens_seen": 32187064,
+      "step": 3065
+    },
+    {
+      "epoch": 0.7906258047901107,
+      "grad_norm": 7.8803119684251355,
+      "learning_rate": 2.5534711536759404e-06,
+      "loss": 0.3205,
+      "num_input_tokens_seen": 32238944,
+      "step": 3070
+    },
+    {
+      "epoch": 0.7919134689672933,
+      "grad_norm": 1.651743530845747,
+      "learning_rate": 2.4773200353756798e-06,
+      "loss": 0.2726,
+      "num_input_tokens_seen": 32291528,
+      "step": 3075
+    },
+    {
+      "epoch": 0.7932011331444759,
+      "grad_norm": 5.642476416103777,
+      "learning_rate": 2.4022928354656473e-06,
+      "loss": 0.3012,
+      "num_input_tokens_seen": 32343600,
+      "step": 3080
+    },
+    {
+      "epoch": 0.7944887973216586,
+      "grad_norm": 3.639368693424175,
+      "learning_rate": 2.3283913283502044e-06,
+      "loss": 0.2712,
+      "num_input_tokens_seen": 32396128,
+      "step": 3085
+    },
+    {
+      "epoch": 0.7957764614988411,
+      "grad_norm": 6.532937861900995,
+      "learning_rate": 2.2556172618108997e-06,
+      "loss": 0.3342,
+      "num_input_tokens_seen": 32448624,
+      "step": 3090
+    },
+    {
+      "epoch": 0.7970641256760237,
+      "grad_norm": 1.4088613654984938,
+      "learning_rate": 2.183972356965125e-06,
+      "loss": 0.3132,
+      "num_input_tokens_seen": 32500664,
+      "step": 3095
+    },
+    {
+      "epoch": 0.7983517898532063,
+      "grad_norm": 3.205147557757995,
+      "learning_rate": 2.113458308225458e-06,
+      "loss": 0.2856,
+      "num_input_tokens_seen": 32553288,
+      "step": 3100
+    },
+    {
+      "epoch": 0.7983517898532063,
+      "eval_loss": 0.4740166962146759,
+      "eval_runtime": 37.5378,
+      "eval_samples_per_second": 3.197,
+      "eval_steps_per_second": 0.799,
+      "num_input_tokens_seen": 32553288,
+      "step": 3100
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 32030016,
+  "num_input_tokens_seen": 32553288,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5455,7 +5544,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2113197121732608.0,
+  "total_flos": 2147751457718272.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null