ben81828 committed (verified)
Commit 072ac28 · 1 Parent(s): bbf7e9f

Training in progress, step 3150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:006fbddb28ccd3f24fce77178564cf32b9a0d6002aa0e149d2a632f6bc384cad
+ oid sha256:d53665e5e9c8528df47b7ef76a4152d56ed4d96010a44cb39a4f16dca91d64bb
  size 29034840
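
The adapter weights themselves are stored in Git LFS, so the diff above only rewrites the pointer file: the spec version, the sha256 oid of the new blob, and its size (unchanged at 29034840 bytes). As a sketch that is not part of this commit, a locally pulled copy can be checked against the new pointer; the path and the expected digest come from the hunk above, the helper name is an assumption:

# Sketch only: verify a pulled LFS object against the pointer's oid and size.
import hashlib

def lfs_digest(path, chunk_size=1 << 20):
    # Stream the file so large LFS objects do not need to fit in memory.
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest(), size

oid, size = lfs_digest("last-checkpoint/adapter_model.safetensors")
assert oid == "d53665e5e9c8528df47b7ef76a4152d56ed4d96010a44cb39a4f16dca91d64bb"
assert size == 29034840
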
last-checkpoint/global_step3150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2e0fb046093a81ad8e43afc7a08b41d46414aa1e33cf97ec353ec7163a1bb5f
+ size 43429616
last-checkpoint/global_step3150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe875ee161babcf03d0770eb26eb263041dadae0cd2cbd75dacff805321898f4
+ size 43429616
last-checkpoint/global_step3150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71b97fddeea106f58b8d7fffd5293f8bf4b53641c0c710ff7c3dd2cc88f6bf73
+ size 43429616
last-checkpoint/global_step3150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0148271fb37af87f1bee086b9aff46e286b003549b2331daa512affb62c41b9a
+ size 43429616
last-checkpoint/global_step3150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:281a26e358d9b17c197c48c2ae0a968e2da59326496f249cb7f83e8232ef30a0
+ size 637299
last-checkpoint/global_step3150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65df32237bfebf0fa592beec8e1e4c267976dbf7907532a46eb21a9345b10fac
+ size 637171
last-checkpoint/global_step3150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:039e6c13e3b222cdbb597c188657b75ea26ce47df30b29d835301e61700e88b1
+ size 637171
last-checkpoint/global_step3150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0a9da893289b58af4423adf1a183e2d4cf555b333a8fc3cb9ac838f1b86a164
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step3100
+ global_step3150
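
The new global_step3150 directory follows DeepSpeed's ZeRO checkpoint layout: one bf16 optimizer-state shard (~43 MB) and one model-states file (~0.6 MB) per data-parallel rank 0–3, while the one-line `latest` file now points resumption at the global_step3150 tag. If a single consolidated fp32 state dict were ever needed, DeepSpeed's zero_to_fp32 utility can merge the shards; the sketch below is not part of the commit, assumes the checkpoint has been pulled from LFS, and uses an illustrative output filename:

# Sketch only: merge the ZeRO shards under last-checkpoint/global_step3150/
# into one fp32 state dict on CPU.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With no explicit tag, DeepSpeed reads last-checkpoint/latest, which this
# commit updates to "global_step3150"; passing the tag just makes that explicit.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step3150"
)
torch.save(state_dict, "consolidated_fp32_state_dict.pt")  # output name is an assumption
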
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:381f090b12cbb3fcce976bd2e72d07a7786154c2848cc881d75715648e7c4dc2
+ oid sha256:f819cf2c7fe8719d427eaa6ec9775983252517a512a5eb46040621049bb676c6
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4321dc1beb6c245d4a16a8fb2bed2ce2a40d89e5a9c611c4572f63a09523846c
+ oid sha256:2a78180f5e29cd98437c35591ab402d42bd98c8a12a813c23dfdfe59b5a850b5
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5b79020117e301cd96518c9d1e3eb43a609bc85799f14bac63d9c572fc04cb89
+ oid sha256:5f43520b5104ee75a59749a66f5367557a7f191243b389309b6acad668af81d2
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5429623c7323b4a820ea3d76194bc456c2affeeb1f3af8978aec5aee11b2d1ef
+ oid sha256:2c502c4157180ff49243cd3eeb13c063be673f3bf094a73b62e8315ca6f65442
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d44cc93262eaa36be09e5c05cb4a41bcbfdd9c2c86797c6c680d23dd8657e6ca
+ oid sha256:7d8ecaac6ed58c567c8bab5c2469f31d2d7dbb24618ff65b711fa863465976b7
  size 1064
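
The rng_state_{0..3}.pth files hold the per-rank random-number-generator snapshots and scheduler.pt holds the learning-rate scheduler state; all are small pickled PyTorch objects, which is why only their LFS oids change here. A quick local inspection, sketched under the assumption that the files have been fetched from LFS (the exact keys depend on the transformers/torch versions and are not guaranteed by this commit):

# Sketch only: peek inside the per-rank RNG and scheduler checkpoints.
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
print(list(rng_state) if isinstance(rng_state, dict) else type(rng_state))

scheduler_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)
print(scheduler_state)
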
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4145541489124298,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
- "epoch": 0.7983517898532063,
+ "epoch": 0.8112284316250322,
  "eval_steps": 50,
- "global_step": 3100,
+ "global_step": 3150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -5525,11 +5525,100 @@
  "eval_steps_per_second": 0.799,
  "num_input_tokens_seen": 32553288,
  "step": 3100
+ },
+ {
+ "epoch": 0.7996394540303888,
+ "grad_norm": 8.05961476917595,
+ "learning_rate": 2.0440767832595574e-06,
+ "loss": 0.3052,
+ "num_input_tokens_seen": 32606096,
+ "step": 3105
+ },
+ {
+ "epoch": 0.8009271182075715,
+ "grad_norm": 3.1428977500375326,
+ "learning_rate": 1.975829422950709e-06,
+ "loss": 0.2125,
+ "num_input_tokens_seen": 32659376,
+ "step": 3110
+ },
+ {
+ "epoch": 0.8022147823847541,
+ "grad_norm": 2.8855336999591295,
+ "learning_rate": 1.908717841359048e-06,
+ "loss": 0.3122,
+ "num_input_tokens_seen": 32712168,
+ "step": 3115
+ },
+ {
+ "epoch": 0.8035024465619367,
+ "grad_norm": 12.674047700213576,
+ "learning_rate": 1.8427436256833852e-06,
+ "loss": 0.3006,
+ "num_input_tokens_seen": 32764296,
+ "step": 3120
+ },
+ {
+ "epoch": 0.8047901107391192,
+ "grad_norm": 1.5292819995856641,
+ "learning_rate": 1.7779083362236547e-06,
+ "loss": 0.3077,
+ "num_input_tokens_seen": 32815296,
+ "step": 3125
+ },
+ {
+ "epoch": 0.8060777749163018,
+ "grad_norm": 12.068461011216378,
+ "learning_rate": 1.7142135063440035e-06,
+ "loss": 0.29,
+ "num_input_tokens_seen": 32867288,
+ "step": 3130
+ },
+ {
+ "epoch": 0.8073654390934845,
+ "grad_norm": 9.37062799812982,
+ "learning_rate": 1.6516606424365643e-06,
+ "loss": 0.3574,
+ "num_input_tokens_seen": 32919584,
+ "step": 3135
+ },
+ {
+ "epoch": 0.808653103270667,
+ "grad_norm": 5.777474878278418,
+ "learning_rate": 1.5902512238857858e-06,
+ "loss": 0.2414,
+ "num_input_tokens_seen": 32972736,
+ "step": 3140
+ },
+ {
+ "epoch": 0.8099407674478496,
+ "grad_norm": 3.0096174763729864,
+ "learning_rate": 1.5299867030334814e-06,
+ "loss": 0.2521,
+ "num_input_tokens_seen": 33026320,
+ "step": 3145
+ },
+ {
+ "epoch": 0.8112284316250322,
+ "grad_norm": 5.573236169553209,
+ "learning_rate": 1.4708685051444515e-06,
+ "loss": 0.2669,
+ "num_input_tokens_seen": 33078960,
+ "step": 3150
+ },
+ {
+ "epoch": 0.8112284316250322,
+ "eval_loss": 0.4687062203884125,
+ "eval_runtime": 37.4391,
+ "eval_samples_per_second": 3.205,
+ "eval_steps_per_second": 0.801,
+ "num_input_tokens_seen": 33078960,
+ "step": 3150
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 32553288,
+ "num_input_tokens_seen": 33078960,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -5544,7 +5633,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2147751457718272.0,
+ "total_flos": 2182476112592896.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null