ben81828 committed on
Commit 85d67ef · verified · 1 Parent(s): efd5b0c

Training in progress, step 2150, checkpoint

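The binary files in this commit are tracked with Git LFS, so each diff below shows a three-line pointer file (`version`, `oid`, `size`) rather than the binary payload. As a minimal sketch of how such a pointer can be checked against a downloaded blob (the local file paths are illustrative, not part of this commit):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid/size recorded in its pointer."""
    fields = parse_lfs_pointer(Path(pointer_path).read_text())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Illustrative usage against one of the files in this commit:
# verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```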
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f75c51d9ca969a9d3cbb26206d8c3751152111b430ee8f78c774b9d99426948
+oid sha256:023a38accdab689b6f8ae25c0137c8832b5274377f50198dd88dfa1030f6f9c7
 size 29034840
last-checkpoint/global_step2150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d40d3bb1f6583d95896af685dc5228c8bd83fd49b36fbf1c17e0c3fc49e62e1f
+size 43429616
last-checkpoint/global_step2150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fd2c00c48889adf4e5546f739a5b0b524cf9df71d1a3435ca52b98ff0dd69da
+size 43429616
last-checkpoint/global_step2150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e33a6bea860860e919efb1298c892f524ceb7ce017c75d7a1053df2b9e4f6968
+size 43429616
last-checkpoint/global_step2150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05f5afcc420acf2bc32c65c32c8cc37f106c4f89935f5e4c445a926a1f9de435
+size 43429616
last-checkpoint/global_step2150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5a104173fae309cddd5cec659a0d42963dd55180042b7dffd3c0a9f5330d16c
+size 637299
last-checkpoint/global_step2150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c382bb29b9b9b93ee74ce79ebdc7427abdd4946c08810e6ab54148107b238730
+size 637171
last-checkpoint/global_step2150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d55d5f44769829dccf11857c354f886ec28168ac4145aad5146200fcbf91acf
+size 637171
last-checkpoint/global_step2150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5959cc89357511b968c257c43a7386704691106ffbfe1db32582fe21f5f1e6b3
+size 637171
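The eight files added under `global_step2150/` are per-rank DeepSpeed ZeRO shards (bf16 optimizer states and model states for data-parallel ranks 0–3). A minimal sketch of consolidating them into a single fp32 state dict, assuming the `deepspeed` package is installed and the checkpoint directory has been downloaded locally (the local path is illustrative):

```python
# Sketch only: consolidate the per-rank ZeRO shards into one fp32 state dict.
# Assumes `deepspeed` is installed and `last-checkpoint/` exists locally.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",      # directory that contains the `latest` tag file
    tag="global_step2150",  # sub-directory holding the rank shards listed above
)
print(f"consolidated {len(state_dict)} parameter tensors")
```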
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2100
+global_step2150
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d1ddd7e7b4dc44903837b0414e4659f8383cd8f16b41dd396d4eaf5b9829f79
+oid sha256:b105708e2c99c8661b46698b8ccc5799ac83c1f0fc6a30c2d41c9fbfb349d480
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eeddfb77fe4d3b495c4e08307767e08df90e96ef241c3eb80d5f75adec393e80
+oid sha256:bc44cd2015d8c8fc2f109f07c797876873a52f478c57b0350b8a2cf5dcb17f25
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e6260196fabb00061b1f1c8de6288382570dc14d02d2aa308050ca858880a97
+oid sha256:9843dec201b5a542ebd69abfc596f99ad5a000cf81dab52c6a2c52a5b9224ea7
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea023bc5b1def54e0c49389175c0fae812f5f764c502525ce775d993d5ab2c03
+oid sha256:2296f0efda653dd4c7e861f5a867baa09d6d8bb50e57bc69af930268b40de9ef
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:41ca6ff868655333e43bb2ded3dfe8c66b7ed82fa8a98bace2e730c42bb9959b
+oid sha256:ca5770eed90150126cfd6d4c180a03d91fe366663610f38ec72b635b0b8cfd11
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4194311797618866,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-1800",
-  "epoch": 0.5408189544166881,
+  "epoch": 0.553695596188514,
   "eval_steps": 50,
-  "global_step": 2100,
+  "global_step": 2150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3745,11 +3745,100 @@
       "eval_steps_per_second": 0.79,
       "num_input_tokens_seen": 22050376,
       "step": 2100
+    },
+    {
+      "epoch": 0.5421066185938708,
+      "grad_norm": 4.2067016944481965,
+      "learning_rate": 3.4687969674819906e-05,
+      "loss": 0.4067,
+      "num_input_tokens_seen": 22102848,
+      "step": 2105
+    },
+    {
+      "epoch": 0.5433942827710533,
+      "grad_norm": 3.273955419211119,
+      "learning_rate": 3.445667702902969e-05,
+      "loss": 0.36,
+      "num_input_tokens_seen": 22155432,
+      "step": 2110
+    },
+    {
+      "epoch": 0.5446819469482359,
+      "grad_norm": 3.815876908682745,
+      "learning_rate": 3.4225751985120215e-05,
+      "loss": 0.3569,
+      "num_input_tokens_seen": 22207528,
+      "step": 2115
+    },
+    {
+      "epoch": 0.5459696111254185,
+      "grad_norm": 3.5864972836865845,
+      "learning_rate": 3.3995200004502816e-05,
+      "loss": 0.3503,
+      "num_input_tokens_seen": 22260016,
+      "step": 2120
+    },
+    {
+      "epoch": 0.547257275302601,
+      "grad_norm": 10.259154585756033,
+      "learning_rate": 3.3765026539765834e-05,
+      "loss": 0.342,
+      "num_input_tokens_seen": 22312616,
+      "step": 2125
+    },
+    {
+      "epoch": 0.5485449394797837,
+      "grad_norm": 10.27121418750564,
+      "learning_rate": 3.3535237034545675e-05,
+      "loss": 0.4113,
+      "num_input_tokens_seen": 22364776,
+      "step": 2130
+    },
+    {
+      "epoch": 0.5498326036569663,
+      "grad_norm": 7.798195914668443,
+      "learning_rate": 3.330583692339802e-05,
+      "loss": 0.311,
+      "num_input_tokens_seen": 22416944,
+      "step": 2135
+    },
+    {
+      "epoch": 0.5511202678341488,
+      "grad_norm": 4.484699705421769,
+      "learning_rate": 3.307683163166934e-05,
+      "loss": 0.358,
+      "num_input_tokens_seen": 22468960,
+      "step": 2140
+    },
+    {
+      "epoch": 0.5524079320113314,
+      "grad_norm": 4.1926914172544665,
+      "learning_rate": 3.284822657536856e-05,
+      "loss": 0.378,
+      "num_input_tokens_seen": 22521624,
+      "step": 2145
+    },
+    {
+      "epoch": 0.553695596188514,
+      "grad_norm": 4.378365479925035,
+      "learning_rate": 3.262002716103897e-05,
+      "loss": 0.3687,
+      "num_input_tokens_seen": 22574104,
+      "step": 2150
+    },
+    {
+      "epoch": 0.553695596188514,
+      "eval_loss": 0.4504742920398712,
+      "eval_runtime": 38.0971,
+      "eval_samples_per_second": 3.15,
+      "eval_steps_per_second": 0.787,
+      "num_input_tokens_seen": 22574104,
+      "step": 2150
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 22050376,
+  "num_input_tokens_seen": 22574104,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3764,7 +3853,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1454769949900800.0,
+  "total_flos": 1489272114511872.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
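The `trainer_state.json` diff above appends ten training-log entries (steps 2105–2150) and one evaluation record at step 2150 (eval_loss 0.4505, still above the best_metric of 0.4194 from checkpoint-1800). A minimal sketch of reading those metrics back from the checkpoint, assuming the standard Hugging Face Trainer layout in which these entries live under the `log_history` key (the local path is illustrative):

```python
import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

# Training-loss entries carry a "loss" key; eval entries carry "eval_loss" instead.
for entry in state.get("log_history", []):
    if "loss" in entry:
        print(f'step {entry["step"]:>5}  loss {entry["loss"]:.4f}  lr {entry["learning_rate"]:.3e}')
    elif "eval_loss" in entry:
        print(f'step {entry["step"]:>5}  eval_loss {entry["eval_loss"]:.4f}')
```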