ben81828 committed
Commit ff46753 · verified · 1 parent: c14f2c5

Training in progress, step 2700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:47c3a246645edf5ddfde12f7e3a3985714315a8a332a960d7d8036f942d6495e
+oid sha256:2b583b602b9e38c1b3b50b506d83107d06e11e7b44aab1de71c95fee8b4886d2
 size 29034840
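Only the Git LFS pointer (version / oid sha256 / size) changes here: the adapter blob itself was rewritten at step 2700 while its size stayed at 29034840 bytes. A minimal sketch, assuming the repo has been cloned and `git lfs pull` has fetched the blob, of checking the local file against the new `oid` and peeking at the LoRA adapter tensors (the local path and printed keys are assumptions, not part of this commit):

```python
# Sketch only: verify the fetched LFS blob against the pointer's sha256 and
# list a few adapter tensors. The local path is an assumption.
import hashlib

from safetensors.torch import load_file

path = "last-checkpoint/adapter_model.safetensors"

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
# Should print 2b583b60... (the new oid) if the blob matches this commit.
print("sha256:", digest)

# Load the adapter weights on CPU and show a handful of parameter shapes.
state = load_file(path)
for name, tensor in list(state.items())[:5]:
    print(name, tuple(tensor.shape))
```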
last-checkpoint/global_step2700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:540443c66d2040cd33e5bdd649402f7be912c5f1406d772d104c9d3a71a33966
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d57307d65737a8c825e31c625ba2abcbbe8eb77d5118e36d5c7932ebecad6163
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7b052cc1ede348e35f475256347d57cde55f84446ce2058160b5971d0394660
+size 43429616
last-checkpoint/global_step2700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:951b94aecdba34438a91f45d5d5b118d89da8e9b47d38e706ddf1e5da3d9f594
+size 43429616
last-checkpoint/global_step2700/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07653c959da414fae2d050ab7f94e043d9c9b24db42c193fdee0c381da235380
+size 637299
last-checkpoint/global_step2700/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2cd6d33dfac1715cc7310636621c794fb2c4bc82ea9f78747ff361ddfc2d0bc
+size 637171
last-checkpoint/global_step2700/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8b22641256262b58b47ab78d3087c36e2ab93894e42dddb21a15d9010bf7b87
+size 637171
last-checkpoint/global_step2700/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f945c803427e31a4bfb7d69ec05db4391e922a7d4f5c9f65e457b4938ef0f5d
+size 637171
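The eight files added under `global_step2700/` are DeepSpeed ZeRO partitions: one bf16 optimizer-state shard and one model-state shard per data-parallel rank (four ranks here). If a single consolidated fp32 state dict is needed, DeepSpeed's `zero_to_fp32` utilities handle exactly this layout; the sketch below assumes DeepSpeed is installed and the checkpoint directory is intact, and the output filename is an arbitrary choice.

```python
# Sketch: merge the per-rank ZeRO shards under global_step2700 into one fp32
# state dict. get_fp32_state_dict_from_zero_checkpoint is DeepSpeed's helper
# for this; the paths and output filename are assumptions.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step2700"
)
torch.save(state_dict, "consolidated_fp32_step2700.bin")
print(f"consolidated {len(state_dict)} tensors")
```

Because this is a LoRA run, the consolidated dict contains only the trainable adapter parameters, which is why the per-rank model-state files above are so small (~637 KB each).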
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2650
+global_step2700
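`latest` is a one-line plain-text tag file; DeepSpeed reads it to pick the `global_stepN` directory to load when no explicit tag is given, which is why this commit flips it from global_step2650 to global_step2700. For illustration (path is an assumption):

```python
# Sketch: resolve the active checkpoint tag the way DeepSpeed does by default,
# by reading the plain-text `latest` file inside the checkpoint directory.
with open("last-checkpoint/latest") as f:
    tag = f.read().strip()
print(tag)  # expected: "global_step2700" after this commit
```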
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7bf1e520c0ffd0912cdaf36292baf0834dd187b6292436dd343acbd7d39f37df
+oid sha256:9e599331812a34463d102d64a4034a0b702a893f362f752003aa577fe71dcc1d
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:480a3937172137fc1b4a3886bc929d7dd5dd0507aebd6b20448e5d03ffe9f33f
+oid sha256:5ed431e5e71393a0174ad2fd492755f8c1142596f1af3bfe7827c1f8f815dd80
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cc7d6afeed03ecf805019b6cd001604bbcd5e4ef0cbc38e4dc6f5587e48dbf2
+oid sha256:a2e70789f26a9f56b6b779e87cb1a405615af81562a256e5afe579f40972e827
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48c0ace59112a18d7f177314d1a9eb2b6b894f2028df1dd368b8c67c8732e18d
+oid sha256:7c8c18bc74d5211e761da269c814d7da0687633993838ec22e81ac939a14e91b
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bfcf85c736e61a53c653b2d1f3342ce104fa9dc3f2c57b7753ecb2c25635e267
+oid sha256:5e5dcca4048a125fff8fd284657b0498882f3efcb97d36e331842fc3d6d7b6e6
 size 1064
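The four `rng_state_*.pth` files snapshot each rank's Python/NumPy/Torch RNG state and `scheduler.pt` holds the learning-rate scheduler, so a run resumed from this checkpoint continues deterministically on the same LR trajectory. A small sketch for inspecting them; the key names shown are typical of Hugging Face Trainer checkpoints, not guaranteed:

```python
# Sketch: peek into the RNG and LR-scheduler state saved at step 2700.
# weights_only=False is assumed to be needed on newer PyTorch because these
# files contain plain Python/NumPy objects rather than only tensors.
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth",
                       map_location="cpu", weights_only=False)
print(sorted(rng_state.keys()))   # e.g. ['cpu', 'cuda', 'numpy', 'python']

sched_state = torch.load("last-checkpoint/scheduler.pt",
                         map_location="cpu", weights_only=False)
print(sched_state.keys())         # e.g. last_epoch, _last_lr, ...
```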
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4145541489124298,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
-  "epoch": 0.6824620139067731,
+  "epoch": 0.695338655678599,
   "eval_steps": 50,
-  "global_step": 2650,
+  "global_step": 2700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4724,11 +4724,100 @@
       "eval_steps_per_second": 0.795,
       "num_input_tokens_seen": 27827480,
       "step": 2650
+    },
+    {
+      "epoch": 0.6837496780839557,
+      "grad_norm": 4.053406132978294,
+      "learning_rate": 1.2562061892553473e-05,
+      "loss": 0.3189,
+      "num_input_tokens_seen": 27879064,
+      "step": 2655
+    },
+    {
+      "epoch": 0.6850373422611383,
+      "grad_norm": 3.0207067473597937,
+      "learning_rate": 1.2401330199290367e-05,
+      "loss": 0.2458,
+      "num_input_tokens_seen": 27931864,
+      "step": 2660
+    },
+    {
+      "epoch": 0.6863250064383208,
+      "grad_norm": 4.476781511295854,
+      "learning_rate": 1.224148772011346e-05,
+      "loss": 0.3055,
+      "num_input_tokens_seen": 27984408,
+      "step": 2665
+    },
+    {
+      "epoch": 0.6876126706155035,
+      "grad_norm": 10.00871121504839,
+      "learning_rate": 1.2082538235320929e-05,
+      "loss": 0.2993,
+      "num_input_tokens_seen": 28037368,
+      "step": 2670
+    },
+    {
+      "epoch": 0.6889003347926861,
+      "grad_norm": 8.5963867396194,
+      "learning_rate": 1.1924485504091565e-05,
+      "loss": 0.3572,
+      "num_input_tokens_seen": 28090768,
+      "step": 2675
+    },
+    {
+      "epoch": 0.6901879989698687,
+      "grad_norm": 2.8349545388422857,
+      "learning_rate": 1.1767333264395736e-05,
+      "loss": 0.4043,
+      "num_input_tokens_seen": 28142432,
+      "step": 2680
+    },
+    {
+      "epoch": 0.6914756631470512,
+      "grad_norm": 3.857351389318571,
+      "learning_rate": 1.1611085232907132e-05,
+      "loss": 0.3288,
+      "num_input_tokens_seen": 28194896,
+      "step": 2685
+    },
+    {
+      "epoch": 0.6927633273242338,
+      "grad_norm": 2.9121673846993943,
+      "learning_rate": 1.14557451049147e-05,
+      "loss": 0.3491,
+      "num_input_tokens_seen": 28247264,
+      "step": 2690
+    },
+    {
+      "epoch": 0.6940509915014165,
+      "grad_norm": 5.691957290096305,
+      "learning_rate": 1.1301316554235397e-05,
+      "loss": 0.2881,
+      "num_input_tokens_seen": 28299864,
+      "step": 2695
+    },
+    {
+      "epoch": 0.695338655678599,
+      "grad_norm": 3.366628982199851,
+      "learning_rate": 1.114780323312724e-05,
+      "loss": 0.3076,
+      "num_input_tokens_seen": 28352368,
+      "step": 2700
+    },
+    {
+      "epoch": 0.695338655678599,
+      "eval_loss": 0.4338160753250122,
+      "eval_runtime": 38.6118,
+      "eval_samples_per_second": 3.108,
+      "eval_steps_per_second": 0.777,
+      "num_input_tokens_seen": 28352368,
+      "step": 2700
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 27827480,
+  "num_input_tokens_seen": 28352368,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -4743,7 +4832,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1835918893252608.0,
+  "total_flos": 1870594227044352.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null