ben81828 committed
Commit 976804f · verified · 1 Parent(s): a3c59a0

Training in progress, step 1550, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b0263e90dbe014ac70c526fa71d2fd7a1bebb2c4fe954d285088a83f154c10e0
+ oid sha256:f1764f403b39c14f233fd2d6443f410cab81d66e3cc9f1d476e6e55a6642ff67
  size 29034840
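The CHANGED entries in this diff touch Git LFS pointer files rather than the binary payloads themselves: each pointer records the LFS spec version, the sha256 oid of the object, and its size in bytes. A minimal sketch, assuming a local clone of this repository with the LFS objects pulled, for checking that a downloaded file matches the oid in its pointer (the path and expected oid below are taken from this diff; adjust them as needed):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumed local copy of the updated adapter weights from this commit.
local_file = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "f1764f403b39c14f233fd2d6443f410cab81d66e3cc9f1d476e6e55a6642ff67"

if local_file.exists():
    actual = sha256_of(local_file)
    print("match" if actual == expected_oid else f"mismatch: {actual}")
```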
last-checkpoint/global_step1550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:692ec3b98c0932e8a11070876eb1fa1d8a671e4eac1d43ece644fabc90eec0ca
+ size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b61c26876bdfdb016a6b966ea1cfbd5f94dd5ee6d99f15a124b9bfe78ce213f
+ size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:feeff9860ed00084fe5e75ca93276989e3037881afe67c9162b613db6e1c1951
+ size 43429616
last-checkpoint/global_step1550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd2fb29dda5fe5770a9a3b31c2dddcba2c2c0c1c4abe476dc6b8b1bafde6c8c5
+ size 43429616
last-checkpoint/global_step1550/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a43dd54cbbc70405ab57738adb3ec4581722d997b526b922cea67d12d7a3154
+ size 637299
last-checkpoint/global_step1550/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2160593af1ce08c3f12b9b90e1c3cbef9f94fa69d7b34c91af296952a4c32634
+ size 637171
last-checkpoint/global_step1550/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a26b6fdb593b50a1684d8681f4281e3d55be3afd1e2403c04c5de137e4315716
+ size 637171
last-checkpoint/global_step1550/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4062e6de2aea9cf48856d37c0a6068948c993876856dc4d6c5e69e86ba09a28e
+ size 637171
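The eight ADDED files above are the DeepSpeed ZeRO shards for step 1550: one `bf16_zero_pp_rank_{r}_mp_rank_00_optim_states.pt` and one `zero_pp_rank_{r}_mp_rank_00_model_states.pt` per data-parallel rank, four ranks in total. A minimal sketch, assuming a local copy of the checkpoint and PyTorch installed, for peeking at a shard's top-level structure on the CPU:

```python
import torch

# Assumed local path to one of the shards added in this commit.
shard = "last-checkpoint/global_step1550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt"

# map_location="cpu" avoids any GPU allocation; recent PyTorch releases may also
# require weights_only=False because DeepSpeed state files are not plain tensors.
state = torch.load(shard, map_location="cpu", weights_only=False)

print(type(state))
if isinstance(state, dict):
    print(sorted(state.keys()))
```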
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1500
+ global_step1550
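`last-checkpoint/latest` is the tag DeepSpeed consults to decide which `global_step*` directory to load when resuming; this commit advances it from global_step1500 to global_step1550. A minimal sketch, assuming a local clone, that resolves the tag and lists the shard files it points at:

```python
from pathlib import Path

ckpt_dir = Path("last-checkpoint")                 # assumed local clone of this checkpoint
tag = (ckpt_dir / "latest").read_text().strip()    # e.g. "global_step1550"
step_dir = ckpt_dir / tag

print(f"resume tag: {tag}")
for shard in sorted(step_dir.glob("*.pt")):
    print(shard.name, shard.stat().st_size, "bytes")
```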
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd3566049ad1f65d2f434d990deb65584d2b2dcb1aac8e89c68ea37dc533eab7
+ oid sha256:7f1e48a120d69830576f7b582aa6cc46f0ca41d30015a7a674eaec3dcdfc0f09
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2f96a47dc4176412bc893ccb49c004c8fa1cc8c306d67689d87ed20944233c62
+ oid sha256:4dbabb9273d3983e52a4a981b5f60f8c2e19da375765d05bb9f2caad284b9652
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ebd9fa3db1079ccf750b71f4eeedbf1f04422fc748026a4b866afe133f9fbfd1
+ oid sha256:554ac925bb9c9ea292b7a41caac1cf75285511cf8aa440f37090891ee457a178
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:383e9e252cc8292eef0120c964bdc8033972e800c085c97c42af97379e6b4b5c
+ oid sha256:5be5e00123fc0a321e41599b50e07be02f4c165504c601192e5c73f5f5437c30
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c2260420369b34b5fd5cfacd3f600b6daef0af14020a30634db4ed5d6637b6b
+ oid sha256:8eb069683e7f84aa36296476346fc663361d9b05ad7b09b71f22f44afdb0ea48
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4339977502822876,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-1250",
- "epoch": 0.38629925315477726,
+ "epoch": 0.39917589492660316,
  "eval_steps": 50,
- "global_step": 1500,
+ "global_step": 1550,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2677,11 +2677,100 @@
  "eval_steps_per_second": 0.781,
  "num_input_tokens_seen": 15744848,
  "step": 1500
+ },
+ {
+ "epoch": 0.3875869173319598,
+ "grad_norm": 5.097132399629058,
+ "learning_rate": 6.344911366961934e-05,
+ "loss": 0.4558,
+ "num_input_tokens_seen": 15797632,
+ "step": 1505
+ },
+ {
+ "epoch": 0.3888745815091424,
+ "grad_norm": 4.502325593575991,
+ "learning_rate": 6.321475976211266e-05,
+ "loss": 0.4518,
+ "num_input_tokens_seen": 15850040,
+ "step": 1510
+ },
+ {
+ "epoch": 0.390162245686325,
+ "grad_norm": 6.425152572566654,
+ "learning_rate": 6.298009332358856e-05,
+ "loss": 0.4092,
+ "num_input_tokens_seen": 15902496,
+ "step": 1515
+ },
+ {
+ "epoch": 0.3914499098635076,
+ "grad_norm": 3.968135032555422,
+ "learning_rate": 6.274511990394294e-05,
+ "loss": 0.478,
+ "num_input_tokens_seen": 15954936,
+ "step": 1520
+ },
+ {
+ "epoch": 0.3927375740406902,
+ "grad_norm": 4.636757769906518,
+ "learning_rate": 6.250984506033183e-05,
+ "loss": 0.4294,
+ "num_input_tokens_seen": 16007624,
+ "step": 1525
+ },
+ {
+ "epoch": 0.3940252382178728,
+ "grad_norm": 2.7967900169696347,
+ "learning_rate": 6.227427435703997e-05,
+ "loss": 0.3846,
+ "num_input_tokens_seen": 16059440,
+ "step": 1530
+ },
+ {
+ "epoch": 0.39531290239505534,
+ "grad_norm": 2.983520749639549,
+ "learning_rate": 6.203841336534924e-05,
+ "loss": 0.4372,
+ "num_input_tokens_seen": 16111136,
+ "step": 1535
+ },
+ {
+ "epoch": 0.39660056657223797,
+ "grad_norm": 8.364510466670477,
+ "learning_rate": 6.180226766340688e-05,
+ "loss": 0.484,
+ "num_input_tokens_seen": 16163976,
+ "step": 1540
+ },
+ {
+ "epoch": 0.39788823074942054,
+ "grad_norm": 4.45878743373729,
+ "learning_rate": 6.156584283609359e-05,
+ "loss": 0.3965,
+ "num_input_tokens_seen": 16217192,
+ "step": 1545
+ },
+ {
+ "epoch": 0.39917589492660316,
+ "grad_norm": 2.6831990995391717,
+ "learning_rate": 6.132914447489137e-05,
+ "loss": 0.3872,
+ "num_input_tokens_seen": 16269896,
+ "step": 1550
+ },
+ {
+ "epoch": 0.39917589492660316,
+ "eval_loss": 0.4416767656803131,
+ "eval_runtime": 38.4671,
+ "eval_samples_per_second": 3.12,
+ "eval_steps_per_second": 0.78,
+ "num_input_tokens_seen": 16269896,
+ "step": 1550
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 15744848,
+ "num_input_tokens_seen": 16269896,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2696,7 +2785,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1038788106387456.0,
+ "total_flos": 1073418423500800.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null