ben81828 committed
Commit ce063fa · verified · 1 Parent(s): a5fb203

Training in progress, step 1000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75f1851db882a056a02f5437327881cf6ebb42ca6898a9f9c061a473b54ce23c
+oid sha256:c6e45eb274c1273932226b11445e2fd07b7c7ebefc12bc54da671bc0cebf939d
 size 29034840
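Each entry in this commit is a Git LFS pointer file (a `version` line, a `sha256` oid, and a byte `size`) rather than the binary weights themselves. A minimal sketch, assuming the corresponding blob has been downloaded locally, of parsing such a pointer and checking a blob against it (the paths are hypothetical placeholders):

```python
# Minimal sketch: parse a Git LFS pointer and verify a downloaded blob
# against it. Paths are hypothetical placeholders for illustration.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Read the key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields  # e.g. {"version": ..., "oid": "sha256:...", "size": "29034840"}

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check the blob's size and sha256 digest against the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    return (
        len(data) == int(fields["size"])
        and fields["oid"] == "sha256:" + hashlib.sha256(data).hexdigest()
    )
```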
last-checkpoint/global_step1000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:330eb06c9db21349ddc0e66a8debf20a48792c47ea58a16a47fd40af17274126
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68a41236a0e7809eed8239767c0c2f900b61faf2e68412fb5bda170bf393dad0
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d861b816eaa9d54fe5cd1c19cf17691f7335d8ec708639db00653b8cdc39ef7
+size 43429616
last-checkpoint/global_step1000/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61864cd72776620321c44f1a8ffc804645f013becee885e61b4d278bc0916514
+size 43429616
last-checkpoint/global_step1000/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acd77f7915b0d991c0851d79075b241bd64c36eeb0d1e0e2f985a499c290a497
+size 637299
last-checkpoint/global_step1000/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b3f26bda9e2a2460f4f3cc5008eaa37999cec73f83483980476af2f03541f21
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7f0d6d4da9d606901f6d8359ce5b27f2560634e6ffb9fc47cc3a2d7bea3f975
+size 637171
last-checkpoint/global_step1000/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c1c7eda36ab2045cf9de55c87c4f12c530a7dab4d03922e901e1a447629a96e
+size 637171
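The `bf16_zero_pp_rank_*_optim_states.pt` and `zero_pp_rank_*_model_states.pt` files added above are the per-rank shards DeepSpeed ZeRO writes for a 4-GPU run. A minimal sketch, assuming DeepSpeed is installed and `last-checkpoint/` has been downloaded locally, of consolidating the shards into a single fp32 state dict with DeepSpeed's `zero_to_fp32` helper:

```python
# Minimal sketch, assuming DeepSpeed is installed and last-checkpoint/
# is available locally with the global_step1000 shards shown above.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# tag="global_step1000" matches last-checkpoint/latest (see the diff below);
# with tag=None DeepSpeed reads that file itself.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step1000"
)
print(f"consolidated {len(state_dict)} fp32 tensors")
```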
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step950
+global_step1000
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
+oid sha256:7d73dfcc09cf3d6f08149535e03920234febc15f7e9a166987f3bc01ee871abf
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
+oid sha256:4396a64b6da4868d060d1e3c7c9ccb12c39d63bd0f7b146d2512400aff4c769c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
+oid sha256:95877efc8fb5eb302819ee7effca4222569cdcfdebb9fa5d9846e68ed9e833fe
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
+oid sha256:e9fa4f23377f00fdde731da68a8690098617a1fdd912e03cdaa8bde87c493179
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9f224baf5bd2044314606c1d88f84cce32f1b37c43c15835b14e72f6a72a4fc
+oid sha256:bce7739c5bb5cf50e8f1c942e662e33e6aa589036d55e6fddd63bdf3171c1cae
 size 1064
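The `rng_state_*.pth`, `scheduler.pt`, and `trainer_state.json` updates are what allow training to resume deterministically from step 1000. A minimal sketch, assuming a `transformers.Trainer` for this run has already been constructed as `trainer` (a hypothetical variable name):

```python
# Minimal sketch, assuming `trainer` is an already-configured transformers.Trainer.
# Resuming restores the ZeRO optimizer shards, LR scheduler, per-rank RNG
# states, and the trainer_state.json shown below.
trainer.train(resume_from_checkpoint="last-checkpoint")
```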
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.49604204297065735,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-900",
-  "epoch": 0.24465619366469224,
+  "epoch": 0.25753283543651817,
   "eval_steps": 50,
-  "global_step": 950,
+  "global_step": 1000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1698,11 +1698,100 @@
       "eval_steps_per_second": 0.768,
       "num_input_tokens_seen": 9970600,
       "step": 950
+    },
+    {
+      "epoch": 0.24594385784187484,
+      "grad_norm": 2.8827978223065363,
+      "learning_rate": 8.612053338817581e-05,
+      "loss": 0.4549,
+      "num_input_tokens_seen": 10022248,
+      "step": 955
+    },
+    {
+      "epoch": 0.24723152201905743,
+      "grad_norm": 6.662877258417003,
+      "learning_rate": 8.595197232161824e-05,
+      "loss": 0.4791,
+      "num_input_tokens_seen": 10075280,
+      "step": 960
+    },
+    {
+      "epoch": 0.24851918619624003,
+      "grad_norm": 8.140970355143077,
+      "learning_rate": 8.578256098561275e-05,
+      "loss": 0.4833,
+      "num_input_tokens_seen": 10128392,
+      "step": 965
+    },
+    {
+      "epoch": 0.24980685037342262,
+      "grad_norm": 3.243184767888501,
+      "learning_rate": 8.561230338676239e-05,
+      "loss": 0.4672,
+      "num_input_tokens_seen": 10180720,
+      "step": 970
+    },
+    {
+      "epoch": 0.2510945145506052,
+      "grad_norm": 6.588760068173114,
+      "learning_rate": 8.544120355168451e-05,
+      "loss": 0.5205,
+      "num_input_tokens_seen": 10233256,
+      "step": 975
+    },
+    {
+      "epoch": 0.2523821787277878,
+      "grad_norm": 2.6240987196110837,
+      "learning_rate": 8.526926552691544e-05,
+      "loss": 0.5124,
+      "num_input_tokens_seen": 10284928,
+      "step": 980
+    },
+    {
+      "epoch": 0.2536698429049704,
+      "grad_norm": 8.242761558538728,
+      "learning_rate": 8.509649337881483e-05,
+      "loss": 0.5034,
+      "num_input_tokens_seen": 10338208,
+      "step": 985
+    },
+    {
+      "epoch": 0.254957507082153,
+      "grad_norm": 8.922137566500533,
+      "learning_rate": 8.492289119346943e-05,
+      "loss": 0.5226,
+      "num_input_tokens_seen": 10390224,
+      "step": 990
+    },
+    {
+      "epoch": 0.25624517125933555,
+      "grad_norm": 4.922275874717211,
+      "learning_rate": 8.474846307659658e-05,
+      "loss": 0.5399,
+      "num_input_tokens_seen": 10443080,
+      "step": 995
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "grad_norm": 6.866585614783304,
+      "learning_rate": 8.457321315344694e-05,
+      "loss": 0.483,
+      "num_input_tokens_seen": 10495592,
+      "step": 1000
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "eval_loss": 0.5305114388465881,
+      "eval_runtime": 38.9297,
+      "eval_samples_per_second": 3.082,
+      "eval_steps_per_second": 0.771,
+      "num_input_tokens_seen": 10495592,
+      "step": 1000
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 9970600,
+  "num_input_tokens_seen": 10495592,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1717,7 +1806,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 657761558069248.0,
+  "total_flos": 692447581306880.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
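A minimal sketch, assuming `last-checkpoint/trainer_state.json` is available locally, of reading the updated log history to pull out the training-loss points and the eval result recorded at step 1000 (`log_history` is the standard Transformers key that holds the entries shown in this diff):

```python
# Minimal sketch, assuming last-checkpoint/trainer_state.json is local.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print("last train points:", train_points[-3:])
print("last eval point:", eval_points[-1])  # expected: (1000, 0.5305...)
```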