ben81828 committed
Commit f0912cb · verified · 1 Parent(s): db7288f

Training in progress, step 2850, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:656b339f848f4a5e39a2f325df5573bc98a1ce1de9df8113a07cd8eeae54367e
+ oid sha256:01a330ed632b021642148062b09d22bb079b73a39c22d87b4a866893ac04d712
  size 29034840
last-checkpoint/global_step2850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50fe4a2c735a04b54f2ad7e515f79d5744cbe9f8cd188ad98ce2cec5eb4279a7
+ size 43429616
last-checkpoint/global_step2850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8baf9fa8afcf57a447a5eba2eb736a87d1c5a4a46f8bf569112a16ee452f4a68
+ size 43429616
last-checkpoint/global_step2850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45e06a2fa0989959de6ce4c155e0fc226e7f4cfa015e32b83a6fc4007d3246f3
+ size 43429616
last-checkpoint/global_step2850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14341d8679cc02bf357ae30fd671c5bafae090d513ab2cb8f5e6db57f60be75f
+ size 43429616
last-checkpoint/global_step2850/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6812b284fbf3417e77fd86a27d65a3311729d5aa0ad1760be1c080039b6a22c
+ size 637299
last-checkpoint/global_step2850/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04a961275b937b710afe74a7a3b9fd6a401f9924641e566c39654df096230565
+ size 637171
last-checkpoint/global_step2850/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:911e755a27628af9831ca453ecfd7f1f364d53b6ef98c992957c2fffb659a58c
+ size 637171
last-checkpoint/global_step2850/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20554ce78011d819cef737623a9e66ee3291f99e51e1ff343f472c5d5f41a0bb
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2800
+ global_step2850
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49db5a9fd0c84d580c671e52905ebeffc155b36537e76ff966d2e82906708999
+ oid sha256:ff8dba2341c0517760edfde50521977f02a5bd982ffd3bc03de6109439c4f478
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c8ca224562d8d97aaa131b3516288bb99f68d7dcf62170494326662bda0bb206
+ oid sha256:d2bf831df9fbade9ac2a8db79798bc2a7b1afb85a78a6e463ec7a7db4acc0f8e
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:56e86a11c89dba78d60e1b2a1855a651b90a5a22ef131ce65d26af83668c154e
+ oid sha256:8602ff0a0fa366d46b61c0ef2b23ce468387898cf2bc1027e5450de73ddf647f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3dd63019c923e9692431619aced46b91aaf3fd22e1c22ec0a64347f2fe635a0e
+ oid sha256:4bb51d675cf23603b1b765cd645f53d6b66ddb104d56d48674e9c798e086f696
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:61a7c605cf80a46d2e0c661d5469c16671b681f268e3ecd5d1d64188653910db
+ oid sha256:28cb8f1d536b1fc196e353ec37fd07d574fab0a464ddd8b31c73c59dcab3c03b
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4145541489124298,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
- "epoch": 0.7210919392222508,
+ "epoch": 0.7339685809940767,
  "eval_steps": 50,
- "global_step": 2800,
+ "global_step": 2850,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4991,11 +4991,100 @@
  "eval_steps_per_second": 0.789,
  "num_input_tokens_seen": 29402040,
  "step": 2800
+ },
+ {
+ "epoch": 0.7223796033994334,
+ "grad_norm": 3.561696884073749,
+ "learning_rate": 8.141676086873572e-06,
+ "loss": 0.2538,
+ "num_input_tokens_seen": 29455456,
+ "step": 2805
+ },
+ {
+ "epoch": 0.723667267576616,
+ "grad_norm": 2.941091025150912,
+ "learning_rate": 8.009177137203794e-06,
+ "loss": 0.3374,
+ "num_input_tokens_seen": 29507136,
+ "step": 2810
+ },
+ {
+ "epoch": 0.7249549317537987,
+ "grad_norm": 1.7626408698187983,
+ "learning_rate": 7.877671276687898e-06,
+ "loss": 0.3303,
+ "num_input_tokens_seen": 29558760,
+ "step": 2815
+ },
+ {
+ "epoch": 0.7262425959309812,
+ "grad_norm": 2.788131053787238,
+ "learning_rate": 7.747161615458902e-06,
+ "loss": 0.2834,
+ "num_input_tokens_seen": 29612000,
+ "step": 2820
+ },
+ {
+ "epoch": 0.7275302601081638,
+ "grad_norm": 3.8899073323572444,
+ "learning_rate": 7.617651240089546e-06,
+ "loss": 0.2746,
+ "num_input_tokens_seen": 29664472,
+ "step": 2825
+ },
+ {
+ "epoch": 0.7288179242853464,
+ "grad_norm": 1.3611659955678468,
+ "learning_rate": 7.489143213519301e-06,
+ "loss": 0.315,
+ "num_input_tokens_seen": 29716440,
+ "step": 2830
+ },
+ {
+ "epoch": 0.7301055884625289,
+ "grad_norm": 5.373259186650034,
+ "learning_rate": 7.361640574981937e-06,
+ "loss": 0.2877,
+ "num_input_tokens_seen": 29769248,
+ "step": 2835
+ },
+ {
+ "epoch": 0.7313932526397116,
+ "grad_norm": 2.796854999712465,
+ "learning_rate": 7.2351463399336735e-06,
+ "loss": 0.2953,
+ "num_input_tokens_seen": 29821968,
+ "step": 2840
+ },
+ {
+ "epoch": 0.7326809168168942,
+ "grad_norm": 6.039730307609144,
+ "learning_rate": 7.109663499981834e-06,
+ "loss": 0.2709,
+ "num_input_tokens_seen": 29875104,
+ "step": 2845
+ },
+ {
+ "epoch": 0.7339685809940767,
+ "grad_norm": 4.572150266141393,
+ "learning_rate": 6.985195022814067e-06,
+ "loss": 0.3024,
+ "num_input_tokens_seen": 29928032,
+ "step": 2850
+ },
+ {
+ "epoch": 0.7339685809940767,
+ "eval_loss": 0.47043517231941223,
+ "eval_runtime": 37.451,
+ "eval_samples_per_second": 3.204,
+ "eval_steps_per_second": 0.801,
+ "num_input_tokens_seen": 29928032,
+ "step": 2850
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 29402040,
+ "num_input_tokens_seen": 29928032,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -5010,7 +5099,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1939869403709440.0,
+ "total_flos": 1974549322924032.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null