ben81828 committed · Commit 64da39a · verified · 1 Parent(s): 007d573

Training in progress, step 2950, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e2acd206b2dadd260ab38262d5014a0182f253ac5bb1be9e70b1fc1dcf9a565
+oid sha256:728e00b9bd6d98b9b5a2353e9d15e1290fa4bfd90b752c01f9851f89b2b4a800
 size 29034840
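
All of the checkpoint payloads in this commit are tracked with Git LFS, so the diff only shows pointer files (a `version` line, the blob's `oid sha256`, and its `size` in bytes) rather than the binary contents. As a minimal sketch, assuming the actual blobs have already been fetched (e.g. with `git lfs pull`), a downloaded file can be checked against its pointer like this (the `verify_lfs_blob` helper is hypothetical; the values are copied from the adapter_model.safetensors pointer above):

```python
import hashlib
from pathlib import Path


def verify_lfs_blob(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the sha256 oid and size from its LFS pointer."""
    path = Path(blob_path)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Pointer values from the adapter_model.safetensors diff in this commit.
print(verify_lfs_blob(
    "last-checkpoint/adapter_model.safetensors",
    "728e00b9bd6d98b9b5a2353e9d15e1290fa4bfd90b752c01f9851f89b2b4a800",
    29034840,
))
```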
last-checkpoint/global_step2950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9854bb199ec762c6a50aa4e7af9a7bee8e571c29595fb71051fefa93989f095b
+size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fe767231cb19cde51cee8aa8e8c15166b25ae817d64afaf257416294df828d4
+size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edd4c653099ae8e71867f4c6a65ae4429a301e9eb0db412589121157bc2108b0
+size 43429616
last-checkpoint/global_step2950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:310543773c29d4e030219be557fdb78886e7e150e065471e0bac57fd9c0a0966
+size 43429616
last-checkpoint/global_step2950/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:512575152585707305753bb11f9e58a6063a289cf07716493a20893fbd1142b9
+size 637299
last-checkpoint/global_step2950/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd557eb4b9edd0a1910792057338dca17083fa9a8f40f41e28c2c90ec22a93ae
+size 637171
last-checkpoint/global_step2950/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbd7c1168f13cf3fc959af479e28a5a467ab4dd67204cffc114ba3660c0517c0
+size 637171
last-checkpoint/global_step2950/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06808762d99a725be346c2151fb638282bfb9e97c3c8bcd58aaef62a9f5b3815
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2900
+global_step2950
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce92cea831a04716b4b472f1dad1cc986b2021dee9aac057217f5d455b27ec42
+oid sha256:bfc5d4e344535f1dd0ff5275633ec3d55bb6249e432442365ff24445d82ec35c
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cddb73bbdf0f6f6a2c3182d70f7ad5d587353b164c08dd4f383b940d6b61e4e
+oid sha256:8a898928042c09dc123c1025557279997043b7f607bc91ee2ff2d4b4d2b9f1ba
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b24b508e466beb446d37377d2a04757d3bc2b4230de3ac56b25a65d7753a74c1
+oid sha256:6923d07d979aa78d66765208f598662fd5092b5227cd87920feedfb923fa375f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4c6a18a7de8b25b21673ba2ff7efbaaae00ec8c453c7975b467c1df87b87022
+oid sha256:e9ba35b9b3c512fbe857d909557329ba47dbefe5f521014123c05901c32edb6d
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8dda654474af0b5740253998af53f5373a7c152f4ee919c2beac18e6e6497944
+oid sha256:73c86b9086b6b89862ebd9f93fabc942ae3b46fef2af762a390db6fe9f845897
 size 1064
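
The remaining change is last-checkpoint/trainer_state.json (diff below), which carries the running training log: epoch, global_step, best_metric, the per-step loss/learning-rate entries, and the evaluation at step 2950. As a minimal sketch, assuming the checkpoint has been downloaded locally (the path is illustrative), the state can be inspected like this:

```python
import json

# Illustrative local path to the checkpoint pulled from this repo.
with open("last-checkpoint/trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print("global_step:", state["global_step"])                # 2950 after this commit
print("epoch:", state["epoch"])                            # ~0.7597
print("best_metric:", state["best_metric"])                # 0.41455... at checkpoint-2350
print("best checkpoint:", state["best_model_checkpoint"])

# Per-step entries sit under the standard Trainer "log_history" key;
# the last one here is the step-2950 evaluation record.
latest = state["log_history"][-1]
print("latest step:", latest["step"], "eval_loss:", latest.get("eval_loss"))
```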
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4145541489124298,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-2350",
-  "epoch": 0.7468452227659027,
+  "epoch": 0.7597218645377286,
   "eval_steps": 50,
-  "global_step": 2900,
+  "global_step": 2950,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5169,11 +5169,100 @@
       "eval_steps_per_second": 0.78,
       "num_input_tokens_seen": 30455480,
       "step": 2900
+    },
+    {
+      "epoch": 0.7481328869430852,
+      "grad_norm": 2.3992253819845457,
+      "learning_rate": 5.683814324910685e-06,
+      "loss": 0.3063,
+      "num_input_tokens_seen": 30507096,
+      "step": 2905
+    },
+    {
+      "epoch": 0.7494205511202678,
+      "grad_norm": 1.9374646045639132,
+      "learning_rate": 5.571740879947979e-06,
+      "loss": 0.2694,
+      "num_input_tokens_seen": 30558760,
+      "step": 2910
+    },
+    {
+      "epoch": 0.7507082152974505,
+      "grad_norm": 2.935921090645564,
+      "learning_rate": 5.4607181698661634e-06,
+      "loss": 0.2578,
+      "num_input_tokens_seen": 30612024,
+      "step": 2915
+    },
+    {
+      "epoch": 0.751995879474633,
+      "grad_norm": 3.630419333089186,
+      "learning_rate": 5.35074882036869e-06,
+      "loss": 0.3526,
+      "num_input_tokens_seen": 30665272,
+      "step": 2920
+    },
+    {
+      "epoch": 0.7532835436518156,
+      "grad_norm": 5.477070657190314,
+      "learning_rate": 5.241835432246889e-06,
+      "loss": 0.2965,
+      "num_input_tokens_seen": 30717104,
+      "step": 2925
+    },
+    {
+      "epoch": 0.7545712078289982,
+      "grad_norm": 4.515862685899356,
+      "learning_rate": 5.133980581318459e-06,
+      "loss": 0.3122,
+      "num_input_tokens_seen": 30769656,
+      "step": 2930
+    },
+    {
+      "epoch": 0.7558588720061807,
+      "grad_norm": 2.2550437769263585,
+      "learning_rate": 5.027186818366542e-06,
+      "loss": 0.2968,
+      "num_input_tokens_seen": 30822016,
+      "step": 2935
+    },
+    {
+      "epoch": 0.7571465361833634,
+      "grad_norm": 5.093463419929814,
+      "learning_rate": 4.921456669079366e-06,
+      "loss": 0.3536,
+      "num_input_tokens_seen": 30873336,
+      "step": 2940
+    },
+    {
+      "epoch": 0.758434200360546,
+      "grad_norm": 3.53245369843195,
+      "learning_rate": 4.816792633990569e-06,
+      "loss": 0.2721,
+      "num_input_tokens_seen": 30926104,
+      "step": 2945
+    },
+    {
+      "epoch": 0.7597218645377286,
+      "grad_norm": 6.403963575911819,
+      "learning_rate": 4.713197188420026e-06,
+      "loss": 0.2899,
+      "num_input_tokens_seen": 30979312,
+      "step": 2950
+    },
+    {
+      "epoch": 0.7597218645377286,
+      "eval_loss": 0.4720001518726349,
+      "eval_runtime": 38.5709,
+      "eval_samples_per_second": 3.111,
+      "eval_steps_per_second": 0.778,
+      "num_input_tokens_seen": 30979312,
+      "step": 2950
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 30455480,
+  "num_input_tokens_seen": 30979312,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5188,7 +5277,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2009329826529280.0,
+  "total_flos": 2043857780342784.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null