ben81828 committed
Commit 52cfcf1 · verified · 1 Parent(s): ed7d58b

Training in progress, step 650, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -19,7 +19,7 @@
   "r": 8,
   "rank_pattern": {},
   "revision": null,
-  "target_modules": "^(?!.*patch_embed).*(?:down_proj|q_proj|fc2|o_proj|gate_proj|up_proj|k_proj|fc1|v_proj|qkv|proj).*",
+  "target_modules": "^(?!.*patch_embed).*(?:up_proj|proj|fc2|fc1|qkv|o_proj|k_proj|gate_proj|v_proj|down_proj|q_proj).*",
   "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": false
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3607f7be0eeae44a002686249cfd8440e2093656b0084bace7dafe752090325
+ oid sha256:a040fdacc28b9de2977636cd1c1956bffb7f97df44e5bd2640662f6a98fa674b
  size 29034840
last-checkpoint/global_step650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d38ea936450c0cdde6dfdfd8c877bad13d30215a768bfb9d4db61232d766ba53
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d00a58f420ce44852dde4e0d1724f02377fdbe090859b41ed6eaf672a3deea48
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2839fddbe80dd74b71e29f35cf804ba105c77ad6fb7db1262c16ead07f282a73
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de1ac5430078e2adeec886f04da5b2323e6bdf488c2942cde8fd74eeb39be709
+ size 43429616
last-checkpoint/global_step650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f9450a0ced61d6c33a66cdf1b2511022f0ec24416b17e1f16ed49dc98ff62dd
+ size 637299
last-checkpoint/global_step650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d01b9093c4ff061c6453535b14beda5a9d96a531472a58effac685152d63de95
+ size 637171
last-checkpoint/global_step650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3bfb0ef98b44969e473f6d5c11a92b5a607c48dc2a278b5d72591611de4079
+ size 637171
last-checkpoint/global_step650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a41c42f45b9260afbaae285e2a90de1413db2108ea931952bc69942ea6f19e1e
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step600
+ global_step650
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
+ oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
+ oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
+ oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
+ oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b76b388bede074656df32b92902ac42b965557bfee0c930366af07d8382b1b4a
+ oid sha256:8cd94ecf5c982ee0e060d3e07a575ce03dc3b0f289b5e32a1f65d3b6366a8a0e
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
-   "best_metric": 0.5832681059837341,
-   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-550",
-   "epoch": 0.1545197012619109,
+   "best_metric": 0.5542036890983582,
+   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-650",
+   "epoch": 0.1673963430337368,
    "eval_steps": 50,
-   "global_step": 600,
+   "global_step": 650,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1075,11 +1075,100 @@
      "eval_steps_per_second": 0.783,
      "num_input_tokens_seen": 6299656,
      "step": 600
+   },
+   {
+     "epoch": 0.1558073654390935,
+     "grad_norm": 5.90634634073773,
+     "learning_rate": 9.55911532374151e-05,
+     "loss": 0.6106,
+     "num_input_tokens_seen": 6351680,
+     "step": 605
+   },
+   {
+     "epoch": 0.15709502961627608,
+     "grad_norm": 3.5429043559071034,
+     "learning_rate": 9.549077815930636e-05,
+     "loss": 0.5812,
+     "num_input_tokens_seen": 6403648,
+     "step": 610
+   },
+   {
+     "epoch": 0.15838269379345868,
+     "grad_norm": 2.8753548663225144,
+     "learning_rate": 9.538932721758474e-05,
+     "loss": 0.5992,
+     "num_input_tokens_seen": 6456328,
+     "step": 615
+   },
+   {
+     "epoch": 0.15967035797064125,
+     "grad_norm": 2.4013005755622467,
+     "learning_rate": 9.528680281157999e-05,
+     "loss": 0.587,
+     "num_input_tokens_seen": 6509024,
+     "step": 620
+   },
+   {
+     "epoch": 0.16095802214782384,
+     "grad_norm": 3.860358696946306,
+     "learning_rate": 9.518320736600943e-05,
+     "loss": 0.5836,
+     "num_input_tokens_seen": 6561336,
+     "step": 625
+   },
+   {
+     "epoch": 0.16224568632500644,
+     "grad_norm": 3.187917212328382,
+     "learning_rate": 9.507854333092063e-05,
+     "loss": 0.5913,
+     "num_input_tokens_seen": 6614024,
+     "step": 630
+   },
+   {
+     "epoch": 0.16353335050218903,
+     "grad_norm": 3.5342177024321586,
+     "learning_rate": 9.497281318163346e-05,
+     "loss": 0.5693,
+     "num_input_tokens_seen": 6666416,
+     "step": 635
+   },
+   {
+     "epoch": 0.16482101467937163,
+     "grad_norm": 3.90374612709263,
+     "learning_rate": 9.486601941868154e-05,
+     "loss": 0.572,
+     "num_input_tokens_seen": 6718200,
+     "step": 640
+   },
+   {
+     "epoch": 0.1661086788565542,
+     "grad_norm": 4.4270591027201665,
+     "learning_rate": 9.475816456775313e-05,
+     "loss": 0.6111,
+     "num_input_tokens_seen": 6771256,
+     "step": 645
+   },
+   {
+     "epoch": 0.1673963430337368,
+     "grad_norm": 5.04761388655614,
+     "learning_rate": 9.464925117963133e-05,
+     "loss": 0.5959,
+     "num_input_tokens_seen": 6824008,
+     "step": 650
+   },
+   {
+     "epoch": 0.1673963430337368,
+     "eval_loss": 0.5542036890983582,
+     "eval_runtime": 68.9048,
+     "eval_samples_per_second": 1.742,
+     "eval_steps_per_second": 0.435,
+     "num_input_tokens_seen": 6824008,
+     "step": 650
    }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 6299656,
+ "num_input_tokens_seen": 6824008,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1094,7 +1183,7 @@
      "attributes": {}
    }
  },
- "total_flos": 415517873799168.0,
+ "total_flos": 450087645085696.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:250d976171954308b1426496d02274452f4f7036ecb43184794d9d3e92b560db
- size 7352
+ oid sha256:9b2ceb98c0b4a50d3909c4f866386ff0e5093b24fd71a2054110090af7b4ef0e
+ size 7416