ben81828 committed
Commit 9f7674d (verified)
1 Parent(s): 21becb9

Training in progress, step 1150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:38c92d201c343dd983a22c7fe6d61ce77cf92ee8cd85263b720e40f9ba6a14e5
+ oid sha256:ad37ecfc8f56cfb958ea72a1146b5dd079374376e542f0312701bdbcbb5deae5
  size 29034840
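
Each of the pointer files in this commit follows the Git LFS spec: the repository tracks only `version`, `oid sha256:…`, and `size`, while the binary itself lives in LFS storage. A minimal sketch for checking that a locally downloaded blob matches its pointer (the local path and helper name are illustrative, not part of the commit):

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    data = Path(local_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Values copied from the new pointer above.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "ad37ecfc8f56cfb958ea72a1146b5dd079374376e542f0312701bdbcbb5deae5",
    29034840,
))
```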
last-checkpoint/global_step1150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca8a10436260e618014a2b4be40396eaebb8403c6df01b2cbcb1b8aa674fcd6a
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b29fd3e42b02a0adcea87482aa72e33972b31215f1f79a1b20fc474b64830ee
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e7451401a8c4d48ff12340bcb65dad7b7fe4d4bbc57787fc077709274ea1b62
+ size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87c5c85cc22f71e8ae3b26b29873d81eae73e21c9fa93f8c1e6286760b0c056d
+ size 43429616
last-checkpoint/global_step1150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa758cf6fa59b0c76382480e4cb3a02f4e3bcc7c5814e40e5eff7203544b973f
+ size 637299
last-checkpoint/global_step1150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a739833690f638f8f3ea52ef250f6914f0d6cc7cf09d4ba85216fa45aed5b895
+ size 637171
last-checkpoint/global_step1150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64a9559916dc6f652d640305c67706ef9b17164de4b12a7f651920cf80c4d653
+ size 637171
last-checkpoint/global_step1150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94a93cc4afb60025355716cc53e16bf4936ae88fb37858092e9cfc5c70e1b9a9
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1100
+ global_step1150
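
The `global_step1150/` directory holds the DeepSpeed ZeRO shards for this step: one `bf16_zero_pp_rank_*_optim_states.pt` and one `zero_pp_rank_*_model_states.pt` per data-parallel rank (four ranks here), while `last-checkpoint/latest` records which tag to resume from. If a single consolidated fp32 state dict is ever needed, DeepSpeed ships a helper for this layout; a sketch, assuming a recent DeepSpeed release and that the checkpoint has been downloaded to `last-checkpoint/`:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Reads last-checkpoint/latest ("global_step1150") to find the tag, then merges
# the per-rank ZeRO shards into one fp32 state dict keyed by parameter name.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(f"reconstructed {len(state_dict)} tensors")
```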
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9d9fea52fb92cc51e76feeb2b139ce35723c0cb651da383e4f7eec2606ed6c2a
+ oid sha256:274dc3860ee0c7f4d5348f60910a4b568498c04adfefb89f905b1c78a82c1312
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b5820ebfcc2e1cfe1ad2619a05ea9a484ff21635e13e386bf14abd302f2c0f7
+ oid sha256:e9aa441491b9ca89e796944520fa1db332a67c0a1a920be83edd2d96d741716d
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb8d96a68e732fca41980516622a50990bbd3ee989e72076a35c8608d9b4d136
+ oid sha256:c1ee3434533b24fb771504fa8cceb5c2ea25fe0de1641128feaceccc65afe6ed
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ce6f1db6d09f4d89a9b2bd8dc8eeb99f1fada2ec04376e23b5a7a13004994005
+ oid sha256:93b4a44be1335173d2e3120bd0d1e6346f3e832d8935752c70ce1e98f017fa87
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d0c7456eafeee3179566bb381c9153771d7e0f21738d2398944d053915d0651
+ oid sha256:7d461c8d7517d4b88333bff7984fc3bfc149292198b04bbc18a49aee698ffb5c
  size 1064
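
The `rng_state_*.pth` files snapshot each rank's random-number-generator state and `scheduler.pt` the learning-rate scheduler, so resuming from this checkpoint reproduces the same data order and LR curve. A quick sketch for inspecting them locally (paths assume the checkpoint has been downloaded; `weights_only=False` is needed on newer PyTorch because these pickles contain non-tensor objects):

```python
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
sched_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)

print(type(rng_state))   # typically a dict of python/numpy/cpu/cuda RNG states
print(sched_state)       # typically the scheduler's state_dict (step count, last LR, ...)
```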
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.4798590838909149,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-1100",
- "epoch": 0.28328611898017,
+ "best_metric": 0.45381438732147217,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-1150",
+ "epoch": 0.2961627607519959,
  "eval_steps": 50,
- "global_step": 1100,
+ "global_step": 1150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1965,11 +1965,100 @@
  "eval_steps_per_second": 0.737,
  "num_input_tokens_seen": 11547008,
  "step": 1100
+ },
+ {
+ "epoch": 0.28457378315735254,
+ "grad_norm": 11.308296783543483,
+ "learning_rate": 8.07106356344834e-05,
+ "loss": 0.4927,
+ "num_input_tokens_seen": 11600032,
+ "step": 1105
+ },
+ {
+ "epoch": 0.28586144733453517,
+ "grad_norm": 4.902660398367944,
+ "learning_rate": 8.051838793910038e-05,
+ "loss": 0.4353,
+ "num_input_tokens_seen": 11652120,
+ "step": 1110
+ },
+ {
+ "epoch": 0.28714911151171774,
+ "grad_norm": 4.185631754620407,
+ "learning_rate": 8.032541847934146e-05,
+ "loss": 0.4891,
+ "num_input_tokens_seen": 11705184,
+ "step": 1115
+ },
+ {
+ "epoch": 0.28843677568890036,
+ "grad_norm": 6.049695709018542,
+ "learning_rate": 8.013173181896283e-05,
+ "loss": 0.4497,
+ "num_input_tokens_seen": 11758032,
+ "step": 1120
+ },
+ {
+ "epoch": 0.28972443986608293,
+ "grad_norm": 4.598736726589848,
+ "learning_rate": 7.993733253868256e-05,
+ "loss": 0.4927,
+ "num_input_tokens_seen": 11810736,
+ "step": 1125
+ },
+ {
+ "epoch": 0.2910121040432655,
+ "grad_norm": 41.010822412039396,
+ "learning_rate": 7.974222523607236e-05,
+ "loss": 0.4853,
+ "num_input_tokens_seen": 11863152,
+ "step": 1130
+ },
+ {
+ "epoch": 0.2922997682204481,
+ "grad_norm": 5.591270811303827,
+ "learning_rate": 7.954641452544865e-05,
+ "loss": 0.4458,
+ "num_input_tokens_seen": 11914536,
+ "step": 1135
+ },
+ {
+ "epoch": 0.2935874323976307,
+ "grad_norm": 4.526048407550314,
+ "learning_rate": 7.934990503776363e-05,
+ "loss": 0.3976,
+ "num_input_tokens_seen": 11966064,
+ "step": 1140
+ },
+ {
+ "epoch": 0.2948750965748133,
+ "grad_norm": 4.778105875378293,
+ "learning_rate": 7.915270142049566e-05,
+ "loss": 0.508,
+ "num_input_tokens_seen": 12018928,
+ "step": 1145
+ },
+ {
+ "epoch": 0.2961627607519959,
+ "grad_norm": 8.075837130866274,
+ "learning_rate": 7.89548083375394e-05,
+ "loss": 0.4553,
+ "num_input_tokens_seen": 12071088,
+ "step": 1150
+ },
+ {
+ "epoch": 0.2961627607519959,
+ "eval_loss": 0.45381438732147217,
+ "eval_runtime": 38.3303,
+ "eval_samples_per_second": 3.131,
+ "eval_steps_per_second": 0.783,
+ "num_input_tokens_seen": 12071088,
+ "step": 1150
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 11547008,
+ "num_input_tokens_seen": 12071088,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1984,7 +2073,7 @@
  "attributes": {}
  }
  },
- "total_flos": 761815920803840.0,
+ "total_flos": 796381052928000.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null