src/submission/check_validity.py CHANGED
@@ -7,7 +7,7 @@ from datetime import datetime, timedelta, timezone
 
 import huggingface_hub
 from huggingface_hub import ModelCard
-from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata
+from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata, parse_safetensors_file_metadata
 from transformers import AutoConfig, AutoTokenizer
 
 from src.display.utils import parse_iso8601_datetime, curated_authors
@@ -24,16 +24,15 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
         return False, "Please add a model card to your model to explain how you trained/fine-tuned it.", None
 
     # Enforce license metadata
-    if card.data.license is None:
-        if not ("license_name" in card.data and "license_link" in card.data):
-            return (
-                False,
-                (
-                    "License not found. Please add a license to your model card using the `license` metadata or a"
-                    " `license_name`/`license_link` pair."
-                ),
-                None,
-            )
+    if card.data.license is None and not ("license_name" in card.data and "license_link" in card.data):
+        return (
+            False,
+            (
+                "License not found. Please add a license to your model card using the `license` metadata or a"
+                " `license_name`/`license_link` pair."
+            ),
+            None,
+        )
 
     # Enforce card content
     if len(card.text) < 200:
@@ -43,15 +42,15 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
 
 
 def is_model_on_hub(
-    model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False
+    model_name: str, revision: str, token: str | None = None, trust_remote_code: bool = False, test_tokenizer: bool = False,
 ) -> tuple[bool, str, AutoConfig]:
     try:
         config = AutoConfig.from_pretrained(
             model_name, revision=revision, trust_remote_code=trust_remote_code, token=token, force_download=True)
         if test_tokenizer:
             try:
-                tk = AutoTokenizer.from_pretrained(
-                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
+                AutoTokenizer.from_pretrained(
+                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token,
                 )
             except ValueError as e:
                 return (False, f"uses a tokenizer which is not in a transformers release: {e}", None)
@@ -61,6 +60,12 @@ def is_model_on_hub(
                     "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
                     None,
                 )
+            except Exception:
+                return (
+                    False,
+                    "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
+                    None,
+                )
         return True, None, config
 
     except ValueError:
@@ -76,17 +81,30 @@ def is_model_on_hub(
         return False, f"was not found or misconfigured on the hub! Error raised was {e.args[0]}", None
 
 
-def get_model_size(model_info: ModelInfo, precision: str) -> float:
+def get_model_size(model_info: ModelInfo, precision: str, base_model: str | None) -> tuple[float | None, str]:
     size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")
     safetensors = None
+    adapter_safetensors = None
+    # hack way to check that model is adapter
+    is_adapter = "adapter_config.json" in (s.rfilename for s in model_info.siblings)
 
     try:
-        safetensors = get_safetensors_metadata(model_info.id)
+        if is_adapter:
+            if not base_model:
+                return None, "Adapter model submission detected. Please ensure the base model information is provided."
+
+            adapter_safetensors = parse_safetensors_file_metadata(model_info.id, "adapter_model.safetensors")
+            safetensors = get_safetensors_metadata(base_model)
+        else:
+            safetensors = get_safetensors_metadata(model_info.id)
     except Exception as e:
-        logging.error(f"Failed to get safetensors metadata for model {model_info.id}: {str(e)}")
+        logging.warning(f"Failed to get safetensors metadata for model {model_info.id}: {e!s}")
 
     if safetensors is not None:
-        model_size = round(sum(safetensors.parameter_count.values()) / 1e9, 3)
+        model_size = sum(safetensors.parameter_count.values())
+        if adapter_safetensors is not None:
+            model_size += sum(adapter_safetensors.parameter_count.values())
+        model_size = round(model_size / 1e9, 3)
     else:
         try:
             size_match = re.search(size_pattern, model_info.id.lower())
@@ -94,15 +112,15 @@ def get_model_size(model_info: ModelInfo, precision: str) -> float:
             model_size = size_match.group(0)
             model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
         else:
-            return -1  # Unknown model size
+            return None, "Unknown model size"
     except AttributeError:
         logging.warning(f"Unable to parse model size from ID: {model_info.id}")
-        return -1  # Unknown model size
+        return None, "Unknown model size"
 
     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1
     model_size = size_factor * model_size
 
-    return model_size
+    return model_size, ""
 
 def get_model_arch(model_info: ModelInfo):
     return model_info.config.get("architectures", "Unknown")
@@ -112,7 +130,6 @@ def user_submission_permission(org_or_user, users_to_submission_dates, rate_limi
     # No limit for curated authors
     if org_or_user in curated_authors:
         return True, ""
-
     # Increase quota first if user has higher limits
     if org_or_user in HAS_HIGHER_RATE_LIMIT:
         rate_limit_quota *= 2
 
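The license rule enforced by check_model_card is unchanged by the refactor above: a card passes with either a top-level `license` field or a `license_name`/`license_link` pair. A minimal sketch of the same check against a loaded card, mirroring the condition exactly as the diff writes it (the repo id is a placeholder):

from huggingface_hub import ModelCard

card = ModelCard.load("org/some-model")  # hypothetical repo id
# Mirrors check_model_card: accept `license`, or a name/link pair.
license_ok = card.data.license is not None or (
    "license_name" in card.data and "license_link" in card.data
)
print("license metadata present" if license_ok else "license metadata missing")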
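For adapter submissions, get_model_size now combines two metadata helpers from huggingface_hub.hf_api: parse_safetensors_file_metadata for the adapter's own adapter_model.safetensors, and get_safetensors_metadata for the base checkpoint. A standalone sketch of the same arithmetic (repo ids are placeholders):

from huggingface_hub import HfApi
from huggingface_hub.hf_api import get_safetensors_metadata, parse_safetensors_file_metadata

adapter_repo = "org/llama-lora-adapter"  # hypothetical adapter repo
base_repo = "org/llama-base"  # hypothetical base checkpoint

info = HfApi().model_info(adapter_repo)
# Same detection trick as in get_model_size: adapter repos ship adapter_config.json.
if "adapter_config.json" in (s.rfilename for s in info.siblings):
    adapter_meta = parse_safetensors_file_metadata(adapter_repo, "adapter_model.safetensors")
    base_meta = get_safetensors_metadata(base_repo)
    params = sum(base_meta.parameter_count.values()) + sum(adapter_meta.parameter_count.values())
else:
    params = sum(get_safetensors_metadata(adapter_repo).parameter_count.values())

print(f"{round(params / 1e9, 3)}B parameters")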
src/submission/submit.py CHANGED
@@ -119,7 +119,10 @@ def add_new_eval(
         return styled_error(f"The model '{model}' with revision '{model_info.sha}' and precision '{precision}' has already been submitted.")
 
     # Check model size early
-    model_size = get_model_size(model_info=model_info, precision=precision)
+    model_size, error_text = get_model_size(model_info=model_info, precision=precision, base_model=base_model)
+    if model_size is None:
+        return styled_error(error_text)
+
     # First check: Absolute size limit for float16 and bfloat16
     if precision in ["float16", "bfloat16"] and model_size > 100:
         return styled_error(f"Sadly, models larger than 100B parameters cannot be submitted in {precision} precision at this time. "
 