Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
upgrade transformers
Browse files
is_model_on_hub_cache/joblib/src/submission/check_validity/is_model_on_hub/func_code.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
# first line: 37
|
2 |
@memory.cache
|
3 |
def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
|
4 |
-
if '
|
5 |
return False, "is not allowed to be submitted to the leaderboard.", None
|
6 |
try:
|
7 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
|
|
1 |
# first line: 37
|
2 |
@memory.cache
|
3 |
def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
|
4 |
+
if 'Bielik-11' in model_name:
|
5 |
return False, "is not allowed to be submitted to the leaderboard.", None
|
6 |
try:
|
7 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
src/about.py
CHANGED
@@ -43,7 +43,7 @@ class Tasks(Enum):
|
|
43 |
task27 = Task("polish_poquad_reranking", "acc,none", "poquad_reranking", "other", 0.0)
|
44 |
task28 = Task("polish_abstractive_poquad_rag", "levenshtein,none", "abstractive_poquad_rag", "other", 0.0)
|
45 |
task29 = Task("polish_abstractive_poquad_open_book", "levenshtein,none", "abstractive_poquad_open_book", "other", 0.0)
|
46 |
-
task30 = Task("
|
47 |
|
48 |
|
49 |
g_tasks = [task.value.benchmark for task in Tasks if task.value.type == "generate_until"]
|
|
|
43 |
task27 = Task("polish_poquad_reranking", "acc,none", "poquad_reranking", "other", 0.0)
|
44 |
task28 = Task("polish_abstractive_poquad_rag", "levenshtein,none", "abstractive_poquad_rag", "other", 0.0)
|
45 |
task29 = Task("polish_abstractive_poquad_open_book", "levenshtein,none", "abstractive_poquad_open_book", "other", 0.0)
|
46 |
+
task30 = Task("polish_pes", "exact_match,score-first", "pes", "other", 0.2)
|
47 |
|
48 |
|
49 |
g_tasks = [task.value.benchmark for task in Tasks if task.value.type == "generate_until"]
|
src/submission/check_validity.py
CHANGED
@@ -36,7 +36,7 @@ memory = Memory('is_model_on_hub_cache', verbose=0)
|
|
36 |
|
37 |
@memory.cache
|
38 |
def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
|
39 |
-
if '
|
40 |
return False, "is not allowed to be submitted to the leaderboard.", None
|
41 |
try:
|
42 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|
|
|
36 |
|
37 |
@memory.cache
|
38 |
def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
|
39 |
+
if 'Bielik-11' in model_name:
|
40 |
return False, "is not allowed to be submitted to the leaderboard.", None
|
41 |
try:
|
42 |
config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
|