Commit 2029400 · ZeroCommand · add error msg for token invalid
Parent(s): 346fe42

Files changed:
- app_text_classification.py +16 -3
- text_classification.py +13 -1
- wordings.py +8 -2
app_text_classification.py
CHANGED
@@ -12,12 +12,17 @@ from text_classification_ui_helpers import (
     write_column_mapping_to_config,
 )
 
-from text_classification import
+from text_classification import (
+    get_example_prediction,
+    check_hf_token_validity,
+    HuggingFaceInferenceAPIResponse
+)
 from wordings import (
     CONFIRM_MAPPING_DETAILS_MD,
     INTRODUCTION_MD,
     USE_INFERENCE_API_TIP,
-    CHECK_LOG_SECTION_RAW
+    CHECK_LOG_SECTION_RAW,
+    HF_TOKEN_INVALID_STYLED
 )
 
 MAX_LABELS = 40
@@ -96,6 +101,13 @@ def get_demo():
             visible=True,
             interactive=True,
         )
+        inference_token_info = gr.HTML(value=HF_TOKEN_INVALID_STYLED, visible=False)
+
+        inference_token.change(
+            lambda token: gr.update(visible=lambda: check_hf_token_validity(token)),
+            inputs=[inference_token],
+            outputs=[inference_token_info],
+        )
 
         with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
             scanners = gr.CheckboxGroup(label="Scan Settings", visible=True)
@@ -103,7 +115,7 @@ def get_demo():
     @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
     def get_scanners(uid):
         selected = read_scanners(uid)
-        #
+        # we remove data_leakage from the default scanners
        # Reason: data_leakage barely raises any issues and takes too many requests
        # when using inference API, causing rate limit error
         scan_config = selected + ["data_leakage"]
@@ -236,6 +248,7 @@ def get_demo():
             model_id, dataset_id, dataset_config, dataset_split, inference_token
         )
         if not isinstance(prediction_response, HuggingFaceInferenceAPIResponse):
+            gr.warning("Your HF token is invalid. Please check your token.")
             return gr.update(interactive=False)
         return gr.update(interactive=True)
 
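The new change handler re-runs the token check on every edit to the token field and toggles the invalid-token banner. Note that the committed handler passes a nested lambda as the visible value; if gr.update expects a plain boolean there, a more direct version of the same wiring might look like the sketch below. This is a sketch only: it assumes the surrounding get_demo() scope (where gr, inference_token, and inference_token_info are defined), and the boolean inversion is inferred from the banner's invalid-token wording, not taken from the commit.

    def toggle_invalid_token_banner(token):
        # show HF_TOKEN_INVALID_STYLED only when validation fails
        return gr.update(visible=not check_hf_token_validity(token))

    inference_token.change(
        toggle_invalid_token_banner,
        inputs=[inference_token],
        outputs=[inference_token_info],
    )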
text_classification.py
CHANGED
@@ -380,4 +380,16 @@ def text_classification_fix_column_mapping(column_mapping, ppl, d_id, config, sp
 def strip_model_id_from_url(model_id):
     if model_id.startswith("https://huggingface.co/"):
         return "/".join(model_id.split("/")[-2])
-    return model_id
+    return model_id
+
+def check_hf_token_validity(hf_token):
+    if hf_token == "":
+        return False
+    if not isinstance(hf_token, str):
+        return False
+    # use inference api to check the token
+    payload = {"inputs": "This is a test", "options": {"use_cache": True}}
+    response = hf_inference_api("cardiffnlp/twitter-roberta-base-sentiment-latest", hf_token, payload)
+    if "error" in response:
+        return False
+    return True
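The new check_hf_token_validity probes the hosted Inference API with a small cached request against a public sentiment model and treats an "error" key in the response as an invalid token. The hf_inference_api helper it calls is defined elsewhere in text_classification.py and is not part of this diff; for context, here is a minimal sketch of what such a helper typically looks like. The signature and the JSON error shape are assumptions; only the endpoint and Bearer-token header follow the standard Inference API convention.

    import requests

    def hf_inference_api(model_id, hf_token, payload):
        # POST the payload to the hosted Inference API; an invalid or
        # expired token comes back as a JSON body with an "error" key
        url = f"https://api-inference.huggingface.co/models/{model_id}"
        headers = {"Authorization": f"Bearer {hf_token}"}
        return requests.post(url, headers=headers, json=payload).json()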
wordings.py
CHANGED
@@ -2,7 +2,7 @@ INTRODUCTION_MD = """
 <h1 style="text-align: center;">
     🐢Giskard Evaluator
 </h1>
-Welcome to Giskard Evaluator Space! Get your report immediately by simply input your model id and dataset id below. Follow our leads and improve your model
+Welcome to Giskard Evaluator Space! Get your report immediately by simply input your model id and dataset id below. Follow our leads and improve your model.
 """
 CONFIRM_MAPPING_DETAILS_MD = """
 <h1 style="text-align: center;">
@@ -71,7 +71,13 @@ USE_INFERENCE_API_TIP = """
 </b>
 """
 
+HF_TOKEN_INVALID_STYLED = """
+    <h3 style="text-align: center;color: #fa5f5f; background-color: #fbe2e2; border-radius: 8px; padding: 10px; ">
+    Your Hugging Face token is invalid. Please double check your token.
+    </h3>
+"""
+
 def get_styled_input(input):
     return f"""<h3 style="text-align: center;color: #4ca154; background-color: #e2fbe8; border-radius: 8px; padding: 10px; ">
-    Sample input: {input}
+    Your model and dataset have been validated! <br /> Sample input: {input}
     </h3>"""
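The invalid-token banner reuses the markup of the existing success banner, differing only in colors and message. If more status banners are added later, a parametrized helper could keep them consistent. A hypothetical sketch, not part of this commit, with defaults mirroring the green success style:

    def get_styled_banner(message, color="#4ca154", background="#e2fbe8"):
        # shared banner markup; defaults match the existing success style
        return f"""<h3 style="text-align: center;color: {color}; background-color: {background}; border-radius: 8px; padding: 10px; ">
        {message}
        </h3>"""

    # e.g. the red invalid-token banner:
    # get_styled_banner(
    #     "Your Hugging Face token is invalid. Please double check your token.",
    #     color="#fa5f5f", background="#fbe2e2",
    # )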