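"""Enrich leaderboard entries with model metadata read from eval request
files, flag models with open discussions, and drop forbidden submissions."""
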
import glob
import json
import os
from typing import List

from huggingface_hub import HfApi
from tqdm import tqdm

from src.display_models.model_metadata_flags import DO_NOT_SUBMIT_MODELS, FLAGGED_MODELS
from src.display_models.model_metadata_type import MODEL_TYPE_METADATA, ModelType, model_type_from_str
from src.display_models.utils import AutoEvalColumn, model_hyperlink

api = HfApi(token=os.environ.get("H4_TOKEN", None))


def get_model_metadata(leaderboard_data: List[dict]):
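    """Enrich each leaderboard entry in place with model type, license, likes
    and parameter count, read from its eval request file in `eval-queue`."""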
    for model_data in tqdm(leaderboard_data):
        requests_path = os.path.join(
            "eval-queue",
            model_data["model_name_for_query"] + "_eval_request_*.json",
        )
        request_files = glob.glob(requests_path)

        # Select correct request file (precision)
        request_file = ""
        if len(request_files) == 1:
            request_file = request_files[0]
        elif len(request_files) > 1:
            # Several candidates: take the first file, in reverse-sorted order,
            # whose status and precision match this entry.
            request_files = sorted(request_files, reverse=True)
            for tmp_request_file in request_files:
                with open(tmp_request_file, "r") as f:
                    req_content = json.load(f)
                    if (
                        req_content["status"] in ["FINISHED", "PENDING_NEW_EVAL"]
                        and req_content["precision"] == model_data["Precision"].split(".")[-1]
                    ):
                        request_file = tmp_request_file
                        break

        # If no request file matched, request_file is "" and open() raises,
        # falling through to the fallback metadata below.
        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            model_type = model_type_from_str(request.get("model_type", ""))
            model_data[AutoEvalColumn.model_type.name] = model_type.value.name
            model_data[AutoEvalColumn.model_type_symbol.name] = model_type.value.symbol  # + ("🔺" if is_delta else "")
            model_data[AutoEvalColumn.license.name] = request.get("license", "?")
            model_data[AutoEvalColumn.likes.name] = request.get("likes", 0)
            model_data[AutoEvalColumn.params.name] = request.get("params", 0)
        except Exception as e:
            print(f"Could not find request file for {model_data['model_name_for_query']}: {e}")
            print(f"{request_file=}")
            print(f"{request_files=}")
            if model_data["model_name_for_query"] in MODEL_TYPE_METADATA:
                model_data[AutoEvalColumn.model_type.name] = MODEL_TYPE_METADATA[
                    model_data["model_name_for_query"]
                ].value.name
                model_data[AutoEvalColumn.model_type_symbol.name] = MODEL_TYPE_METADATA[
                    model_data["model_name_for_query"]
                ].value.symbol  # + ("🔺" if is_delta else "")
            else:
                model_data[AutoEvalColumn.model_type.name] = ModelType.Unknown.value.name
                model_data[AutoEvalColumn.model_type_symbol.name] = ModelType.Unknown.value.symbol
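
# For reference, a request file in `eval-queue` carries at least the fields
# read above; an illustrative (assumed, non-exhaustive) example:
#
#   {
#       "status": "FINISHED",
#       "precision": "float16",
#       "model_type": "pretrained",
#       "license": "apache-2.0",
#       "likes": 42,
#       "params": 7
#   }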


def flag_models(leaderboard_data: List[dict]):
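    """Rewrite the display name of every entry listed in FLAGGED_MODELS to
    include a warning and a link to the relevant discussion."""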
    for model_data in leaderboard_data:
        if model_data["model_name_for_query"] in FLAGGED_MODELS:
            issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1]
            issue_link = model_hyperlink(
                FLAGGED_MODELS[model_data["model_name_for_query"]],
                f"See discussion #{issue_num}",
            )
            model_data[
                AutoEvalColumn.model.name
            ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}"


def remove_forbidden_models(leaderboard_data: List[dict]):
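    """Drop entries listed in DO_NOT_SUBMIT_MODELS.

    Filters the list in place and returns it for convenience."""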
    indices_to_remove = []
    for ix, model in enumerate(leaderboard_data):
        if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS:
            indices_to_remove.append(ix)

    for ix in reversed(indices_to_remove):
        leaderboard_data.pop(ix)
    return leaderboard_data


def apply_metadata(leaderboard_data: List[dict]):
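    """Run the full pipeline in place: drop forbidden models, attach request
    metadata, then flag known problematic models."""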
    leaderboard_data = remove_forbidden_models(leaderboard_data)
    get_model_metadata(leaderboard_data)
    flag_models(leaderboard_data)
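

# A minimal usage sketch (illustrative, not part of the original module):
# `leaderboard.json` and the exact shape of its rows are assumptions here;
# the pipeline only requires each row to carry the "model_name_for_query"
# and "Precision" keys used above.
if __name__ == "__main__":
    with open("leaderboard.json", "r") as f:
        rows = json.load(f)
    apply_metadata(rows)  # mutates `rows` in place
    print(f"Processed {len(rows)} leaderboard entries.")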