Datasets:
mteb
/

Modalities:
Text
License:
File size: 7,249 Bytes
ecad05e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import json
import datasets




# Human-readable summary surfaced in DatasetInfo.description.
# NOTE(review): the GitHub URLs below use "Paul/..." as the account name,
# which looks like a placeholder — verify against the actual upstream repos.
_DESCRIPTION = """\
    Combines multilingual HateCheck datasets (10 languages, including English), by Paul Roettger.
    The original English dataset can be found under https://github.com/Paul/hatecheck.
    Datasets for other languages are found at:
    - https://github.com/Paul/hatecheck-arabic
    - https://github.com/Paul/hatecheck-mandarin
    - https://github.com/Paul/hatecheck-german
    - https://github.com/Paul/hatecheck-french
    - https://github.com/Paul/hatecheck-hindi
    - https://github.com/Paul/hatecheck-italian
    - https://github.com/Paul/hatecheck-dutch
    - https://github.com/Paul/hatecheck-portuguese
    - https://github.com/Paul/hatecheck-spanish
Make sure to credit the authors and cite relevant papers if you use these datasets.
"""
# BibTeX for the original English HateCheck paper (ACL 2021) and the
# Multilingual HateCheck paper (WOAH 2022); surfaced in DatasetInfo.citation.
_CITATION = """\
@inproceedings{rottger-etal-2021-hatecheck,
    title = "{H}ate{C}heck: Functional Tests for Hate Speech Detection Models",
    author = {R{\"o}ttger, Paul  and
      Vidgen, Bertie  and
      Nguyen, Dong  and
      Waseem, Zeerak  and
      Margetts, Helen  and
      Pierrehumbert, Janet},
    editor = "Zong, Chengqing  and
      Xia, Fei  and
      Li, Wenjie  and
      Navigli, Roberto",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.4",
    doi = "10.18653/v1/2021.acl-long.4",
    pages = "41--58",
    abstract = "Detecting online hate is a difficult task that even state-of-the-art models struggle with. Typically, hate speech detection models are evaluated by measuring their performance on held-out test data using metrics such as accuracy and F1 score. However, this approach makes it difficult to identify specific model weak points. It also risks overestimating generalisable model performance due to increasingly well-evidenced systematic gaps and biases in hate speech datasets. To enable more targeted diagnostic insights, we introduce HateCheck, a suite of functional tests for hate speech detection models. We specify 29 model functionalities motivated by a review of previous research and a series of interviews with civil society stakeholders. We craft test cases for each functionality and validate their quality through a structured annotation process. To illustrate HateCheck{'}s utility, we test near-state-of-the-art transformer models as well as two popular commercial models, revealing critical model weaknesses.",
}

@inproceedings{rottger-etal-2022-multilingual,
    title = "Multilingual {H}ate{C}heck: Functional Tests for Multilingual Hate Speech Detection Models",
    author = {R{\"o}ttger, Paul  and
      Seelawi, Haitham  and
      Nozza, Debora  and
      Talat, Zeerak  and
      Vidgen, Bertie},
    editor = "Narang, Kanika  and
      Mostafazadeh Davani, Aida  and
      Mathias, Lambert  and
      Vidgen, Bertie  and
      Talat, Zeerak",
    booktitle = "Proceedings of the Sixth Workshop on Online Abuse and Harms (WOAH)",
    month = jul,
    year = "2022",
    address = "Seattle, Washington (Hybrid)",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.woah-1.15",
    doi = "10.18653/v1/2022.woah-1.15",
    pages = "154--169",
    abstract = "Hate speech detection models are typically evaluated on held-out test sets. However, this risks painting an incomplete and potentially misleading picture of model performance because of increasingly well-documented systematic gaps and biases in hate speech datasets. To enable more targeted diagnostic insights, recent research has thus introduced functional tests for hate speech detection models. However, these tests currently only exist for English-language content, which means that they cannot support the development of more effective models in other languages spoken by billions across the world. To help address this issue, we introduce Multilingual HateCheck (MHC), a suite of functional tests for multilingual hate speech detection models. MHC covers 34 functionalities across ten languages, which is more languages than any other hate speech dataset. To illustrate MHC{'}s utility, we train and test a high-performing multilingual hate speech detection model, and reveal critical model weaknesses for monolingual and cross-lingual applications.",
}
"""

# License note surfaced in DatasetInfo.license (upstream data is CC-BY-4.0).
_LICENSE = "Original datasets are released under cc-by-4.0."


# ISO 639-3 code -> English language name for the ten covered languages.
# Iteration order of this dict drives per-language config creation below.
_LANGUAGES = {"ara": "Arabic", 
              "cmn": "Mandarin", 
              "eng": "English", 
              "deu": "German", 
              "fra": "French",
              "hin": "Hindi", 
              "ita": "Italian", 
              "nld": "Dutch", 
              "por": "Portuguese", 
              "spa": "Spanish"}

# Name of the config that bundles all ten languages into one dataset.
_ALL_LANGUAGES = "all_languages"
# Relative path template for each language's split file, e.g. "eng/test.jsonl".
_DOWNLOAD_URL = "{lang}/{split}.jsonl"
# Version string passed to datasets.Version for every BuilderConfig.
_VERSION = "2.18.0"


class MultiHatecheckConfig(datasets.BuilderConfig):
    """BuilderConfig for the Multilingual HateCheck dataset.

    Args:
        languages: Iterable/mapping of ISO 639-3 language codes this config
            covers; read by ``_split_generators`` to build download paths.
        **kwargs: Forwarded to ``datasets.BuilderConfig`` (name, description, ...).
    """

    def __init__(self, languages=None, **kwargs):
        # Fix: the original statement ended with a stray trailing comma,
        # turning the super().__init__(...) call into the first element of a
        # discarded 1-tuple expression. Also use the zero-argument super().
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.languages = languages


class MultiHatecheck(datasets.GeneratorBasedBuilder):
    """Multilingual HateCheck corpus (10 languages), by Paul Roettger et al.

    Each example is a short test case with a binary (hateful vs non-hateful)
    label and the functionality (test category) it exercises. Only a TEST
    split is provided, matching the functional-test design of the corpus.
    """

    BUILDER_CONFIGS = [
        # One config bundling all ten languages ...
        MultiHatecheckConfig(
            name=_ALL_LANGUAGES,
            languages=_LANGUAGES,
            description="Hate speech detection dataset with binary (hateful vs non-hateful) labels. Includes 25+ distinct types of hate and challenging non-hate.",
        )
    ] + [
        # ... plus one single-language config per language code.
        MultiHatecheckConfig(
            name=lang,
            languages=[lang],
            description=f"{_LANGUAGES[lang]} examples of hate speech, with binary (hateful vs non-hateful) labels. Includes 25+ distinct types of hate and challenging non-hate.",
        )
        for lang in _LANGUAGES
    ]
    BUILDER_CONFIG_CLASS = MultiHatecheckConfig
    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES

    def _info(self):
        """Return dataset metadata: features schema, license, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Label kept as a plain string rather than a ClassLabel,
                    # mirroring the values in the source JSONL files.
                    "is_hateful": datasets.Value("string"),
                    "functionality": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one ``<lang>/test.jsonl`` per configured language.

        Only a TEST split exists; all downloaded files feed one generator.
        """
        test_urls = [_DOWNLOAD_URL.format(split="test", lang=lang) for lang in self.config.languages]
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths}),
        ]

    def _generate_examples(self, file_paths):
        """Yield ``(key, example)`` pairs from the given JSONL files.

        A single running counter over the concatenation of all files keeps
        keys unique across languages.
        """
        key = 0
        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as f:
                for line in f:
                    # Robustness fix: skip blank lines (e.g. trailing empty
                    # lines in a JSONL file) instead of crashing in json.loads.
                    if not line.strip():
                        continue
                    yield key, json.loads(line)
                    key += 1