victoriadreis committed on
Commit c42eaa8
1 Parent(s): 684cd61

Update tupy.py

Files changed (1):
  1. tupy.py +135 -68
tupy.py CHANGED
@@ -1,5 +1,5 @@
  # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -12,100 +12,167 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""
+ # Lint as: python3
+ """Toxic/Abusive Tweets Multilabel Classification Dataset for Brazilian Portuguese."""


- import csv
+ import os
+
+ import pandas as pd

  import datasets


  _CITATION = """\
- @inproceedings{fortuna-etal-2019-hierarchically,
-     title = "A Hierarchically-Labeled {P}ortuguese Hate Speech Dataset",
-     author = "Fortuna, Paula and
-       Rocha da Silva, Jo{\\~a}o and
-       Soler-Company, Juan and
-       Wanner, Leo and
-       Nunes, S{\'e}rgio",
-     booktitle = "Proceedings of the Third Workshop on Abusive Language Online",
-     month = aug,
-     year = "2019",
-     address = "Florence, Italy",
-     publisher = "Association for Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/W19-3510",
-     doi = "10.18653/v1/W19-3510",
-     pages = "94--104",
-     abstract = "Over the past years, the amount of online offensive speech has been growing steadily. To successfully cope with it, machine learning are applied. However, ML-based techniques require sufficiently large annotated datasets. In the last years, different datasets were published, mainly for English. In this paper, we present a new dataset for Portuguese, which has not been in focus so far. The dataset is composed of 5,668 tweets. For its annotation, we defined two different schemes used by annotators with different levels of expertise. Firstly, non-experts annotated the tweets with binary labels ({`}hate{'} vs. {`}no-hate{'}). Secondly, expert annotators classified the tweets following a fine-grained hierarchical multiple label scheme with 81 hate speech categories in total. The inter-annotator agreement varied from category to category, which reflects the insight that some types of hate speech are more subtle than others and that their detection depends on personal perception. This hierarchical annotation scheme is the main contribution of the presented work, as it facilitates the identification of different types of hate speech and their intersections. To demonstrate the usefulness of our dataset, we carried a baseline classification experiment with pre-trained word embeddings and LSTM on the binary classified data, with a state-of-the-art outcome.",
+ @article{DBLP:journals/corr/abs-2010-04543,
+     author = {Joao Augusto Leite and
+               Diego F. Silva and
+               Kalina Bontcheva and
+               Carolina Scarton},
+     title = {Toxic Language Detection in Social Media for Brazilian Portuguese:
+              New Dataset and Multilingual Analysis},
+     journal = {CoRR},
+     volume = {abs/2010.04543},
+     year = {2020},
+     url = {https://arxiv.org/abs/2010.04543},
+     eprinttype = {arXiv},
+     eprint = {2010.04543},
+     timestamp = {Tue, 15 Dec 2020 16:10:16 +0100},
+     biburl = {https://dblp.org/rec/journals/corr/abs-2010-04543.bib},
+     bibsource = {dblp computer science bibliography, https://dblp.org}
  }
  """

  _DESCRIPTION = """\
- Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate').
+ ToLD-Br is the biggest dataset for toxic tweets in Brazilian Portuguese, crowdsourced
+ by 42 annotators selected from a pool of 129 volunteers. Annotators were selected aiming
+ to create a plural group in terms of demographics (ethnicity, sexual orientation, age, gender).
+ Each tweet was labeled by three annotators across six possible categories:
+ LGBTQ+phobia, Xenophobia, Obscene, Insult, Misogyny and Racism.
  """

- _HOMEPAGE = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset"
+ _HOMEPAGE = "https://github.com/JAugusto97/ToLD-Br"

- _LICENSE = "Unknown"
+ _LICENSE = "https://github.com/JAugusto97/ToLD-Br/blob/main/LICENSE_ToLD-Br.txt"

- _URL = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset/raw/master/2019-05-28_portuguese_hate_speech_binary_classification.csv"
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ _URLS = {
+     "multilabel": "https://raw.githubusercontent.com/JAugusto97/ToLD-Br/main/ToLD-BR.csv",
+     "binary": "https://github.com/JAugusto97/ToLD-Br/raw/main/experiments/data/1annotator.zip",
+ }


- class HateSpeechPortuguese(datasets.GeneratorBasedBuilder):
-     """Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""
+ class ToldBr(datasets.GeneratorBasedBuilder):
+     """Toxic/Abusive Tweets Classification Dataset for Brazilian Portuguese."""

      VERSION = datasets.Version("1.0.0")

+     # Either configuration can be loaded by name, e.g.:
+     # data = datasets.load_dataset("<path_to_this_dataset>", "multilabel")
+     # data = datasets.load_dataset("<path_to_this_dataset>", "binary")
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="multilabel",
+             version=VERSION,
+             description="""
+             Full multilabel dataset with target values ranging from 0 to 3,
+             representing the number of annotator votes for each category.
+             """,
+         ),
+         datasets.BuilderConfig(
+             name="binary",
+             version=VERSION,
+             description="""
+             Binary classification dataset split into train, dev and test sets.
+             A tweet is considered toxic if at least one annotator assigned it
+             at least one of the multilabel categories.
+             """,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "binary"
+
      def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
+         if self.config.name == "binary":
+             features = datasets.Features(
                  {
                      "text": datasets.Value("string"),
-                     "label": datasets.ClassLabel(names=["no-hate", "hate"]),
-                     "hatespeech_G1": datasets.Value("string"),
-                     "annotator_G1": datasets.Value("string"),
-                     "hatespeech_G2": datasets.Value("string"),
-                     "annotator_G2": datasets.Value("string"),
-                     "hatespeech_G3": datasets.Value("string"),
-                     "annotator_G3": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["not-toxic", "toxic"]),
                  }
-             ),
-             supervised_keys=("text", "label"),
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
+             )
+         else:
+             # Each category stores the number of annotator votes (0-3) as a ClassLabel index.
+             features = datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "homophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                     "obscene": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                     "insult": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                     "racism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                     "misogyny": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                     "xenophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
          )

      def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         data_file = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": data_file,
-                 },
-             ),
-         ]
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         if self.config.name == "binary":
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"filepath": os.path.join(data_dir, "1annotator/ptbr_train_1annotator.csv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"filepath": os.path.join(data_dir, "1annotator/ptbr_test_1annotator.csv")},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"filepath": os.path.join(data_dir, "1annotator/ptbr_validation_1annotator.csv")},
+                 ),
+             ]
+         else:
+             # The multilabel download is a single CSV file, so its path is passed straight through.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"filepath": data_dir},
+                 )
+             ]

      def _generate_examples(self, filepath):
-         """Yields examples."""
-
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.reader(f)
-             for id_, row in enumerate(reader):
-                 if id_ == 0:
-                     continue
-
-                 yield id_, {
-                     "text": row[0],
-                     "label": "hate" if row[1] == "1" else "no-hate",
-                     "hatespeech_G1": row[2],
-                     "annotator_G1": row[3],
-                     "hatespeech_G2": row[4],
-                     "annotator_G2": row[5],
-                     "hatespeech_G3": row[6],
-                     "annotator_G3": row[7],
+         df = pd.read_csv(filepath, engine="python")
+         for key, row in enumerate(df.itertuples()):
+             if self.config.name == "multilabel":
+                 yield key, {
+                     "text": row.text,
+                     # Vote counts arrive as floats (e.g. "1.0"); cast to int for the ClassLabel index.
+                     "homophobia": int(float(row.homophobia)),
+                     "obscene": int(float(row.obscene)),
+                     "insult": int(float(row.insult)),
+                     "racism": int(float(row.racism)),
+                     "misogyny": int(float(row.misogyny)),
+                     "xenophobia": int(float(row.xenophobia)),
                  }
+             else:
+                 yield key, {"text": row.text, "label": int(row.toxic)}
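
For reference, a minimal sketch of how the two configurations defined in this script could be loaded once it is published on the Hub. The repo id below is a guess inferred from the file name and committer, not something stated in this commit; adjust it to the actual location.

    from datasets import load_dataset

    # Hypothetical repo id -- not confirmed by this commit.
    REPO_ID = "victoriadreis/tupy"

    # Default "binary" config: train/validation/test splits with a two-class label.
    binary = load_dataset(REPO_ID, "binary")
    print(binary["train"].features["label"].names)  # ['not-toxic', 'toxic']

    # "multilabel" config: a single train split; each category is a ClassLabel
    # whose index encodes the number of annotator votes (0-3).
    multi = load_dataset(REPO_ID, "multilabel")
    example = multi["train"][0]
    vote_names = multi["train"].features["insult"].names  # ['zero_votes', ..., 'three_votes']
    print(example["text"], vote_names[example["insult"]])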