victoriadreis committed on
Commit
bc9ddc5
·
1 Parent(s): 99eed19

Update tupy.py

Browse files
Files changed (1) hide show
  1. tupy.py +88 -95
tupy.py CHANGED
@@ -1,118 +1,111 @@
1
- import os
2
- import pandas as pd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import datasets
4
 
5
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
# NOTE(review): this is still the unfilled cookiecutter placeholder.
_CITATION = """\
# Add your citation information here
"""

# NOTE(review): placeholder description — never filled in.
_DESCRIPTION = """\
# Add your dataset description here
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/JAugusto97/ToLD-Br"

# TODO: Add the license for the dataset here if you can find it
# NOTE(review): the trailing space inside the URL string is kept verbatim — confirm it is intentional.
_LICENSE = "https://github.com/JAugusto97/ToLD-Br/blob/main/LICENSE_ToLD-Br.txt "

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# "multilabel" points at a single raw CSV; "binary" at a zip archive of split files.
_URLS = {
    "multilabel": "https://raw.githubusercontent.com/JAugusto97/ToLD-Br/main/ToLD-BR.csv",
    "binary": "https://github.com/JAugusto97/ToLD-Br/raw/main/experiments/data/1annotator.zip",
}
29
 
30
class ToldBr(datasets.GeneratorBasedBuilder):
    """Toxic/Abusive Tweets Classification Dataset for Brazilian Portuguese.

    Exposes ToLD-Br in two configurations:
      * "multilabel" — one example per tweet, with a 0-3 vote count per toxicity category.
      * "binary"     — train/validation/test splits with a single not-hate/hate label.

    NOTE(review): no ``_generate_examples`` is visible in this span, yet
    ``GeneratorBasedBuilder`` requires one — confirm it exists elsewhere or
    that this class is intentionally unfinished.
    """

    VERSION = datasets.Version("1.0.0")

    # Two alternative views of the same corpus; see _URLS for the file backing each.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="multilabel",
            version=VERSION,
            description="""
            Full multilabel dataset with target values ranging
            from 0 to 3 representing the votes from each annotator.
            """,
        ),
        datasets.BuilderConfig(
            name="binary",
            version=VERSION,
            description="""
            Binary classification dataset version separated into train, dev, and test sets.
            A text is considered toxic if at least one of the multilabel classes was labeled
            by at least one annotator.
            """,
        ),
    ]

    DEFAULT_CONFIG_NAME = "binary"

    def _info(self):
        """Return the DatasetInfo; the feature schema depends on the selected config."""
        if self.config.name == "binary":
            # Single two-way label per tweet.
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["not-hate", "hate"]),
                }
            )
        else:
            # One vote-count class (how many of the 3 annotators flagged it) per category.
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "aggressive": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "hate": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "ageism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "aporophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "body_shame": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "capacitism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "lgbtphobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "political": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "racism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "religious_intolerance": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "misogyny": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "xenophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "other": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download the config-specific archive and declare the splits.

        NOTE(review): the file names below ("path_to_binary_train.csv", etc.)
        read like unfilled template placeholders — verify the real member
        names of the downloaded archive before relying on this.
        """
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        if self.config.name == "binary":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_train.csv")},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_test.csv")},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_validation.csv")},
                ),
            ]
        else:
            # NOTE(review): the multilabel URL is a single CSV, so `data_dir` is the
            # downloaded file itself; joining another file name onto it is suspicious — verify.
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "path_to_multilabel.csv"),
                    },
                )
            ]
116
-
117
-
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""
16
+
17
+
18
+ import csv
19
+
20
  import datasets
21
 
22
+
 
23
  _CITATION = """\
24
+ @inproceedings{fortuna-etal-2019-hierarchically,
25
+ title = "A Hierarchically-Labeled {P}ortuguese Hate Speech Dataset",
26
+ author = "Fortuna, Paula and
27
+ Rocha da Silva, Jo{\\~a}o and
28
+ Soler-Company, Juan and
29
+ Wanner, Leo and
30
+ Nunes, S{\'e}rgio",
31
+ booktitle = "Proceedings of the Third Workshop on Abusive Language Online",
32
+ month = aug,
33
+ year = "2019",
34
+ address = "Florence, Italy",
35
+ publisher = "Association for Computational Linguistics",
36
+ url = "https://www.aclweb.org/anthology/W19-3510",
37
+ doi = "10.18653/v1/W19-3510",
38
+ pages = "94--104",
39
+ abstract = "Over the past years, the amount of online offensive speech has been growing steadily. To successfully cope with it, machine learning are applied. However, ML-based techniques require sufficiently large annotated datasets. In the last years, different datasets were published, mainly for English. In this paper, we present a new dataset for Portuguese, which has not been in focus so far. The dataset is composed of 5,668 tweets. For its annotation, we defined two different schemes used by annotators with different levels of expertise. Firstly, non-experts annotated the tweets with binary labels ({`}hate{'} vs. {`}no-hate{'}). Secondly, expert annotators classified the tweets following a fine-grained hierarchical multiple label scheme with 81 hate speech categories in total. The inter-annotator agreement varied from category to category, which reflects the insight that some types of hate speech are more subtle than others and that their detection depends on personal perception. This hierarchical annotation scheme is the main contribution of the presented work, as it facilitates the identification of different types of hate speech and their intersections. To demonstrate the usefulness of our dataset, we carried a baseline classification experiment with pre-trained word embeddings and LSTM on the binary classified data, with a state-of-the-art outcome.",
40
+ }
41
  """
42
 
43
# Human-readable summary used as the `description` field of the DatasetInfo.
_DESCRIPTION = """\
Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate').
"""

# Project page of the original dataset authors.
_HOMEPAGE = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset"

# NOTE(review): no explicit license was recorded here — confirm against the upstream repository.
_LICENSE = "Unknown"

# Direct link to the raw CSV carrying the binary (hate / no-hate) annotations.
_URL = "https://github.com/paulafortuna/Portuguese-Hate-Speech-Dataset/raw/master/2019-05-28_portuguese_hate_speech_binary_classification.csv"
 
 
52
 
 
 
 
 
53
 
54
class HateSpeechPortuguese(datasets.GeneratorBasedBuilder):
    """Portuguese dataset for hate speech detection composed of 5,668 tweets with binary annotations (i.e. 'hate' vs. 'no-hate')."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the example schema: tweet text, binary label, plus the raw
        per-group annotation and annotator columns carried over from the CSV."""
        feature_spec = {
            "text": datasets.Value("string"),
            "label": datasets.ClassLabel(names=["no-hate", "hate"]),
        }
        # CSV columns 2-7: annotation value and annotator id for groups 1-3.
        for group in (1, 2, 3):
            feature_spec[f"hatespeech_G{group}"] = datasets.Value("string")
            feature_spec[f"annotator_G{group}"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_spec),
            supervised_keys=("text", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single upstream CSV and expose it as the TRAIN split."""
        csv_path = dl_manager.download_and_extract(_URL)
        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": csv_path},
        )
        return [train_split]

    def _generate_examples(self, filepath):
        """Yield (id, example) pairs; ids start at 1 because row 0 is the CSV header."""
        with open(filepath, encoding="utf-8") as handle:
            rows = csv.reader(handle)
            next(rows, None)  # drop the header row
            for key, record in enumerate(rows, start=1):
                example = {
                    "text": record[0],
                    # Column 1 holds "1" for hate, anything else maps to no-hate.
                    "label": "hate" if record[1] == "1" else "no-hate",
                }
                for group in (1, 2, 3):
                    example[f"hatespeech_G{group}"] = record[2 * group]
                    example[f"annotator_G{group}"] = record[2 * group + 1]
                yield key, example