victoriadreis committed
Commit 4bf6ee4
1 Parent(s): edcbb8a

Upload tupy.py

Files changed (1)
tupy.py +118 -0
tupy.py ADDED
@@ -0,0 +1,118 @@
import os
import pandas as pd
import datasets

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
# Add your citation information here
"""

_DESCRIPTION = """\
# Add your dataset description here
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://github.com/JAugusto97/ToLD-Br"

# TODO: Add the license for the dataset here if you can find it
_LICENSE = "https://github.com/JAugusto97/ToLD-Br/blob/main/LICENSE_ToLD-Br.txt"

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)

_URLS = {
    "multilabel": "https://raw.githubusercontent.com/JAugusto97/ToLD-Br/main/ToLD-BR.csv",
    "binary": "https://github.com/JAugusto97/ToLD-Br/raw/main/experiments/data/1annotator.zip",
}

class ToldBr(datasets.GeneratorBasedBuilder):
    """Toxic/Abusive Tweets Classification Dataset for Brazilian Portuguese."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="multilabel",
            version=VERSION,
            description="""
                Full multilabel dataset with target values ranging
                from 0 to 3 representing the votes from each annotator.
            """,
        ),
        datasets.BuilderConfig(
            name="binary",
            version=VERSION,
            description="""
                Binary classification dataset version separated into train, dev, and test sets.
                A text is considered toxic if at least one of the multilabel classes was labeled
                by at least one annotator.
            """,
        ),
    ]

    DEFAULT_CONFIG_NAME = "binary"

    def _info(self):
        if self.config.name == "binary":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["not-hate", "hate"]),
                }
            )
        else:
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "aggressive": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "hate": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "ageism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "aporophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "body_shame": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "capacitism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "lgbtphobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "political": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "racism": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "religious_intolerance": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "misogyny": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "xenophobia": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                    "other": datasets.ClassLabel(names=["zero_votes", "one_vote", "two_votes", "three_votes"]),
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        if self.config.name == "binary":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_train.csv")},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_test.csv")},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": os.path.join(data_dir, "path_to_binary_validation.csv")},
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "path_to_multilabel.csv"),
                    },
                )
            ]
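As uploaded, the listing ends after `_split_generators`: there is no `_generate_examples` method, even though `datasets.GeneratorBasedBuilder` requires one to yield examples, and that method is presumably where the `pandas` import would be used. The sketch below is not part of this commit; it only illustrates one way such a method could look, assuming each CSV carries a `text` column plus label columns named after the features declared in `_info`, with integer-encoded values. Those column names and encodings are assumptions.

    # Hypothetical sketch, not part of the commit; column names are assumed.
    def _generate_examples(self, filepath):
        df = pd.read_csv(filepath)
        if self.config.name == "binary":
            for idx, row in df.iterrows():
                # Assumes a 0/1 "label" column mapping onto ["not-hate", "hate"].
                yield idx, {"text": row["text"], "label": int(row["label"])}
        else:
            vote_columns = [
                "aggressive", "hate", "ageism", "aporophobia", "body_shame",
                "capacitism", "lgbtphobia", "political", "racism",
                "religious_intolerance", "misogyny", "xenophobia", "other",
            ]
            for idx, row in df.iterrows():
                example = {"text": row["text"]}
                for col in vote_columns:
                    # ClassLabel features accept the integer index directly (0-3 votes).
                    example[col] = int(row[col])
                yield idx, example

Even with such a method, the `filepath` values coming from `_split_generators` (`path_to_binary_train.csv` and the other `path_to_*` names) are still template placeholders and would need to point at the real files extracted from `1annotator.zip`; for the `multilabel` config, `download_and_extract` returns the path of the downloaded `ToLD-BR.csv` itself, so no `os.path.join` would be needed there. Only then could the script be loaded locally, e.g. with `datasets.load_dataset("tupy.py", "binary")`.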