SamAdamDay committed
Commit 32a38db
1 Parent(s): 788de9a

Create new file

Files changed (1): wiki_toxic.py +111 -0
wiki_toxic.py ADDED
@@ -0,0 +1,111 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # TODO: Address all TODOs and remove all explanatory comments
+ """Jigsaw Toxic Comment Challenge dataset"""
+
+ import pandas as pd
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """"""
+
+ _DESCRIPTION = """\
+ Jigsaw Toxic Comment Challenge dataset. This dataset was the basis of a Kaggle competition run by Jigsaw.
+ """
+
+ _HOMEPAGE = "https://www.kaggle.com/competitions/jigsaw-toxic-comment-classification-challenge/data"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class WikiToxic(datasets.GeneratorBasedBuilder):
+     """Jigsaw Toxic Comment Challenge dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": "train.csv",
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": "validation.csv",
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": "test.csv",
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="balanced_train",
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": "balanced_train.csv",
+                     "split": "balanced_train",
+                 },
+             ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+
+         df = pd.read_csv(filepath)
+
+         for index, row in df.iterrows():
+             yield index, {
+                 "id": row["id"],
+                 "comment_text": row["comment_text"],
+                 "label": row["label"],
+             }
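
For reference, a loading script like this is normally exercised through `datasets.load_dataset`. The snippet below is a minimal usage sketch, not part of the commit: it assumes the script is saved locally as `wiki_toxic.py` and that the four CSV files it reads (`train.csv`, `validation.csv`, `test.csv`, `balanced_train.csv`) sit in the current working directory, since `_generate_examples` opens them with relative paths.

# Minimal usage sketch (not part of the commit). Assumes wiki_toxic.py and the
# CSV files it references are available in the current working directory.
# Newer versions of `datasets` may additionally require trust_remote_code=True
# when loading a local script.
import datasets

# Loading the local script builds every split declared in _split_generators
# by calling _generate_examples on the matching CSV file.
dataset = datasets.load_dataset("./wiki_toxic.py")

print(dataset)                    # DatasetDict with train/validation/test/balanced_train
print(dataset["train"][0])        # e.g. {"id": ..., "comment_text": ..., "label": ...}
print(dataset["balanced_train"])  # the extra split defined alongside the standard three

Because `_info` leaves `features` unspecified, the column types are inferred from the generated examples rather than declared up front; that is why the example above simply prints the first record to inspect the schema.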