Datasets:

Modalities:
Text
Size:
< 1K
Libraries:
Datasets
dibyaaaaax committed on
Commit
6724b7a
·
1 Parent(s): 5edd0fb

Upload citeulike180.py

Browse files
Files changed (1) hide show
  1. citeulike180.py +144 -0
citeulike180.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+
4
+ # _SPLIT = ['test']
5
+ _CITATION = """\
6
+ @inproceedings{medelyan-etal-2009-human,
7
+ title = "Human-competitive tagging using automatic keyphrase extraction",
8
+ author = "Medelyan, Olena and
9
+ Frank, Eibe and
10
+ Witten, Ian H.",
11
+ booktitle = "Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing",
12
+ month = aug,
13
+ year = "2009",
14
+ address = "Singapore",
15
+ publisher = "Association for Computational Linguistics",
16
+ url = "https://aclanthology.org/D09-1137",
17
+ pages = "1318--1327",
18
+ }
19
+
20
+ """
21
+
22
+ _DESCRIPTION = """\
23
+
24
+ """
25
+
26
+ _HOMEPAGE = ""
27
+
28
+ # TODO: Add the licence for the dataset here if you can find it
29
+ _LICENSE = ""
30
+
31
+ # TODO: Add link to the official dataset URLs here
32
+
33
+ _URLS = {
34
+ "test": "test.jsonl"
35
+ }
36
+
37
+
38
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
39
+ class Citeulike180(datasets.GeneratorBasedBuilder):
40
+ """TODO: Short description of my dataset."""
41
+
42
+ VERSION = datasets.Version("0.0.1")
43
+
44
+ BUILDER_CONFIGS = [
45
+ datasets.BuilderConfig(name="extraction", version=VERSION,
46
+ description="This part of my dataset covers extraction"),
47
+ datasets.BuilderConfig(name="generation", version=VERSION,
48
+ description="This part of my dataset covers generation"),
49
+ datasets.BuilderConfig(name="raw", version=VERSION, description="This part of my dataset covers the raw data"),
50
+ ]
51
+
52
+ DEFAULT_CONFIG_NAME = "extraction"
53
+
54
+ def _info(self):
55
+ if self.config.name == "extraction": # This is the name of the configuration selected in BUILDER_CONFIGS above
56
+ features = datasets.Features(
57
+ {
58
+ "id": datasets.Value("int64"),
59
+ "document": datasets.features.Sequence(datasets.Value("string")),
60
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string"))
61
+
62
+ }
63
+ )
64
+ elif self.config.name == "generation":
65
+ features = datasets.Features(
66
+ {
67
+ "id": datasets.Value("int64"),
68
+ "document": datasets.features.Sequence(datasets.Value("string")),
69
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
70
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string"))
71
+
72
+ }
73
+ )
74
+ else:
75
+ features = datasets.Features(
76
+ {
77
+ "id": datasets.Value("int64"),
78
+ "document": datasets.features.Sequence(datasets.Value("string")),
79
+ "doc_bio_tags": datasets.features.Sequence(datasets.Value("string")),
80
+ "extractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
81
+ "abstractive_keyphrases": datasets.features.Sequence(datasets.Value("string")),
82
+ "other_metadata": datasets.features.Sequence(
83
+ {
84
+ "text": datasets.features.Sequence(datasets.Value("string")),
85
+ "bio_tags": datasets.features.Sequence(datasets.Value("string"))
86
+ }
87
+ )
88
+
89
+ }
90
+ )
91
+ return datasets.DatasetInfo(
92
+ # This is the description that will appear on the datasets page.
93
+ description=_DESCRIPTION,
94
+ # This defines the different columns of the dataset and their types
95
+ features=features,
96
+ homepage=_HOMEPAGE,
97
+ # License for the dataset if available
98
+ license=_LICENSE,
99
+ # Citation for the dataset
100
+ citation=_CITATION,
101
+ )
102
+
103
+ def _split_generators(self, dl_manager):
104
+
105
+ data_dir = dl_manager.download_and_extract(_URLS)
106
+ return [
107
+ datasets.SplitGenerator(
108
+ name=datasets.Split.TEST,
109
+ # These kwargs will be passed to _generate_examples
110
+ gen_kwargs={
111
+ "filepath": data_dir['test'],
112
+ "split": "test"
113
+ },
114
+ ),
115
+ ]
116
+
117
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
118
+ def _generate_examples(self, filepath, split):
119
+ with open(filepath, encoding="utf-8") as f:
120
+ for key, row in enumerate(f):
121
+ data = json.loads(row)
122
+ if self.config.name == "extraction":
123
+ # Yields examples as (key, example) tuples
124
+ yield key, {
125
+ "id": data['paper_id'],
126
+ "document": data["document"],
127
+ "doc_bio_tags": data.get("doc_bio_tags")
128
+ }
129
+ elif self.config.name == "generation":
130
+ yield key, {
131
+ "id": data['paper_id'],
132
+ "document": data["document"],
133
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
134
+ "abstractive_keyphrases": data.get("abstractive_keyphrases")
135
+ }
136
+ else:
137
+ yield key, {
138
+ "id": data['paper_id'],
139
+ "document": data["document"],
140
+ "doc_bio_tags": data.get("doc_bio_tags"),
141
+ "extractive_keyphrases": data.get("extractive_keyphrases"),
142
+ "abstractive_keyphrases": data.get("abstractive_keyphrases"),
143
+ "other_metadata": data["other_metadata"]
144
+ }