andreaschandra committed on
Commit
b54d592
1 Parent(s): 297eb1e
Files changed (3)
  1. README.md +147 -1
  2. dataset_infos.json +1 -0
  3. indoqa.py +74 -0
README.md CHANGED
@@ -1,3 +1,149 @@
  ---
- license: cc-by-nc-nd-4.0
+ annotations_creators:
+ - expert-generated
+ language:
+ - id
+ language_creators:
+ - found
+ license:
+ - cc-by-nd-4.0
+ multilinguality:
+ - monolingual
+ pretty_name: Indonesian Question Answering Dataset
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ tags:
+ - indoqa
+ - qa
+ - question-answering
+ - indonesian
+ task_categories:
+ - question-answering
+ task_ids:
+ - extractive-qa
  ---
+ 
+ # Dataset Card for Indonesian Question Answering Dataset
+ 
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+ 
+ ## Dataset Description
+ 
+ - **Homepage:** https://github.com/jakartaresearch
+ - **Repository:**
+ - **Paper:**
+ - **Leaderboard:**
+ - **Point of Contact:**
+ 
+ ### Dataset Summary
+ 
+ IndoQA is a monolingual, expert-annotated extractive question answering dataset for Indonesian. It contains 3,249 training and 1,084 validation examples, each pairing a context passage with a question, the answer text, a category label, and the character span of the answer.
+ 
+ ### Supported Tasks and Leaderboards
+ 
+ - `question-answering` (`extractive-qa`): the dataset can be used to train and evaluate models that extract an answer span from a given Indonesian passage.
+ 
+ ### Languages
+ 
+ The dataset is monolingual Indonesian (`id`).
+ 
+ ## Dataset Structure
+ 
+ ### Data Instances
+ 
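+ A hypothetical instance illustrating the declared fields (the values below are invented for illustration; only the field names and types come from the dataset loader):
+ 
+ ```json
+ {
+   "id": "1",
+   "context": "Jakarta adalah ibu kota Indonesia.",
+   "question": "Apa ibu kota Indonesia?",
+   "answer": "Jakarta",
+   "category": "geografi",
+   "span_start": 0,
+   "span_end": 7
+ }
+ ```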
+ ### Data Fields
+ 
+ - `id` (string): unique identifier of the example.
+ - `context` (string): the passage the question is asked about.
+ - `question` (string): the question, in Indonesian.
+ - `answer` (string): the answer text.
+ - `category` (string): the topic category of the example.
+ - `span_start` (int16): start character offset of the answer span.
+ - `span_end` (int16): end character offset of the answer span.
+ 
+ ### Data Splits
+ 
+ | Split      | Examples |
+ | ---------- | -------: |
+ | train      |    3,249 |
+ | validation |    1,084 |
+ 
+ ## Dataset Creation
+ 
+ ### Curation Rationale
+ 
+ [More Information Needed]
+ 
+ ### Source Data
+ 
+ #### Initial Data Collection and Normalization
+ 
+ [More Information Needed]
+ 
+ #### Who are the source language producers?
+ 
+ [More Information Needed]
+ 
+ ### Annotations
+ 
+ #### Annotation process
+ 
+ [More Information Needed]
+ 
+ #### Who are the annotators?
+ 
+ The metadata above lists the annotations as expert-generated.
+ 
+ ### Personal and Sensitive Information
+ 
+ [More Information Needed]
+ 
+ ## Considerations for Using the Data
+ 
+ ### Social Impact of Dataset
+ 
+ [More Information Needed]
+ 
+ ### Discussion of Biases
+ 
+ [More Information Needed]
+ 
+ ### Other Known Limitations
+ 
+ [More Information Needed]
+ 
+ ## Additional Information
+ 
+ ### Dataset Curators
+ 
+ [More Information Needed]
+ 
+ ### Licensing Information
+ 
+ The dataset is released under the CC BY-ND 4.0 license, per the metadata above.
+ 
+ ### Citation Information
+ 
+ [More Information Needed]
+ 
+ ### Contributions
+ 
+ Thanks to [@fhrzn](https://github.com/fhrzn), [@Kalzaik](https://github.com/Kalzaik), [@ibamibrahim](https://github.com/ibamibrahim), and [@andreaschandra](https://github.com/andreaschandra) for adding this dataset.
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "This dataset is built for the question answering task.\n", "citation": "", "homepage": "https://github.com/jakartaresearch", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "category": {"dtype": "string", "id": null, "_type": "Value"}, "span_start": {"dtype": "int16", "id": null, "_type": "Value"}, "span_end": {"dtype": "int16", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "indoqa", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2497901, "num_examples": 3249, "dataset_name": "indoqa"}, "validation": {"name": "validation", "num_bytes": 829111, "num_examples": 1084, "dataset_name": "indoqa"}}, "download_checksums": {"https://drive.google.com/uc?id=1P5qyZQ2J4DoIiQtjvX_HG_qdEAEjxEGd": {"num_bytes": 2732563, "checksum": "ce9dc5720721a1ef71fcd4394867978ae4ac6b1540fbf4875007c4d2da906b79"}, "https://drive.google.com/uc?id=1rCzF8EJTLvOd0ppPgRI5RSr1NwXiCree": {"num_bytes": 907130, "checksum": "d2149c708735c0b568850402fa135eae7fe0c4b2743f90d1258592731946bd9c"}}, "download_size": 3639693, "post_processing_size": null, "dataset_size": 3327012, "size_in_bytes": 6966705}}
indoqa.py ADDED
@@ -0,0 +1,74 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """IndoQA: Indonesian Question Answering Dataset."""
+ 
+ 
+ import json
+ 
+ import datasets
+ 
+ _DESCRIPTION = """\
+ This dataset is built for the question answering task.
+ """
+ 
+ _HOMEPAGE = "https://github.com/jakartaresearch"
+ 
+ _TRAIN_URL = "https://drive.google.com/uc?id=1P5qyZQ2J4DoIiQtjvX_HG_qdEAEjxEGd"
+ _VAL_URL = "https://drive.google.com/uc?id=1rCzF8EJTLvOd0ppPgRI5RSr1NwXiCree"
+ 
+ 
+ class Indoqa(datasets.GeneratorBasedBuilder):
+     """IndoQA: Indonesian Question Answering Dataset."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "context": datasets.Value("string"),
+                 "question": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+                 "category": datasets.Value("string"),
+                 "span_start": datasets.Value("int16"),
+                 "span_end": datasets.Value("int16"),
+             }
+         )
+ 
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         # The download manager fetches (and caches) the split files from Google Drive.
+         train_path = dl_manager.download_and_extract(_TRAIN_URL)
+         val_path = dl_manager.download_and_extract(_VAL_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}),
+         ]
+ 
+     # `filepath` is unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath):
+         """Yield (key, example) pairs from a JSON list of records."""
+         with open(filepath, encoding="utf-8") as file:
+             contents = json.load(file)
+         for id_, row in enumerate(contents):
+             yield id_, row
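
To sanity-check the loader end to end, a minimal sketch (assuming this `indoqa.py` sits in the working directory and a `datasets` version that still supports script-based loading; newer releases additionally require `trust_remote_code=True`):

```python
from datasets import load_dataset

# Load both splits through the local loading script.
dataset = load_dataset("./indoqa.py")

print(dataset)  # expect: train (3249 rows) and validation (1084 rows)

example = dataset["train"][0]
print(example["question"])
print(example["answer"])
# If the span offsets index into `context` (an assumption), this slice
# should reproduce the answer text:
print(example["context"][example["span_start"]:example["span_end"]])
```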