Datasets:

ArXiv:
License:
VictorSanh commited on
Commit
0afc661
1 Parent(s): b6617ce

coco dataset - 2014 subset with karpathy annotations/splits

Browse files
Files changed (2) hide show
  1. COCO.py +176 -0
  2. README.md +143 -0
COCO.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """COCO"""
16
+ import json
17
+ import os
18
+ from pathlib import Path
19
+
20
+ import datasets
21
+
22
+
23
# BibTeX entry for the MS COCO paper (Lin et al., 2014), surfaced via
# `datasets.DatasetInfo.citation`.
_CITATION = """
@article{DBLP:journals/corr/LinMBHPRDZ14,
  author = {Tsung{-}Yi Lin and
            Michael Maire and
            Serge J. Belongie and
            Lubomir D. Bourdev and
            Ross B. Girshick and
            James Hays and
            Pietro Perona and
            Deva Ramanan and
            Piotr Doll{\'{a}}r and
            C. Lawrence Zitnick},
  title = {Microsoft {COCO:} Common Objects in Context},
  journal = {CoRR},
  volume = {abs/1405.0312},
  year = {2014},
  url = {http://arxiv.org/abs/1405.0312},
  eprinttype = {arXiv},
  eprint = {1405.0312},
  timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
  biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

_DESCRIPTION = """
MS COCO is a large-scale object detection, segmentation, and captioning dataset.
COCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints.
"""

_HOMEPAGE = "https://cocodataset.org/#home"

_LICENSE = "CC BY 4.0"


# Official COCO 2014 image archives, one zip per dataset split.
_IMAGES_URLS = {
    "train": "http://images.cocodataset.org/zips/train2014.zip",
    "validation": "http://images.cocodataset.org/zips/val2014.zip",
}

# Karpathy caption annotations and splits; the zip contains dataset_coco.json.
_KARPATHY_FILES_URL = "https://cs.stanford.edu/people/karpathy/deepimagesent/caption_datasets.zip"

# Maps a split key to the folder name found inside the extracted image zip.
_SPLIT_MAP = {"train": "train2014", "validation": "val2014"}

# Feature schema: one example per (image, caption) pair, so image-level
# fields (sentids, imgid, cocoid, ...) repeat across the image's captions.
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "filepath": datasets.Value("string"),
        "sentids": [datasets.Value("int32")],
        "filename": datasets.Value("string"),
        "imgid": datasets.Value("int32"),
        "split": datasets.Value("string"),
        "sentences": {
            "tokens": [datasets.Value("string")],
            "raw": datasets.Value("string"),
            "imgid": datasets.Value("int32"),
            "sentid": datasets.Value("int32"),
        },
        "cocoid": datasets.Value("int32"),
    }
)
84
+
85
+
86
class COCO(datasets.GeneratorBasedBuilder):
    """COCO 2014 captioning dataset with the Karpathy annotations and splits."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="2014",
            version=VERSION,
            description="2014 version of COCO with Karpathy annotations and splits",
        ),
    ]

    DEFAULT_CONFIG_NAME = "2014"

    def _info(self):
        """Return dataset metadata: description, schema, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the Karpathy annotation zip and both image zips, then declare splits."""
        annotation_file = os.path.join(
            dl_manager.download_and_extract(_KARPATHY_FILES_URL), "dataset_coco.json"
        )
        image_folders = {
            split: Path(folder)
            for split, folder in dl_manager.download_and_extract(_IMAGES_URLS).items()
        }

        # All three splits read the same annotation file; only `split_key` differs.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "annotation_file": annotation_file,
                    "image_folders": image_folders,
                    "split_key": split_key,
                },
            )
            for split_name, split_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, annotation_file, image_folders, split_key):
        """Yield one (key, example) pair per caption of every image in `split_key`.

        Keys are a running counter over captions; image-level metadata is
        repeated on each of the image's caption examples.
        """
        # Karpathy split labels accepted for each dataset split. "restval" is
        # folded into training, mirroring the original elif chain; an unknown
        # split_key applies no filter, exactly as the original fall-through did.
        accepted_labels = {
            "train": ("train", "restval"),
            "validation": ("val",),
            "test": ("test",),
        }.get(split_key)

        with open(annotation_file, "r", encoding="utf-8") as fi:
            annotations = json.load(fi)

        counter = 0
        for image_metadata in annotations["images"]:
            if accepted_labels is not None and image_metadata["split"] not in accepted_labels:
                continue

            # "restval" images physically live in the val2014 archive even when
            # used for training, so route by filename rather than by split label.
            source = "validation" if "val2014" in image_metadata["filename"] else "train"
            image_path = (
                image_folders[source] / _SPLIT_MAP[source] / image_metadata["filename"]
            )

            for caption in image_metadata["sentences"]:
                yield counter, {
                    "image": str(image_path.absolute()),
                    "filepath": image_metadata["filename"],
                    "sentids": image_metadata["sentids"],
                    "filename": image_metadata["filename"],
                    "imgid": image_metadata["imgid"],
                    "split": image_metadata["split"],
                    "sentences": {
                        "tokens": caption["tokens"],
                        "raw": caption["raw"],
                        "imgid": caption["imgid"],
                        "sentid": caption["sentid"],
                    },
                    "cocoid": image_metadata["cocoid"],
                }
                counter += 1
README.md CHANGED
@@ -1,3 +1,146 @@
1
  ---
2
  license: cc-by-4.0
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: cc-by-4.0
3
  ---
4
+
5
+ # Dataset Card for COCO
6
+
7
+ ## Table of Contents
8
+ - [Table of Contents](#table-of-contents)
9
+ - [Dataset Description](#dataset-description)
10
+ - [Dataset Summary](#dataset-summary)
11
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
12
+ - [Languages](#languages)
13
+ - [Dataset Structure](#dataset-structure)
14
+ - [Data Instances](#data-instances)
15
+ - [Data Fields](#data-fields)
16
+ - [Data Splits](#data-splits)
17
+ - [Dataset Creation](#dataset-creation)
18
+ - [Curation Rationale](#curation-rationale)
19
+ - [Source Data](#source-data)
20
+ - [Annotations](#annotations)
21
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
22
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
23
+ - [Social Impact of Dataset](#social-impact-of-dataset)
24
+ - [Discussion of Biases](#discussion-of-biases)
25
+ - [Other Known Limitations](#other-known-limitations)
26
+ - [Additional Information](#additional-information)
27
+ - [Dataset Curators](#dataset-curators)
28
+ - [Licensing Information](#licensing-information)
29
+ - [Citation Information](#citation-information)
30
+ - [Contributions](#contributions)
31
+
32
+ ## Dataset Description
33
+
34
+ - **Homepage:** [https://cocodataset.org/](https://cocodataset.org/)
35
+ - **Repository:**
36
+ - **Paper:** [Microsoft COCO: Common Objects in Context](https://arxiv.org/abs/1405.0312)
37
+ - **Leaderboard:**
38
+ - **Point of Contact:**
39
+
40
+ ### Dataset Summary
41
+
42
+ MS COCO is a large-scale object detection, segmentation, and captioning dataset.
43
+ COCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints.
44
+
45
+ As of now, there is only the 2014 subset (with Karpathy annotations and splits), but feel free to contribute the 2017 subset of COCO!
46
+
47
+ ### Supported Tasks and Leaderboards
48
+
49
+ [More Information Needed]
50
+
51
+ ### Languages
52
+
53
+ [More Information Needed]
54
+
55
+ ## Dataset Structure
56
+
57
+ ### Data Instances
58
+
59
+ Each instance has the following structure:
60
+ ```
61
+ {
62
+ 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x480 at 0x7F69C1BA8550>,
63
+ 'filepath': 'COCO_val2014_000000522418.jpg',
64
+ 'sentids': [681330, 686718, 688839, 693159, 693204],
65
+ 'filename': 'COCO_val2014_000000522418.jpg',
66
+ 'imgid': 1,
67
+ 'split': 'restval',
68
+ 'sentences': {
69
+ 'tokens': ['a', 'woman', 'wearing', 'a', 'net', 'on', 'her', 'head', 'cutting', 'a', 'cake'],
70
+ 'raw': 'A woman wearing a net on her head cutting a cake. ',
71
+ 'imgid': 1,
72
+ 'sentid': 681330
73
+ },
74
+ 'cocoid': 522418
75
+ }
76
+ ```
77
+
78
+ ### Data Fields
79
+
80
+ [More Information Needed]
81
+
82
+ ### Data Splits
83
+
84
+ [More Information Needed]
85
+
86
+ ## Dataset Creation
87
+
88
+ ### Curation Rationale
89
+
90
+ [More Information Needed]
91
+
92
+ ### Source Data
93
+
94
+ #### Initial Data Collection and Normalization
95
+
96
+ [More Information Needed]
97
+
98
+ #### Who are the source language producers?
99
+
100
+ [More Information Needed]
101
+
102
+ ### Annotations
103
+
104
+ #### Annotation process
105
+
106
+ [More Information Needed]
107
+
108
+ #### Who are the annotators?
109
+
110
+ [More Information Needed]
111
+
112
+ ### Personal and Sensitive Information
113
+
114
+ [More Information Needed]
115
+
116
+ ## Considerations for Using the Data
117
+
118
+ ### Social Impact of Dataset
119
+
120
+ [More Information Needed]
121
+
122
+ ### Discussion of Biases
123
+
124
+ [More Information Needed]
125
+
126
+ ### Other Known Limitations
127
+
128
+ [More Information Needed]
129
+
130
+ ## Additional Information
131
+
132
+ ### Dataset Curators
133
+
134
+ [More Information Needed]
135
+
136
+ ### Licensing Information
137
+
138
+ CC BY 4.0
139
+
140
+ ### Citation Information
141
+
142
+ [More Information Needed]
143
+
144
+ ### Contributions
145
+
146
+ Thanks to [@VictorSanh](https://github.com/VictorSanh) for adding this dataset.