"""Dataset loading script metadata for the Conceptual Captions dataset."""

import datasets
import webdataset as wds

# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = 'Conceptual Captions, a new dataset consisting of ~3.3M images annotated with captions. In contrast with the curated style of other image caption annotations, Conceptual Caption images and their raw descriptions are harvested from the web, and therefore represent a wider variety of styles. More precisely, the raw descriptions are harvested from the Alt-text HTML attribute associated with web images. To arrive at the current version of the captions, we have developed an automatic pipeline that extracts, filters, and transforms candidate image/caption pairs, with the goal of achieving a balance of cleanliness, informativeness, fluency, and learnability of the resulting captions.'

# Official project page.
_HOMEPAGE = 'https://ai.google.com/research/ConceptualCaptions/'

# License text as published by Google.
_LICENSE = 'The dataset may be freely used for any purpose, although acknowledgement of Google LLC ("Google") as the data source would be appreciated. The dataset is provided "AS IS" without any warranty, express or implied. Google disclaims all liability for any damages, direct or indirect, resulting from the use of the dataset.'
# BibTeX citation surfaced through DatasetInfo.
_CITATION = ''' @inproceedings{sharma2018conceptual, title={Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning}, author={Sharma, Piyush and Ding, Nan and Goodman, Sebastian and Soricut, Radu}, booktitle={Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, pages={2556--2565}, year={2018} } '''

# 332 WebDataset tar shards hosted on the Hugging Face Hub, named 00000.tar .. 00331.tar.
_DATA_URL = [
    f'https://huggingface.co./datasets/patrickramos/conceptual_captions/resolve/main/data/{str(i).zfill(5)}.tar'
    for i in range(332)
]


class ConceptualCaptions(datasets.GeneratorBasedBuilder):
    """Builder for the Conceptual Captions image/caption dataset.

    The data is stored as WebDataset tar shards; each sample yields an
    image (jpg or png) and its caption (txt).
    """

    def _info(self):
        """Return the DatasetInfo describing features, license, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image': datasets.Image(),
                'caption': datasets.Value('string'),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all tar shards and expose them as a single train split.

        `dl_manager.download` returns the local paths of the downloaded
        shards; the whole list is handed to `_generate_examples`.
        """
        tars = dl_manager.download(_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'file_path': tars},
            )
        ]

    def _generate_examples(self, file_path):
        """Yield (key, example) pairs from the WebDataset shards.

        Args:
            file_path: path or list of paths to downloaded tar shards.

        Images are decoded to PIL objects ('pil' decoder); each sample is
        reduced to an (image, caption) tuple keyed by its enumeration index.
        """
        webdataset = (
            wds.WebDataset(file_path)
            .decode('pil')
            .to_tuple('jpg;png', 'txt')
        )
        # NOTE: removed a leftover debug `print(webdataset)` from the original.
        for i, (image, caption) in enumerate(webdataset):
            yield i, {'image': image, 'caption': caption}