Datasets:

Languages:
English
ArXiv:
License:
yasumasaonoe committed on
Commit
17db692
·
verified ·
1 Parent(s): 0004c2e

Delete docci.py

Browse files

Clean up old files

Files changed (1) hide show
  1. docci.py +0 -67
docci.py DELETED
@@ -1,67 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import os
17
- import pandas as pd
18
- import datasets
19
- from huggingface_hub import hf_hub_url
20
-
21
- _INPUT_CSV = "docci.csv" # Update the CSV file name
22
- _INPUT_IMAGES = "docci_images" # Update the images directory name
23
- _REPO_ID = "yonatanbitton/docci" # Update the repository ID
24
- _SUFFIX = 'jpg'
25
-
26
class Dataset(datasets.GeneratorBasedBuilder):
    """Builder for the DOCCI image-description dataset.

    Downloads an annotations CSV and a zip archive of images from the
    Hugging Face Hub, then yields one example per CSV row with the image
    key, its text description, and the local image path.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="docci", version=VERSION, description="Docci dataset"),
    ]

    def _info(self):
        """Return the dataset metadata (feature schema)."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "image_key": datasets.Value("string"),
                "description": datasets.Value("string"),
                "image": datasets.Image(),
            }),
            # No (input, target) pairing is defined for this dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the annotations CSV and the image archive.

        Returns a single TRAIN split whose gen_kwargs carry the local
        paths of the downloaded/extracted files.
        """
        data_dir = dl_manager.download_and_extract({
            "examples_csv": hf_hub_url(repo_id=_REPO_ID, repo_type="dataset", filename=_INPUT_CSV),
            "images_dir": hf_hub_url(repo_id=_REPO_ID, repo_type="dataset", filename=f"{_INPUT_IMAGES}.zip"),
        })
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=data_dir)]

    def _generate_examples(self, examples_csv, images_dir):
        """Yield ``(row_index, example)`` pairs from the annotations CSV.

        Args:
            examples_csv: Local path of the downloaded annotations CSV.
            images_dir: Local directory the image zip was extracted into;
                images live under ``<images_dir>/<_INPUT_IMAGES>/``.
        """
        df = pd.read_csv(examples_csv)
        for row_idx, row in df.iterrows():
            # Image files are named after the CSV's image_key column.
            image_path = os.path.join(images_dir, _INPUT_IMAGES, f"{row['image_key']}.{_SUFFIX}")
            yield row_idx, {
                "image_key": row["image_key"],
                "description": row["description"],
                "image": image_path,
            }