# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""

import json
import os

import numpy as np
import pandas as pd  # Needed for pd.read_csv in _split_generators
from PIL import Image

import datasets


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# _URLS = {
#     "first_domain": "https://huggingface.co./great-new-dataset-first_domain.zip",
#     "second_domain": "https://huggingface.co./great-new-dataset-second_domain.zip",
# }


# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class NewDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    # BUILDER_CONFIGS = [
    #     datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
    #     datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    # ]

    # DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    def _info(self):
        features = datasets.Features(
            {
                "image_id": datasets.Value("string"),
                "species": datasets.Value("string"),
                "scientific_name": datasets.Value("string"),
                # Assuming RGB images; np.array(img) returns (height, width, channels) = (768, 1024, 3)
                "pics_array": datasets.Array3D(dtype="uint8", shape=(768, 1024, 3)),
                "image_resolution": {
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                },
                "annotations": datasets.Sequence(
                    {
                        "category_id": datasets.Value("int32"),
                        "bounding_box": {
                            "x_min": datasets.Value("float32"),
                            "y_min": datasets.Value("float32"),
                            "x_max": datasets.Value("float32"),
                            "y_max": datasets.Value("float32"),
                        },
                    }
                ),
                # Declared because _generate_examples also yields the decoded image under the "image" key
                "image": datasets.Image(),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # Here we define them because they are different between the two configurations
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Only download data, no need to split
        data_files = dl_manager.download_and_extract(
            {
                "csv": "https://huggingface.co./datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
                "zip": "https://huggingface.co./datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip",
            }
        )
        species_info = pd.read_csv(data_files["csv"])
        extracted_images_path = os.path.join(data_files["zip"], "Labeled Stomatal Images")

        # Get all image filenames
        all_image_filenames = species_info["FileName"].apply(lambda x: x + ".jpg").tolist()

        # No longer need to randomize and split the dataset
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": all_image_filenames,
                    "species_info": species_info,
                    "data_dir": extracted_images_path,
                },
            )
        ]

    @staticmethod
    def save_metadata_as_json(image_id, annotations, species, scientific_name, json_path):
        """Helper (not used during loading) that dumps one image's metadata to a JSON file."""
        metadata = {
            "image_id": image_id,
            "species": species,
            "scientific_name": scientific_name,
            "annotations": annotations,
        }
        with open(json_path, "w") as json_file:
            json.dump(metadata, json_file)

    def _parse_yolo_labels(self, label_path, width, height):
        annotations = []
        with open(label_path, "r") as file:
            yolo_data = file.readlines()

        for line in yolo_data:
            class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
            # Convert relative YOLO coordinates (center x/y, width, height) to absolute corner coordinates
            x_min = (x_center_rel - width_rel / 2) * width
            y_min = (y_center_rel - height_rel / 2) * height
            x_max = (x_center_rel + width_rel / 2) * width
            y_max = (y_center_rel + height_rel / 2) * height
            annotations.append(
                {
                    "category_id": int(class_id),
                    "bounding_box": {
                        "x_min": x_min,
                        "y_min": y_min,
                        "x_max": x_max,
                        "y_max": y_max,
                    },
                }
            )
        return annotations

    def _generate_examples(self, filepaths, species_info, data_dir):
        """Yields examples as (key, example) tuples."""
        # Note: no `split` argument here, since gen_kwargs in _split_generators does not pass one.
        for file_name in filepaths:
            image_id = os.path.splitext(file_name)[0]  # Extract the base name without the file extension
            image_path = os.path.join(data_dir, f"{image_id}.jpg")
            label_path = os.path.join(data_dir, f"{image_id}.txt")

            # Find the corresponding row in the CSV for the current image
            species_row = species_info.loc[species_info["FileName"] == image_id]
            if not species_row.empty:
                species = species_row["Species"].values[0]
                scientific_name = species_row["ScientificName"].values[0]
                width = species_row["Width"].values[0]
                height = species_row["Height"].values[0]
            else:
                # Default values if the image is not listed in the CSV
                species = None
                scientific_name = None
                width = 1024  # or some default value
                height = 768  # or some default value

            with Image.open(image_path) as img:
                pics_array = np.array(img)  # Convert the PIL image to a numpy array
                annotations = self._parse_yolo_labels(label_path, width, height)

                yield image_id, {
                    "image_id": image_id,
                    "species": species,
                    "scientific_name": scientific_name,
                    "pics_array": pics_array,
                    "image_resolution": {"width": width, "height": height},
                    "annotations": annotations,
                    "image": img,  # Return the PIL image as well
                }
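

# A minimal usage sketch (an assumption, not part of the original script): it loads this file
# locally with `datasets.load_dataset` and inspects one example. The file name "new_dataset.py"
# is hypothetical and must match wherever this script is saved; `trust_remote_code=True` is only
# needed (and only available) on recent versions of the `datasets` library.
if __name__ == "__main__":
    ds = datasets.load_dataset("new_dataset.py", split="train", trust_remote_code=True)
    example = ds[0]
    print(example["image_id"], example["species"], example["scientific_name"])
    print("resolution:", example["image_resolution"])
    # A `datasets.Sequence` of a dict is encoded as a dict of lists, so the box count is:
    print("num boxes:", len(example["annotations"]["category_id"]))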