Returns:
`pa.StructArray`: Array in the Video arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
class Image:
"""Image [`Feature`] to read image data from an image file.
Input: The Image feature accepts as input:
- A `str`: Absolute path to the image file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the image file in the archive file.
- `bytes`: Bytes of the image file.
This is useful for archived files with sequential access.
- An `np.ndarray`: NumPy array representing an image.
- A `PIL.Image.Image`: PIL image object.
Args:
mode (`str`, *optional*):
The mode to convert the image to. If `None`, the native mode of the image is used.
decode (`bool`, defaults to `True`):
Whether to decode the image data. If `False`,
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
Examples:
```py
>>> from datasets import load_dataset, Image
>>> ds = load_dataset("beans", split="train")
>>> ds.features["image"]
Image(decode=True, id=None)
>>> ds[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
>>> ds = ds.cast_column('image', Image(decode=False))
>>> ds[0]["image"]
{'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
```
"""
mode: Optional[str] = None
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
Data passed as input to Image feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(value)
elif isinstance(value, PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""Decode example image file into image data.
Args:
value (`str` or `dict`):
A string with the absolute image file path, a dictionary with
keys:
- `path`: String with absolute or relative image file path.
- `bytes`: The bytes of the image file.
token_per_repo_id (`dict`, *optional*):
To access and decode
image files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`PIL.Image.Image`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
import PIL.ImageOps
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
image = PIL.Image.open(bytes_)
else:
image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
image = PIL.ImageOps.exif_transpose(image)
if self.mode and self.mode != image.mode:
image = image.convert(self.mode)
return image
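# Illustrative sketch (not part of the original file): `decode_example` can be called
# directly on the storage dict; "cat.png" is a hypothetical local file. For images that
# live in a private Hub dataset repo, pass `token_per_repo_id={"user/repo": token}`.
#
#   >>> pil_img = Image().decode_example({"path": "cat.png", "bytes": None})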
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Image arrow storage type.
The Arrow types that can be converted to the Image pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the image bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
- `pa.list(*)` - it must contain the image array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
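# Illustrative sketch (not part of the original file): `cast_storage` turns a plain
# pyarrow string array of paths into the {"bytes", "path"} struct storage; "cat.png"
# is a made-up value.
#
#   >>> import pyarrow as pa
#   >>> Image().cast_storage(pa.array(["cat.png", None], type=pa.string())).to_pylist()
#   [{'bytes': None, 'path': 'cat.png'}, None]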
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed image files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
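# Illustrative sketch (not part of the original file): `embed_storage` reads local files
# into the "bytes" field and keeps only the basename in "path", so the resulting Arrow
# data no longer depends on the local filesystem; "folder/cat.png" is a hypothetical file.
#
#   >>> storage = pa.array([{"bytes": None, "path": "folder/cat.png"}], type=Image.pa_type)
#   >>> Image().embed_storage(storage).to_pylist()
#   [{'bytes': b'\x89PNG...', 'path': 'cat.png'}]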
class WebDataset(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 100
IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
VIDEO_EXTENSIONS: List[str] # definition at the bottom of the script
DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
@classmethod
def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
current_example = {}
fs: fsspec.AbstractFileSystem = fsspec.filesystem("memory")
streaming_download_manager = datasets.StreamingDownloadManager()
for filename, f in tar_iterator:
example_key, field_name = base_plus_ext(filename)
if example_key is None:
continue
if current_example and current_example["__key__"] != example_key:
# move the special "__key__" and "__url__" entries to the end of the example dict
current_example["__key__"] = current_example.pop("__key__")
current_example["__url__"] = current_example.pop("__url__")
yield current_example
current_example = {}
current_example["__key__"] = example_key
current_example["__url__"] = tar_path
current_example[field_name.lower()] = f.read()
if field_name.split(".")[-1] in SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL:
fs.write_bytes(filename, current_example[field_name.lower()])
extracted_file_path = streaming_download_manager.extract(f"memory://{filename}")
with fsspec.open(extracted_file_path) as f:
current_example[field_name.lower()] = f.read()
fs.delete(filename)
data_extension = xbasename(extracted_file_path).split(".")[-1]
else:
data_extension = field_name.split(".")[-1]
if data_extension in cls.DECODERS:
current_example[field_name.lower()] = cls.DECODERS[data_extension](current_example[field_name.lower()])
if current_example:
yield current_example
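# Illustrative sketch (not part of the original file): files in a WebDataset tar are
# grouped by the prefix returned by `base_plus_ext`, so a shard containing
# "0001.jpg", "0001.json", "0002.jpg", "0002.json" yields two examples roughly like:
#
#   {"__key__": "0001", "__url__": "shard-000.tar", "jpg": b"...", "json": {...}}
#   {"__key__": "0002", "__url__": "shard-000.tar", "jpg": b"...", "json": {...}}
#
# where the "json" field was decoded by the matching entry in DECODERS.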
def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo()
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
# Download the data files
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download(self.config.data_files)
splits = []
for split_name, tar_paths in data_files.items():
if isinstance(tar_paths, str):
tar_paths = [tar_paths]
tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
splits.append(
datasets.SplitGenerator(
name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
)
)
if not self.info.features:
# Get a few examples to infer the feature types
pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
if any(example.keys() != first_examples[0].keys() for example in first_examples):
raise ValueError(
"The TAR archives of the dataset should be in WebDataset format, "
"but the files in the archive don't share the same prefix or the same types."
)
pa_tables = [
pa.Table.from_pylist(cast_to_python_objects([example], only_1d_for_numpy=True))
for example in first_examples
]
inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
# Set Image types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.IMAGE_EXTENSIONS:
features[field_name] = datasets.Image()
# Set Audio types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.AUDIO_EXTENSIONS:
features[field_name] = datasets.Audio()
# Set Video types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.VIDEO_EXTENSIONS:
features[field_name] = datasets.Video()
self.info.features = features
return splits
def _generate_examples(self, tar_paths, tar_iterators):
image_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
]
audio_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
]
all_field_names = list(self.info.features.keys())
for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
for field_name in all_field_names:
if field_name not in example:
example[field_name] = None
for field_name in image_field_names + audio_field_names:
if example[field_name] is not None:
example[field_name] = {
"path": example["__key__"] + "." + field_name,
"bytes": example[field_name],
}
yield f"{tar_idx}_{example_idx}", example | 164 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/webdataset/webdataset.py |
class FolderBasedBuilderConfig(datasets.BuilderConfig):
"""BuilderConfig for AutoFolder."""
features: Optional[datasets.Features] = None
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
"""
Base class for generic data loaders for vision and audio data.
Abstract class attributes to be overridden by a child class:
BASE_FEATURE: feature object to decode data (e.g. datasets.Image, datasets.Audio, ...)
BASE_COLUMN_NAME: string key name of a base feature (e.g. "image", "audio", ...)
BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
will be included in a dataset)
"""
BASE_FEATURE: Type[FeatureType]
BASE_COLUMN_NAME: str
BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
EXTENSIONS: List[str]
METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
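# A minimal sketch (not part of the original file) of how a child class fills in the
# abstract attributes listed in the docstring above; the ImageFolder builder further
# below does essentially this:
#
#   class MyImageFolder(FolderBasedBuilder):
#       BASE_FEATURE = datasets.Image
#       BASE_COLUMN_NAME = "image"
#       BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
#       EXTENSIONS = [".jpg", ".jpeg", ".png"]  # illustrative subset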
def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
# Do an early pass if:
# * `drop_labels` is None (default) or False, to infer the class labels
# * `drop_metadata` is None (default) or False, to find the metadata files
do_analyze = not self.config.drop_labels or not self.config.drop_metadata
labels, path_depths = set(), set()
metadata_files = collections.defaultdict(set)
def analyze(files_or_archives, downloaded_files_or_dirs, split):
if len(downloaded_files_or_dirs) == 0:
return
# The files are separated from the archives at this point, so check the first sample
# to see if it's a file or a directory and iterate accordingly
if os.path.isfile(downloaded_files_or_dirs[0]):
original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
for original_file, downloaded_file in zip(original_files, downloaded_files):
original_file, downloaded_file = str(original_file), str(downloaded_file)
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(original_file)))
path_depths.add(count_path_segments(original_file))
elif os.path.basename(original_file) in self.METADATA_FILENAMES:
metadata_files[split].add((original_file, downloaded_file))
else:
original_file_name = os.path.basename(original_file)
logger.debug(
f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either."
)
else:
archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
for archive, downloaded_dir in zip(archives, downloaded_dirs):
archive, downloaded_dir = str(archive), str(downloaded_dir)
for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
path_depths.add(count_path_segments(downloaded_dir_file))
elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
metadata_files[split].add((None, downloaded_dir_file))
else:
archive_file_name = os.path.basename(archive)
original_file_name = os.path.basename(downloaded_dir_file)
logger.debug(
f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
)
data_files = self.config.data_files
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files, archives = self._split_files_and_archives(files)
downloaded_files = dl_manager.download(files)
downloaded_dirs = dl_manager.download_and_extract(archives)
if do_analyze: # drop_metadata is None or False, drop_labels is None or False
logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
analyze(files, downloaded_files, split_name)
analyze(archives, downloaded_dirs, split_name)
if metadata_files:
# add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
add_metadata = not self.config.drop_metadata
# if `metadata_files` are found, add labels only if
# `drop_labels` is set up to False explicitly (not-default behavior)
add_labels = self.config.drop_labels is False
else:
# if `metadata_files` are not found, don't add metadata
add_metadata = False
# if `metadata_files` are not found and `drop_labels` is None (default) -
# add labels if files are on the same level in directory hierarchy and there is more than one label
add_labels = (
(len(labels) > 1 and len(path_depths) == 1)
if self.config.drop_labels is None
else not self.config.drop_labels
)
if add_labels:
logger.info("Adding the labels inferred from data directories to the dataset's features...")
if add_metadata:
logger.info("Adding metadata to the dataset...")
else:
add_labels, add_metadata, metadata_files = False, False, {}
splits.append(
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
"files": list(zip(files, downloaded_files))
+ [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
"metadata_files": metadata_files,
"split_name": split_name,
"add_labels": add_labels,
"add_metadata": add_metadata,
},
)
)
if add_metadata:
# Verify that:
# * all metadata files have the same set of features
# * the `file_name` key is one of the metadata keys and is of type string
features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
# Check that all metadata files share the same format
metadata_ext = {
os.path.splitext(original_metadata_file)[-1]
for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
}
if len(metadata_ext) > 1:
raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
metadata_ext = metadata_ext.pop()
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
features_per_metadata_file.append(
(downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
)
for downloaded_metadata_file, metadata_features in features_per_metadata_file:
if metadata_features != features_per_metadata_file[0][1]:
raise ValueError(
f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
)
metadata_features = features_per_metadata_file[0][1]
if "file_name" not in metadata_features:
raise ValueError("`file_name` must be present as dictionary key in metadata files") | 166 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py |
if metadata_features["file_name"] != datasets.Value("string"):
raise ValueError("`file_name` key must be a string")
del metadata_features["file_name"]
else:
metadata_features = None
# Normally, we would do this in _info, but we need to know the labels and/or metadata
# before building the features
if self.config.features is None:
if add_labels:
self.info.features = datasets.Features(
{
self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
"label": datasets.ClassLabel(names=sorted(labels)),
}
)
else:
self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
if add_metadata:
# Warn if there are duplicated keys in metadata compared to the existing features
# (`BASE_COLUMN_NAME`, optionally "label")
duplicated_keys = set(self.info.features) & set(metadata_features)
if duplicated_keys:
logger.warning(
f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
f"the features dictionary."
)
# skip metadata duplicated keys
self.info.features.update(
{
feature: metadata_features[feature]
for feature in metadata_features
if feature not in duplicated_keys
}
)
return splits
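# Illustrative sketch (not part of the original file): a metadata file picked up above
# must provide a string "file_name" column with paths relative to the metadata file;
# every other column is merged into the dataset's features. For example, a metadata.csv
# next to the images could look like:
#
#   file_name,caption
#   train/cat.png,a cat sitting on a mat
#   train/dog.png,a dog playing outside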
def _split_files_and_archives(self, data_files):
files, archives = [], []
for data_file in data_files:
_, data_file_ext = os.path.splitext(data_file)
if data_file_ext.lower() in self.EXTENSIONS:
files.append(data_file)
elif os.path.basename(data_file) in self.METADATA_FILENAMES:
files.append(data_file)
else:
archives.append(data_file)
return files, archives
def _read_metadata(self, metadata_file, metadata_ext: str = ""):
if metadata_ext == ".csv":
# Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
return pa.Table.from_pandas(pd.read_csv(metadata_file))
else:
with open(metadata_file, "rb") as f:
return paj.read_json(f)
def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
split_metadata_files = metadata_files.get(split_name, [])
sample_empty_metadata = (
{k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
)
last_checked_dir = None
metadata_dir = None
metadata_dict = None
downloaded_metadata_file = None
metadata_ext = ""
if split_metadata_files:
metadata_ext = {
os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
}
metadata_ext = metadata_ext.pop()
file_idx = 0
for original_file, downloaded_file_or_dir in files:
if original_file is not None:
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
# If the file is of a supported type and we've just entered a new directory,
# find the nearest metadata file (by counting path segments) for the directory
current_dir = os.path.dirname(original_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is not None # ignore metadata_files that are inside archives
and not os.path.relpath(
original_file, os.path.dirname(metadata_file_candidate)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
file_relpath = os.path.relpath(original_file, metadata_dir)
file_relpath = file_relpath.replace("\\", "/")
if file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[file_relpath]
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_file_or_dir,
**sample_metadata,
**sample_label,
},
)
file_idx += 1
else:
for downloaded_dir_file in downloaded_file_or_dir:
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
current_dir = os.path.dirname(downloaded_dir_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is None # ignore metadata_files that are not inside archives
and not os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(downloaded_metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." | 166 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py |
)
if metadata_dir is not None and downloaded_metadata_file is not None:
downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
if downloaded_dir_file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[downloaded_dir_file_relpath]
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_dir_file,
**sample_metadata,
**sample_label,
},
)
file_idx += 1
class Cache(datasets.ArrowBasedBuilder):
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
version: Optional[str] = "0.0.0",
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[datasets.DatasetInfo] = None,
features: Optional[datasets.Features] = None,
token: Optional[Union[bool, str]] = None,
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
**config_kwargs,
):
if repo_id is None and dataset_name is None:
raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
if data_files is not None:
config_kwargs["data_files"] = data_files | 167 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/cache/cache.py |
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
if hash == "auto" and version == "auto":
config_name, version, hash = _find_hash_in_cache(
dataset_name=repo_id or dataset_name,
config_name=config_name,
cache_dir=cache_dir,
config_kwargs=config_kwargs,
custom_features=features,
)
elif hash == "auto" or version == "auto":
raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
super().__init__(
cache_dir=cache_dir,
dataset_name=dataset_name,
config_name=config_name,
version=version,
hash=hash,
base_path=base_path,
info=info,
token=token,
repo_id=repo_id,
storage_options=storage_options,
writer_batch_size=writer_batch_size,
)
def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo()
def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
if not os.path.exists(self.cache_dir):
raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
if output_dir is not None and output_dir != self.cache_dir:
shutil.copytree(self.cache_dir, output_dir)
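# Illustrative sketch (assumption, not part of the original file): the Cache builder
# reloads a dataset that was already prepared on disk. Passing version="auto" and
# hash="auto" together makes it look up the matching entry in the cache directory:
#
#   builder = Cache(dataset_name="squad", version="auto", hash="auto")
#   builder.download_and_prepare()  # only checks that the cached arrow files exist
#   ds = builder.as_dataset()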
def _split_generators(self, dl_manager):
# used to stream from cache
if isinstance(self.info.splits, datasets.SplitDict):
split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
else:
raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
return [
datasets.SplitGenerator(
name=split_info.name,
gen_kwargs={
"files": filenames_for_dataset_split(
self.cache_dir,
dataset_name=self.dataset_name,
split=split_info.name,
filetype_suffix="arrow",
shard_lengths=split_info.shard_lengths,
)
},
)
for split_info in split_infos
]
def _generate_tables(self, files):
# used to stream from cache
for file_idx, file in enumerate(files):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", pa_table
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
try:
reader = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
self.info.features = datasets.Features.from_arrow_schema(reader.schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
try:
batches = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
batches = (reader.get_batch(i) for i in range(reader.num_record_batches))
for batch_idx, record_batch in enumerate(batches):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) | 169 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/arrow/arrow.py |
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
class XmlConfig(datasets.BuilderConfig):
"""BuilderConfig for xml files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
class Xml(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = XmlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. | 173 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/xml/xml.py |
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"xml": pa.string()})) | 173 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/xml/xml.py |
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["xml"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
xml = f.read()
pa_table = pa.Table.from_arrays([pa.array([xml])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
split: datasets.NamedSplit = datasets.Split.TRAIN
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
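# A minimal usage sketch (not part of the original file): this packaged builder is what
# backs `Dataset.from_generator`, which forwards the callable and its kwargs through
# GeneratorConfig:
#
#   >>> from datasets import Dataset
#   >>> def gen(shards):
#   ...     for shard in shards:
#   ...         yield {"text": f"example from {shard}"}
#   >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": ["a", "b"]})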
class JsonConfig(datasets.BuilderConfig):
"""BuilderConfig for JSON."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
def __post_init__(self):
super().__post_init__()
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
)
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
type = self.config.features.arrow_schema.field(column_name).type
pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# If the file is one json object and if we need to look at the items in one specific field
if self.config.field is not None:
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
dataset = ujson_loads(f.read())
# We keep only the field we are interested in
dataset = dataset[self.config.field]
df = pandas_read_json(io.StringIO(ujson_dumps(dataset)))
if df.columns.tolist() == [0]:
df.columns = list(self.config.features) if self.config.features else ["text"]
pa_table = pa.Table.from_pandas(df, preserve_index=False)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
else:
with open(file, "rb") as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32, 16 << 10)
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
)
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e, pa.ArrowInvalid)
and "straddling" not in str(e)
or block_size > len(batch)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
)
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file, encoding=self.config.encoding, errors=self.config.encoding_errors
) as f:
df = pandas_read_json(f)
except ValueError:
logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}")
raise e
if df.columns.tolist() == [0]:
df.columns = list(self.config.features) if self.config.features else ["text"]
try:
pa_table = pa.Table.from_pandas(df, preserve_index=False)
except pa.ArrowInvalid as e:
logger.error(
f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}"
)
raise ValueError(
f"Failed to convert pandas DataFrame to Arrow Table from file {file}."
) from None
yield file_idx, self._cast_table(pa_table)
break
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
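# Illustrative sketch (not part of the original file) of the two layouts handled above.
# With `field="data"` the loader reads a single JSON document and keeps only that key;
# without it, the file is expected to contain one JSON object per line (JSON Lines):
#
#   # data.json -> load_dataset("json", data_files="data.json", field="data")
#   # {"version": "1.0", "data": [{"text": "first"}, {"text": "second"}]}
#
#   # data.jsonl -> load_dataset("json", data_files="data.jsonl")
#   # {"text": "first"}
#   # {"text": "second"}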
class CsvConfig(datasets.BuilderConfig):
"""BuilderConfig for CSV.""" | 178 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py |
sep: str = ","
delimiter: Optional[str] = None
header: Optional[Union[int, List[int], str]] = "infer"
names: Optional[List[str]] = None
column_names: Optional[List[str]] = None
index_col: Optional[Union[int, str, List[int], List[str]]] = None
usecols: Optional[Union[List[int], List[str]]] = None
prefix: Optional[str] = None
mangle_dupe_cols: bool = True
engine: Optional[Literal["c", "python", "pyarrow"]] = None
converters: Dict[Union[int, str], Callable[[Any], Any]] = None
true_values: Optional[list] = None
false_values: Optional[list] = None
skipinitialspace: bool = False
skiprows: Optional[Union[int, List[int]]] = None
nrows: Optional[int] = None
na_values: Optional[Union[str, List[str]]] = None
keep_default_na: bool = True
na_filter: bool = True
verbose: bool = False
skip_blank_lines: bool = True
thousands: Optional[str] = None
decimal: str = "."
lineterminator: Optional[str] = None
quotechar: str = '"' | 178 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py |
quoting: int = 0
escapechar: Optional[str] = None
comment: Optional[str] = None
encoding: Optional[str] = None
dialect: Optional[str] = None
error_bad_lines: bool = True
warn_bad_lines: bool = True
skipfooter: int = 0
doublequote: bool = True
memory_map: bool = False
float_precision: Optional[str] = None
chunksize: int = 10_000
features: Optional[datasets.Features] = None
encoding_errors: Optional[str] = "strict"
on_bad_lines: Literal["error", "warn", "skip"] = "error"
date_format: Optional[str] = None
def __post_init__(self):
super().__post_init__()
if self.delimiter is not None:
self.sep = self.delimiter
if self.column_names is not None:
self.names = self.column_names
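# Illustrative sketch (not part of the original file): `delimiter` and `column_names`
# are aliases copied onto pandas' `sep` and `names`, so the two calls below read the
# same headerless, semicolon-separated file ("data.csv" is a made-up path):
#
#   >>> load_dataset("csv", data_files="data.csv", delimiter=";", column_names=["a", "b"])
#   >>> load_dataset("csv", data_files="data.csv", sep=";", names=["a", "b"])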
@property
def pd_read_csv_kwargs(self):
pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator, | 178 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py |
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated, so we also skip them when they are left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.2 deprecated arguments
if datasets.config.PANDAS_VERSION.release >= (2, 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = CsvConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self, files):
schema = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
dtype = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(csv_file_reader):
pa_table = pa.Table.from_pandas(df)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") | 179 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py |
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
class TextConfig(datasets.BuilderConfig):
"""BuilderConfig for text files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
chunksize: int = 10 << 20 # 10MB
keep_linebreaks: bool = False
sample_by: str = "line" | 180 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py |
class Text(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = TextConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. | 181 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py |
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"text": pa.string()})) | 181 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py |