| text | type | start | end | depth | filepath | parent_class | class_index |
|---|---|---|---|---|---|---|---|
| stringlengths 24-253k | stringclasses 1 value | int64 67-146k | int64 223-278k | int64 0-1 | stringlengths 74-128 | stringclasses 1 value | int64 0-271 |
class tracked_list(list):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.last_item = None
def __iter__(self) -> Iterator:
for x in super().__iter__():
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})" | class_definition | 602 | 1,096 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/track.py | null | 200 |
class TrackedIterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator, *args):
super().__init__()
self.generator = generator
self.args = args
self.last_item = None
def __iter__(self):
for x in self.generator(*self.args):
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
def __reduce__(self):
return (self.__class__, (self.generator, *self.args))
| class_definition | 1,099 | 1,855 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/track.py | null | 201 |
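A short sketch, again assuming the import path from the filepath column, showing the key property of this wrapper: the underlying generator is re-created on every iteration, so the iterable can be consumed more than once.

```py
from datasets.utils.track import TrackedIterableFromGenerator  # import path assumed from the filepath column

def count_up_to(n):
    yield from range(n)

iterable = TrackedIterableFromGenerator(count_up_to, 3)
print(list(iterable))  # [0, 1, 2]
print(list(iterable))  # [0, 1, 2] again: the generator is rebuilt on every __iter__ call
```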
class NonMutableDict(dict):
"""Dict where keys can only be added but not modified.
Raises an error if the user tries to overwrite an existing key. The error message
can be customized during construction. It will be formatted using {key} for
the overwritten key.
"""
def __init__(self, *args, **kwargs):
self._error_msg = kwargs.pop(
"error_msg",
"Try to overwrite existing key: {key}",
)
if kwargs:
raise ValueError("NonMutableDict cannot be initialized with kwargs.")
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key in self:
raise ValueError(self._error_msg.format(key=key))
return super().__setitem__(key, value)
def update(self, other):
if any(k in self for k in other):
raise ValueError(self._error_msg.format(key=set(self) & set(other)))
return super().update(other)
| class_definition | 10,782 | 11,735 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/py_utils.py | null | 202 |
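A small sketch of the append-only behaviour; the split names are illustrative and the import path is assumed from the filepath column.

```py
from datasets.utils.py_utils import NonMutableDict  # import path assumed from the filepath column

splits = NonMutableDict({"train": "train.csv"})
splits["test"] = "test.csv"        # adding a new key works
try:
    splits["train"] = "other.csv"  # overwriting an existing key raises
except ValueError as err:
    print(err)                     # Try to overwrite existing key: train
```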
class classproperty(property): # pylint: disable=invalid-name
"""Descriptor to be used as decorator for @classmethods."""
def __get__(self, obj, objtype=None):
return self.fget.__get__(None, objtype)()
| class_definition | 11,738 | 11,957 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/py_utils.py | null | 203 |
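A hedged sketch of the intended usage pattern, stacking `classproperty` on top of `@classmethod` so that the wrapped getter is bound to the class; the `Registry` class is illustrative only.

```py
from datasets.utils.py_utils import classproperty  # import path assumed from the filepath column

class Registry:  # illustrative class, not part of the library
    _items = {"a": 1, "b": 2}

    @classproperty
    @classmethod
    def size(cls) -> int:
        return len(cls._items)

print(Registry.size)  # 2, computed without instantiating Registry
```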
class NestedDataStructure:
def __init__(self, data=None):
self.data = data if data is not None else []
def flatten(self, data=None):
data = data if data is not None else self.data
if isinstance(data, dict):
return self.flatten(list(data.values()))
elif isinstance(data, (list, tuple)):
return [flattened for item in data for flattened in self.flatten(item)]
else:
return [data]
| class_definition | 19,811 | 20,273 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/py_utils.py | null | 204 |
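A quick sketch of `flatten` on a nested structure of dicts, lists and tuples; the file names are illustrative and the import path is assumed from the filepath column.

```py
from datasets.utils.py_utils import NestedDataStructure  # import path assumed from the filepath column

nested = {"train": ["a.csv", ("b.csv", "c.csv")], "test": "d.csv"}
print(NestedDataStructure(nested).flatten())
# ['a.csv', 'b.csv', 'c.csv', 'd.csv']
```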
class tqdm(old_tqdm):
"""
Class to override `disable` argument in case progress bars are globally disabled.
Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
"""
def __init__(self, *args, **kwargs):
if are_progress_bars_disabled():
kwargs["disable"] = True
super().__init__(*args, **kwargs)
def __delattr__(self, attr: str) -> None:
"""Fix for https://github.com/huggingface/datasets/issues/6066"""
try:
super().__delattr__(attr)
except AttributeError:
if attr != "_lock":
raise
| class_definition | 3,488 | 4,110 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tqdm.py | null | 205 |
class _PatchedModuleObj:
"""Set all the modules components as attributes of the _PatchedModuleObj object."""
def __init__(self, module, attrs=None):
attrs = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self, key, getattr(module, key))
self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
| class_definition | 103 | 590 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/patching.py | null | 206 |
class patch_submodule:
"""
Patch a submodule attribute of an object, keeping all other submodules intact at all levels.
Example::
>>> import importlib
>>> from datasets.load import dataset_module_factory
>>> from datasets.streaming import patch_submodule, xjoin
>>>
>>> dataset_module = dataset_module_factory("snli")
>>> snli_module = importlib.import_module(dataset_module.module_path)
>>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
>>> patcher.start()
>>> assert snli_module.os.path.join is xjoin
"""
_active_patches = []
def __init__(self, obj, target: str, new, attrs=None):
self.obj = obj
self.target = target
self.new = new
self.key = target.split(".")[0]
self.original = {}
self.attrs = attrs or []
def __enter__(self):
*submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(submodules)):
try:
submodule = import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
obj_attr = getattr(self.obj, attr)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows patching renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
):
self.original[attr] = obj_attr
# patch at top level
setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
patched = getattr(self.obj, attr)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
patched = getattr(patched, key)
# finally set the target attribute
setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
attr_value = getattr(import_module(".".join(submodules)), target_attr)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows patching renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, attr) is attr_value:
self.original[attr] = getattr(self.obj, attr)
setattr(self.obj, attr, self.new)
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
self.original[target_attr] = globals()["__builtins__"][target_attr]
setattr(self.obj, target_attr, self.new)
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
def __exit__(self, *exc_info):
for attr in list(self.original):
setattr(self.obj, attr, self.original.pop(attr))
def start(self):
"""Activate a patch."""
self.__enter__()
self._active_patches.append(self)
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| class_definition | 593 | 4,954 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/patching.py | null | 207 |
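A hedged sketch of context-manager usage. `fake_join` is an illustrative replacement, and the example relies on `datasets.utils.extract` importing `os` at module level (which the extractor classes later in this dump do).

```py
import datasets.utils.extract as extract_module  # any module that imports `os` at top level
from datasets.utils.patching import patch_submodule  # import path assumed from the filepath column

def fake_join(*parts):  # illustrative replacement for os.path.join
    return "/".join(parts)

with patch_submodule(extract_module, "os.path.join", fake_join):
    assert extract_module.os.path.join("a", "b") == "a/b"
# On exit, the original `os` module object is restored on extract_module.
```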
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
It also uses the current umask for lock files.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
# The "mode" argument is required if we want to use the current umask in filelock >= 3.10
# In previous previous it was already using the current umask.
if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
umask = os.umask(0o666)
os.umask(umask)
kwargs["mode"] = 0o666 & ~umask
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
path = os.path.abspath(os.path.expanduser(path))
filename = os.path.basename(path)
max_filename_length = cls.MAX_FILENAME_LENGTH
if issubclass(cls, UnixFileLock):
max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
if len(filename) > max_filename_length:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
| class_definition | 876 | 2,369 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/_filelock.py | null | 208 |
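A sketch of the filename-hashing behaviour on a Unix-like system; the deliberately over-long lock name is hypothetical, and the import path is assumed from the filepath column.

```py
import os
from datasets.utils._filelock import FileLock  # import path assumed from the filepath column

lock = FileLock("/tmp/" + "x" * 300 + ".lock")  # hypothetical, deliberately over-long name
print(len(os.path.basename(lock.lock_file)) <= 255)  # True: the name was truncated and hashed
with lock:
    pass  # exclusive access while the lock is held
```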
class TqdmCallback(fsspec.callbacks.TqdmCallback):
def __init__(self, tqdm_kwargs=None, *args, **kwargs):
if config.FSSPEC_VERSION < version.parse("2024.2.0"):
super().__init__(tqdm_kwargs, *args, **kwargs)
self._tqdm = _tqdm # replace tqdm module by datasets.utils.tqdm module
else:
kwargs["tqdm_cls"] = _tqdm.tqdm
super().__init__(tqdm_kwargs, *args, **kwargs)
| class_definition | 11,994 | 12,425 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py | null | 209 |
class NonStreamableDatasetError(Exception):
pass
| class_definition | 18,763 | 18,815 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py | null | 210 |
class xPath(type(Path())):
"""Extension of `pathlib.Path` to support both local paths and remote URLs."""
def __str__(self):
path_str = super().__str__()
main_hop, *rest_hops = path_str.split("::")
if is_local_path(main_hop):
return main_hop
path_as_posix = path_str.replace("\\", "/")
path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
return path_as_posix
def exists(self, download_config: Optional[DownloadConfig] = None):
"""Extend `pathlib.Path.exists` method to support both local and remote files.
Args:
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
return xexists(str(self), download_config=download_config)
def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
"""Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
download_config : mainly use token or storage_options to support different platforms and auth types.
Yields:
[`xPath`]
"""
posix_path = self.as_posix()
main_hop, *rest_hops = posix_path.split("::")
if is_local_path(main_hop):
yield from Path(main_hop).glob(pattern)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops:
urlpath = rest_hops[0]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
storage_options = {urlpath.split("://")[0]: storage_options}
posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {}))
globbed_paths = fs.glob(xjoin(main_hop, pattern))
for globbed_path in globbed_paths:
yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
def rglob(self, pattern, **kwargs):
"""Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
Yields:
[`xPath`]
"""
return self.glob("**/" + pattern, **kwargs)
@property
def parent(self) -> "xPath":
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
[`xPath`]
"""
return type(self)(xdirname(self.as_posix()))
@property
def name(self) -> str:
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).name
@property
def stem(self) -> str:
"""Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).stem
@property
def suffix(self) -> str:
"""Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).suffix
def open(self, *args, **kwargs):
"""Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
Args:
**args: Arguments passed to :func:`fsspec.open`.
**kwargs: Keyword arguments passed to :func:`fsspec.open`.
Returns:
`io.FileIO`: File-like object.
"""
return xopen(str(self), *args, **kwargs)
def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
"""Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
Args:
*p (`tuple` of `str`): Other path components.
Returns:
[`xPath`]
"""
return type(self)(xjoin(self.as_posix(), *p))
def __truediv__(self, p: str) -> "xPath":
return self.joinpath(p)
def with_suffix(self, suffix):
main_hop, *rest_hops = str(self).split("::")
if is_local_path(main_hop):
return type(self)(str(super().with_suffix(suffix)))
return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) | class_definition | 40,898 | 45,786 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py | null | 211 |
class ArchiveIterable(TrackedIterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_urlpath(
cls, urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath, download_config=download_config)
# Set block_size=0 to get faster streaming
# (e.g. for hf:// and https:// it uses streaming Requests file-like instances)
with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
| class_definition | 50,356 | 52,759 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py | null | 212 |
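A hedged sketch of iterating an archive directly; the local archive path is hypothetical and the import path is assumed from the filepath column.

```py
from datasets.utils.file_utils import ArchiveIterable  # import path assumed from the filepath column

archive = ArchiveIterable.from_urlpath("/path/to/archive.tar.gz")  # hypothetical local archive
for file_path, file_obj in archive:
    print(file_path, len(file_obj.read()))  # hidden files (".", "__") are skipped
```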
class FilesIterable(TrackedIterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_urlpaths(
cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if xisfile(urlpath, download_config=download_config):
yield urlpath
elif xisdir(urlpath, download_config=download_config):
for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if xbasename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")):
# skipping hidden files
continue
yield xjoin(dirpath, filename)
else:
raise FileNotFoundError(urlpath)
@classmethod
def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
return cls(cls._iter_from_urlpaths, urlpaths, download_config)
| class_definition | 52,762 | 54,288 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py | null | 213 |
class Pickler(dill.Pickler):
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
_legacy_no_dict_keys_sorting = False
def save(self, obj, save_persistent_id=True):
obj_type = type(obj)
if obj_type not in self.dispatch:
if "regex" in sys.modules:
import regex # type: ignore
if obj_type is regex.Pattern:
pklregister(obj_type)(_save_regexPattern)
if "spacy" in sys.modules:
import spacy # type: ignore
if issubclass(obj_type, spacy.Language):
pklregister(obj_type)(_save_spacyLanguage)
if "tiktoken" in sys.modules:
import tiktoken # type: ignore
if obj_type is tiktoken.Encoding:
pklregister(obj_type)(_save_tiktokenEncoding)
if "torch" in sys.modules:
import torch # type: ignore
if issubclass(obj_type, torch.Tensor):
pklregister(obj_type)(_save_torchTensor)
if obj_type is torch.Generator:
pklregister(obj_type)(_save_torchGenerator)
# Unwrap `torch.compile`-ed modules
if issubclass(obj_type, torch.nn.Module):
obj = getattr(obj, "_orig_mod", obj)
if "transformers" in sys.modules:
import transformers # type: ignore
if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
# Unwrap `torch.compile`-ed functions
if obj_type is FunctionType:
obj = getattr(obj, "_torchdynamo_orig_callable", obj)
dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
def _batch_setitems(self, items):
if self._legacy_no_dict_keys_sorting:
return super()._batch_setitems(items)
# Ignore the order of keys in a dict
try:
# Faster, but fails for unorderable elements
items = sorted(items)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
items = sorted(items, key=lambda x: Hasher.hash(x[0]))
dill.Pickler._batch_setitems(self, items)
def memoize(self, obj):
# Don't memoize strings since two identical strings can have different Python ids
if type(obj) is not str: # noqa: E721
dill.Pickler.memoize(self, obj)
| class_definition | 847 | 3,422 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/_dill.py | null | 214 |
class ExtractManager:
def __init__(self, cache_dir: Optional[str] = None):
self.extract_dir = (
os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def _get_output_path(self, path: str) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
abs_path = os.path.abspath(path)
return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
def _do_extract(self, output_path: str, force_extract: bool) -> bool:
return force_extract or (
not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
)
def extract(self, input_path: str, force_extract: bool = False) -> str:
extractor_format = self.extractor.infer_extractor_format(input_path)
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path)
if self._do_extract(output_path, force_extract):
self.extractor.extract(input_path, output_path, extractor_format)
return output_path
| class_definition | 354 | 1,662 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 215 |
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ...
@staticmethod
@abstractmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ...
| class_definition | 1,665 | 1,931 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 216 |
class MagicNumberBaseExtractor(BaseExtractor, ABC):
magic_numbers: List[bytes] = []
@staticmethod
def read_magic_number(path: Union[Path, str], magic_number_length: int):
with open(path, "rb") as f:
return f.read(magic_number_length)
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if not magic_number:
magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
try:
magic_number = cls.read_magic_number(path, magic_number_length)
except OSError:
return False
return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
| class_definition | 1,934 | 2,696 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 217 |
class TarExtractor(BaseExtractor):
@classmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
return tarfile.is_tarfile(path)
@staticmethod
def safemembers(members, output_path):
"""
Fix for CVE-2007-4559
Desc:
Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
From: https://stackoverflow.com/a/10077309
"""
def resolved(path: str) -> str:
return os.path.realpath(os.path.abspath(path))
def badpath(path: str, base: str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base, path)).startswith(base)
def badlink(info, base: str) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base, os.path.dirname(info.name)))
return badpath(info.linkname, base=tip)
base = resolved(output_path)
for finfo in members:
if badpath(finfo.name, base):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
elif finfo.issym() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
elif finfo.islnk() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
else:
yield finfo
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
tar_file = tarfile.open(input_path)
tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
tar_file.close()
| class_definition | 2,699 | 4,848 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 218 |
class GzipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x1f\x8b"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with gzip.open(input_path, "rb") as gzip_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file)
| class_definition | 4,851 | 5,213 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 219 |
class ZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if super().is_extractable(path, magic_number=magic_number):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(path, "rb") as fp:
endrec = _EndRecData(fp)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir) # CD is where we expect it to be
if len(data) == sizeCentralDir:
centdir = struct.unpack(structCentralDir, data) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
with zipfile.ZipFile(input_path, "r") as zip_file:
zip_file.extractall(output_path)
zip_file.close()
| class_definition | 5,216 | 7,612 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 220 |
class XzExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with lzma.open(input_path) as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
| class_definition | 7,615 | 7,997 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 221 |
class RarExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile")
import rarfile
os.makedirs(output_path, exist_ok=True)
rf = rarfile.RarFile(input_path)
rf.extractall(output_path)
rf.close()
| class_definition | 8,000 | 8,506 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 222 |
class ZstdExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x28\xb5\x2f\xfd"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard")
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
dctx.copy_stream(ifh, ofh)
| class_definition | 8,509 | 8,995 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 223 |
class Bzip2Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x42\x5a\x68"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with bz2.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
| class_definition | 8,998 | 9,376 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 224 |
class SevenZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr")
import py7zr
os.makedirs(output_path, exist_ok=True)
with py7zr.SevenZipFile(input_path, "r") as archive:
archive.extractall(output_path)
| class_definition | 9,379 | 9,856 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 225 |
class Lz4Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x04\x22\x4d\x18"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4")
import lz4.frame
with lz4.frame.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
| class_definition | 9,859 | 10,364 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 226 |
class Extractor:
# Put the zip check after tar and gzip, because files can be wrongly detected as zip (presumably tar or gzip files)
extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": Bzip2Extractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": Lz4Extractor, # <Added version="2.4.0"/>
}
@classmethod
def _get_magic_number_max_length(cls):
return max(
len(extractor_magic_number)
for extractor in cls.extractors.values()
if issubclass(extractor, MagicNumberBaseExtractor)
for extractor_magic_number in extractor.magic_numbers
)
@staticmethod
def _read_magic_number(path: Union[Path, str], magic_number_length: int):
try:
return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
except OSError:
return b""
@classmethod
def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",
category=FutureWarning,
)
extractor_format = cls.infer_extractor_format(path)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]: # <Added version="2.4.0"/>
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(path, magic_number_max_length)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(path, magic_number=magic_number):
return extractor_format
@classmethod
def extract(
cls,
input_path: Union[Path, str],
output_path: Union[Path, str],
extractor_format: str,
) -> None:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# Prevent parallel extractions
lock_path = str(Path(output_path).with_suffix(".lock"))
with FileLock(lock_path):
shutil.rmtree(output_path, ignore_errors=True)
extractor = cls.extractors[extractor_format]
return extractor.extract(input_path, output_path)
| class_definition | 10,367 | 13,038 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py | null | 227 |
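A hedged sketch of the two-step flow: infer the format from the magic number, then extract. The archive and output paths are hypothetical; the import path is assumed from the filepath column.

```py
from datasets.utils.extract import Extractor  # import path assumed from the filepath column

archive_path = "/tmp/data.zip"  # hypothetical input file
fmt = Extractor.infer_extractor_format(archive_path)  # "zip" for a zip archive, None if unknown
if fmt:
    Extractor.extract(archive_path, "/tmp/extracted/data", extractor_format=fmt)
```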
class SharedMemoryContext:
# This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
# The process that creates shared memory is always the one responsible for unlinking it in the end
def __init__(self):
self.created_shms = []
self.opened_shms = []
def get_shm(self, name, size, create):
shm = SharedMemory(size=int(size), name=name, create=create)
if create:
# We only unlink the ones we created in this context
self.created_shms.append(shm)
else:
# If we didn't create it, we only close it when done, we don't unlink it
self.opened_shms.append(shm)
return shm
def get_array(self, name, shape, dtype, create):
shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
for shm in self.created_shms:
shm.close()
shm.unlink()
for shm in self.opened_shms:
shm.close()
| class_definition | 8,870 | 10,071 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py | null | 228 |
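A hedged sketch of the shared-memory lifecycle; the block name is illustrative, and importing `datasets.utils.tf_utils` is assumed to work in your environment (the module is TensorFlow-oriented).

```py
import numpy as np
from datasets.utils.tf_utils import SharedMemoryContext  # import path assumed from the filepath column

with SharedMemoryContext() as shm_ctx:
    arr = shm_ctx.get_array("demo_block", shape=(2, 3), dtype=np.float32, create=True)
    arr[:] = 1.0
    # Another process could attach with create=False using the same name, shape and dtype.
# On exit, blocks created here are closed and unlinked; blocks merely opened are only closed.
```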
class NumpyMultiprocessingGenerator:
def __init__(
self,
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
self.dataset = dataset
self.cols_to_retain = cols_to_retain
self.collate_fn = collate_fn
self.collate_fn_args = collate_fn_args
self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype is np.str_]
# Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
self.columns_to_np_types = {
col: dtype if col not in self.string_columns else np.dtype("U1")
for col, dtype in columns_to_np_types.items()
}
self.output_signature = output_signature
self.shuffle = shuffle
self.batch_size = batch_size
self.drop_remainder = drop_remainder
self.num_workers = num_workers
# Because strings are converted to characters, we need to add one extra dimension to the shape
self.columns_to_ranks = {
col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
for col, spec in output_signature.items()
}
def __iter__(self):
# Make sure we only spawn workers if they have work to do
num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
# Do the shuffling in iter so that it's done at the start of each epoch
per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
)
ctx = get_context("spawn")
names = []
shape_arrays = []
workers = []
array_ready_events = [ctx.Event() for _ in range(num_workers)]
array_loaded_events = [ctx.Event() for _ in range(num_workers)]
base_args = {
"dataset": self.dataset,
"cols_to_retain": self.cols_to_retain,
"collate_fn": self.collate_fn,
"collate_fn_args": self.collate_fn_args,
"columns_to_np_types": self.columns_to_np_types,
"columns_to_ranks": self.columns_to_ranks,
"string_columns": self.string_columns,
}
with SharedMemoryContext() as shm_ctx:
for i in range(num_workers):
worker_random_id = str(uuid4())
worker_name = f"dw_{i}_{worker_random_id}"[:10]
names.append(worker_name)
worker_shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
for col, rank in self.columns_to_ranks.items()
}
shape_arrays.append(worker_shape_arrays)
worker_indices = per_worker_batches[i]
if i == final_batch_worker and final_batch is not None:
final_batch_arg = final_batch
else:
final_batch_arg = None
worker_kwargs = {
"worker_name": worker_name,
"indices": worker_indices,
"extra_batch": final_batch_arg,
"array_ready_event": array_ready_events[i],
"array_loaded_event": array_loaded_events[i],
**base_args,
}
worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
worker.start()
workers.append(worker)
end_signal_received = False
while not end_signal_received:
for i in range(num_workers):
if not array_ready_events[i].wait(timeout=60):
raise TimeoutError("Data loading worker timed out!")
array_ready_events[i].clear()
array_shapes = shape_arrays[i]
if any(np.any(shape < 0) for shape in array_shapes.values()):
# Child processes send negative array shapes to indicate
# that no more data is going to be sent
end_signal_received = True
break
# Matt: Because array shapes are variable we recreate the shared memory each iteration.
# I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
# A future optimization, at the cost of some code complexity, could be to reuse shared memory
# between iterations, but this would require knowing in advance the maximum size, or having
# a system to only create a new memory block when a new maximum size is seen.
# Another potential optimization would be to figure out which memory copies are necessary,
# or whether we can yield objects straight out of shared memory.
with SharedMemoryContext() as batch_shm_ctx:
# This memory context only lasts long enough to copy everything out of the batch
arrays = {
col: batch_shm_ctx.get_array(
f"{names[i]}_{col}",
shape=shape,
dtype=self.columns_to_np_types[col],
create=False,
)
for col, shape in array_shapes.items()
}
# Copy everything out of shm because the memory
# will be unlinked by the child process at some point
arrays = {col: np.copy(arr) for col, arr in arrays.items()}
# Now we convert any unicode char arrays to strings
for string_col in self.string_columns:
arrays[string_col] = (
arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
)
yield arrays
array_loaded_events[i].set()
# Now we just do some cleanup
# Shared memory is cleaned up by the context manager, so we just make sure workers finish
for worker in workers:
worker.join()
def __call__(self):
return self
@staticmethod
def worker_loop(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
columns_to_ranks,
string_columns,
indices,
extra_batch,
worker_name,
array_ready_event,
array_loaded_event,
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
def send_batch_to_parent(indices):
batch = np_get_batch(
indices=indices,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=True,
)
# Now begins the fun part where we start shovelling shared memory at the parent process
out_arrays = {}
with SharedMemoryContext() as batch_shm_ctx:
# The batch shared memory context exists only as long as it takes for the parent process
# to read everything, after which it cleans everything up again
for col, cast_dtype in columns_to_np_types.items():
# Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
array = batch[col]
if col in string_columns:
# We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
# which have a fixed width of 4 bytes. The parent process will convert these back to strings.
array = array.view("U1").reshape(array.shape + (-1,))
shape_arrays[col][:] = array.shape
out_arrays[col] = batch_shm_ctx.get_array(
f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
)
out_arrays[col][:] = array
array_ready_event.set()
array_loaded_event.wait()
array_loaded_event.clear()
with SharedMemoryContext() as shm_ctx:
shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
for col, rank in columns_to_ranks.items()
}
for batch in indices:
send_batch_to_parent(batch)
if extra_batch is not None:
send_batch_to_parent(extra_batch)
# Now we send a batsignal to the parent process that we're done
for col, array in shape_arrays.items():
array[:] = -1
array_ready_event.set()
@staticmethod
def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
indices = np.arange(len(dataset))
if shuffle:
np.random.shuffle(indices)
num_samples = len(indices)
# We distribute the batches so that reading from the workers in round-robin order yields the exact
# order specified in indices. This is only important when shuffle is False, but we do it regardless.
incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
if drop_remainder or len(last_incomplete_batch) == 0:
last_incomplete_batch = None
indices = indices.reshape(-1, batch_size)
num_batches = len(indices)
final_batches_cutoff = num_batches - (num_batches % num_workers)
indices, final_batches = np.split(indices, [final_batches_cutoff])
indices = indices.reshape(-1, num_workers, batch_size)
per_worker_indices = np.split(indices, indices.shape[1], axis=1)
per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
# Distribute the final batches to the first workers
for i in range(len(final_batches)):
# len(final_batches) can be zero, and is always less than num_workers
per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
# Add the last incomplete batch to the next worker, which might be the first worker
if last_incomplete_batch is not None:
incomplete_batch_worker_idx = len(final_batches)
else:
incomplete_batch_worker_idx = None
return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
| class_definition | 10,074 | 21,655 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py | null | 229 |
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
"""
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks" | class_definition | 412 | 1,490 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/info_utils.py | null | 230 |
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
| class_definition | 1,958 | 2,743 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/deprecation_utils.py | null | 231 |
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
)
| class_definition | 2,746 | 3,451 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/deprecation_utils.py | null | 232 |
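A hedged sketch of defining and triggering a deprecated enum; `OldMode` is illustrative and not part of the library, and the import path is assumed from the filepath column.

```py
from datasets.utils.deprecation_utils import DeprecatedEnum  # import path assumed from the filepath column

class OldMode(DeprecatedEnum):  # illustrative enum, not part of the library
    LEGACY = "legacy"

    @property
    def help_message(self):
        return "Use the new mode instead."

mode = OldMode.LEGACY  # emits a FutureWarning mentioning 'OldMode' and the help message
```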
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual files download progress bar
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
| class_definition | 160 | 3,800 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py | null | 233 |
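A short sketch of building a config and mirroring the token into the fsspec storage options; the cache directory is hypothetical.

```py
from datasets import DownloadConfig

download_config = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)  # hypothetical cache dir
download_config.token = True  # assigning token mirrors it into storage_options["hf"] via __setattr__
print(download_config.storage_options["hf"]["token"])  # True
copy_of_config = download_config.copy()  # deep copy of every attribute
```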
class DownloadMode(enum.Enum):
"""`Enum` for how to treat pre-existing downloads and data.
The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
raw downloads and the prepared dataset if they exist.
The generations modes:
| | Downloads | Dataset |
|-------------------------------------|-----------|---------|
| `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
| `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
| `FORCE_REDOWNLOAD` | Fresh | Fresh |
"""
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload" | class_definition | 1,430 | 2,175 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py | null | 234 |
class DownloadManager:
is_streaming = False
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
record_checksums=True,
):
"""Download manager constructor.
Args:
data_dir:
can be used to specify a manual directory to get the files from.
dataset_name (`str`):
name of dataset this instance will be used for. If
provided, downloads will record which dataset they were used for.
download_config (`DownloadConfig`):
to specify the cache directory and other
download options
base_path (`str`):
base path that is used when relative paths are used to
download files. This can be a remote url.
record_checksums (`bool`, defaults to `True`):
Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
"""
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
# To record what is being used: {url: {num_bytes: int, checksum: str}}
self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
self.record_checksums = record_checksums
self.download_config = download_config or DownloadConfig()
self.downloaded_paths = {}
self.extracted_paths = {}
@property
def manual_dir(self):
return self._data_dir
@property
def downloaded_size(self):
"""Returns the total size of downloaded files."""
return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
"""Record size/checksum of downloaded files."""
delay = 5
for url, path in hf_tqdm(
list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
delay=delay,
desc="Computing checksums",
):
# call str to support PathLike objects
self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
path, record_checksum=self.record_checksums
)
def download(self, url_or_urls):
"""Download given URL(s).
By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download. Each URL is a `str`.
Returns:
`str` or `list` or `dict`:
The downloaded paths matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = False
if download_config.download_desc is None:
download_config.download_desc = "Downloading data"
download_func = partial(self._download_batched, download_config=download_config)
start_time = datetime.now()
with stack_multiprocessing_download_progress_bars():
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
desc="Downloading data files",
batched=True,
batch_size=-1,
)
duration = datetime.now() - start_time
logger.info(f"Downloading took {duration.total_seconds() // 60} min")
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
start_time = datetime.now()
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
duration = datetime.now() - start_time
logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
return downloaded_path_or_paths.data
def _download_batched(
self,
url_or_filenames: List[str],
download_config: DownloadConfig,
) -> List[str]:
if len(url_or_filenames) >= 16:
download_config = download_config.copy()
download_config.disable_tqdm = True
download_func = partial(self._download_single, download_config=download_config)
fs: fsspec.AbstractFileSystem
path = str(url_or_filenames[0])
if is_relative_path(path):
# append the relative path to the base_path
path = url_or_path_join(self._base_path, path)
fs, path = url_to_fs(path, **download_config.storage_options)
size = 0
try:
size = fs.info(path).get("size", 0)
except Exception:
pass
max_workers = (
config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1
) # enable multithreading if files are small
return thread_map(
download_func,
url_or_filenames,
desc=download_config.download_desc or "Downloading",
unit="files",
position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
and multiprocessing.current_process()._identity
else None,
max_workers=max_workers,
tqdm_class=tqdm,
)
else:
return [
self._download_single(url_or_filename, download_config=download_config)
for url_or_filename in url_or_filenames
]
def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str:
url_or_filename = str(url_or_filename)
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
out = cached_path(url_or_filename, download_config=download_config)
out = tracked_str(out)
out.set_origin(url_or_filename)
return out
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
"""Iterate over files within an archive.
Args:
path_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(path_or_buf, "read"):
return ArchiveIterable.from_buf(path_or_buf)
else:
return ArchiveIterable.from_urlpath(path_or_buf)
def iter_files(self, paths: Union[str, List[str]]):
"""Iterate over file paths.
Args:
paths (`str` or `list` of `str`):
Root paths.
Yields:
`str`: File path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co./datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(paths)
def extract(self, path_or_paths):
"""Extract given path(s).
Args:
path_or_paths (path or `list` or `dict`):
Path of file to extract. Each path is a `str`.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = True
extract_func = partial(self._download_single, download_config=download_config)
extracted_paths = map_nested(
extract_func,
path_or_paths,
num_proc=download_config.num_proc,
desc="Extracting data files",
)
path_or_paths = NestedDataStructure(path_or_paths)
extracted_paths = NestedDataStructure(extracted_paths)
self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
return extracted_paths.data
def download_and_extract(self, url_or_urls):
"""Download and extract given `url_or_urls`.
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def get_recorded_sizes_checksums(self):
return self._recorded_sizes_checksums.copy()
def delete_extracted_files(self):
paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
for key, path in list(self.extracted_paths.items()):
if path in paths_to_delete and os.path.isfile(path):
os.remove(path)
del self.extracted_paths[key]
def manage_extracted_files(self):
if self.download_config.delete_extracted:
self.delete_extracted_files()
| class_definition | 2,178 | 12,773 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py | null | 235 |
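A hedged end-to-end sketch of using a standalone `DownloadManager`; it requires network access, the dataset name and cache directory are illustrative, and the URL is the one already used in the docstrings above.

```py
from datasets import DownloadConfig, DownloadManager

dl_manager = DownloadManager(
    dataset_name="demo",  # illustrative name
    download_config=DownloadConfig(cache_dir="/tmp/hf_demo_cache"),  # hypothetical cache dir
)
url = "https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz"
extracted_dir = dl_manager.download_and_extract(url)  # download, record checksums, then extract
for inner_path, file_obj in dl_manager.iter_archive(dl_manager.download(url)):
    ...  # stream members straight out of the archive without extracting it
```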
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
Unlike the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
built-in `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
self.downloaded_size = None
self.record_checksums = False
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Normalize URL(s) of files to stream data from.
This is the lazy version of `DownloadManager.download` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
return url_or_urls
def _download_single(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, url_or_urls):
"""Add extraction protocol for given url(s) for streaming.
This is the lazy version of `DownloadManager.extract` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
return urlpaths
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
f"Please use `dl_manager.iter_archive` instead.\n\n"
f"Example usage:\n\n"
f"\turl = dl_manager.download(url)\n"
f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
f"\tfor filename, file in tar_archive_iterator:\n"
f"\t\t..."
)
if protocol is None:
# no extraction
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}"
def download_and_extract(self, url_or_urls):
"""Prepare given `url_or_urls` for streaming (add extraction protocol).
This is the lazy version of `DownloadManager.download_and_extract` for streaming.
Is equivalent to:
```
urls = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
            URL(s) to stream data from. Each URL is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (`str` or `list` of `str`):
Root paths.
Yields:
str: File URL path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co./datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
def manage_extracted_files(self):
pass
def get_recorded_sizes_checksums(self):
pass | class_definition | 891 | 7,522 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py | null | 236 |
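# Illustrative usage sketch (not part of the library source): in streaming mode,
# download() returns the URL unchanged and extract() only prepends a chained
# extraction protocol using the "::" separator. The URL is a placeholder.
from datasets.download.streaming_download_manager import StreamingDownloadManager

dl_manager = StreamingDownloadManager()
url = dl_manager.download("https://example.com/data.json.gz")  # returned as-is
streamable = dl_manager.extract(url)
print(streamable)  # expected form: "gzip://data.json::https://example.com/data.json.gz"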
class PolarsArrowExtractor(BaseArrowExtractor["pl.DataFrame", "pl.Series", "pl.DataFrame"]):
def extract_row(self, pa_table: pa.Table) -> "pl.DataFrame":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table.slice(length=1))
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
def extract_column(self, pa_table: pa.Table) -> "pl.Series":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]]
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
def extract_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table)
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") | class_definition | 998 | 2,357 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py | null | 237 |
class PolarsFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
import polars as pl # noqa: F401 - import pl at initialization
def decode_row(self, row: "pl.DataFrame") -> "pl.DataFrame":
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.map_rows(decode)
return row
def decode_column(self, column: "pl.Series", column_name: str) -> "pl.Series":
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.map_elements(decode)
return column
def decode_batch(self, batch: "pl.DataFrame") -> "pl.DataFrame":
return self.decode_row(batch) | class_definition | 2,360 | 3,595 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py | null | 238 |
class PolarsFormatter(TensorFormatter[Mapping, "pl.DataFrame", Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
self.polars_arrow_extractor = PolarsArrowExtractor
self.polars_features_decoder = PolarsFeaturesDecoder(features)
import polars as pl # noqa: F401 - import pl at initialization
def format_row(self, pa_table: pa.Table) -> "pl.DataFrame":
row = self.polars_arrow_extractor().extract_row(pa_table)
row = self.polars_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> "pl.Series":
column = self.polars_arrow_extractor().extract_column(pa_table)
column = self.polars_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
row = self.polars_arrow_extractor().extract_batch(pa_table)
row = self.polars_features_decoder.decode_batch(row)
return row | class_definition | 3,598 | 4,699 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py | null | 239 |
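# Illustrative usage sketch (not part of the library source): the Polars classes above
# back the "polars" dataset format, assuming the optional polars dependency is installed.
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3], "b": ["x", "y", "z"]}).with_format("polars")
print(type(ds[:2]))   # polars DataFrame with two rows
print(type(ds["a"]))  # polars Series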
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
    Arrow extractors are used to extract data from pyarrow tables.
    They make it possible to extract rows, columns and batches.
    These three extraction types have to be implemented.
"""
def extract_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError | class_definition | 3,964 | 4,534 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 240 |
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
def extract_row(self, pa_table: pa.Table) -> pa.Table:
return pa_table
def extract_column(self, pa_table: pa.Table) -> pa.Array:
return pa_table.column(0)
def extract_batch(self, pa_table: pa.Table) -> pa.Table:
return pa_table | class_definition | 4,727 | 5,070 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 241 |
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(pa_table.to_pydict())
def extract_column(self, pa_table: pa.Table) -> list:
return pa_table.column(0).to_pylist()
def extract_batch(self, pa_table: pa.Table) -> dict:
return pa_table.to_pydict() | class_definition | 5,073 | 5,437 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 242 |
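# Illustrative usage sketch (not part of the library source): using the extractor above
# directly on a pyarrow table to get plain Python objects.
import pyarrow as pa
from datasets.formatting.formatting import PythonArrowExtractor

table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
extractor = PythonArrowExtractor()
print(extractor.extract_row(table.slice(0, 1)))  # {'a': 1, 'b': 'x'}
print(extractor.extract_column(table))           # [1, 2, 3]
print(extractor.extract_batch(table))            # {'a': [1, 2, 3], 'b': ['x', 'y', 'z']}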
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
def __init__(self, **np_array_kwargs):
self.np_array_kwargs = np_array_kwargs
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(self.extract_batch(pa_table))
def extract_column(self, pa_table: pa.Table) -> np.ndarray:
return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
def extract_batch(self, pa_table: pa.Table) -> dict:
return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
if isinstance(pa_array, pa.ChunkedArray):
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
if len(array) > 0:
if any(
(isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
or (isinstance(x, float) and np.isnan(x))
for x in array
):
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array, dtype=object)
return np.array(array, copy=False, dtype=object)
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array)
else:
return np.array(array, copy=False) | class_definition | 5,440 | 8,032 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 243 |
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
def extract_column(self, pa_table: pa.Table) -> pd.Series:
return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.to_pandas(types_mapper=pandas_types_mapper) | class_definition | 8,035 | 8,572 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 244 |
class PythonFeaturesDecoder:
def __init__(
self, features: Optional[Features], token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
):
self.features = features
self.token_per_repo_id = token_per_repo_id
def decode_row(self, row: dict) -> dict:
return self.features.decode_example(row, token_per_repo_id=self.token_per_repo_id) if self.features else row
def decode_column(self, column: list, column_name: str) -> list:
return self.features.decode_column(column, column_name) if self.features else column
def decode_batch(self, batch: dict) -> dict:
return self.features.decode_batch(batch) if self.features else batch | class_definition | 8,575 | 9,279 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 245 |
class PandasFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.transform(decode)
return row
def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.transform(decode)
return column
def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
return self.decode_row(batch) | class_definition | 9,282 | 10,431 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 246 |
class LazyDict(MutableMapping):
"""A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
self.pa_table = pa_table
self.formatter = formatter
self.data = {key: None for key in pa_table.column_names}
self.keys_to_format = set(self.data.keys())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
value = self.data[key]
if key in self.keys_to_format:
value = self.format(key)
self.data[key] = value
self.keys_to_format.remove(key)
return value
def __setitem__(self, key, value):
if key in self.keys_to_format:
self.keys_to_format.remove(key)
self.data[key] = value
def __delitem__(self, key) -> None:
if key in self.keys_to_format:
self.keys_to_format.remove(key)
del self.data[key]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
def __or__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = inst.data | other.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = inst.data | other
return inst
return NotImplemented
def __ror__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = other.data | inst.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = other | inst.data
return inst
return NotImplemented
def __ior__(self, other):
if isinstance(other, LazyDict):
other = other.copy()
other._format_all()
self.keys_to_format -= other.data.keys()
self.data |= other.data
else:
self.keys_to_format -= other.keys()
self.data |= other
return self
def __copy__(self):
# Identical to `UserDict.__copy__`
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
return inst
def copy(self):
import copy
return copy.copy(self)
@classmethod
def fromkeys(cls, iterable, value=None):
raise NotImplementedError
def format(self, key):
raise NotImplementedError
def _format_all(self):
for key in self.keys_to_format:
self.data[key] = self.format(key)
self.keys_to_format.clear() | class_definition | 10,434 | 14,023 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 247 |
class LazyRow(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))[0] | class_definition | 14,026 | 14,153 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 248 |
class LazyBatch(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key])) | class_definition | 14,156 | 14,282 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 249 |
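# Illustrative usage sketch (not part of the library source): LazyRow/LazyBatch behave
# like dicts whose values are only formatted on first access; for example, Dataset.map
# may pass such lazy mappings so that untouched columns are never decoded.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["ab", "c"], "label": [0, 1]})

def add_length(batch):  # `batch` is a dict-like mapping (possibly lazy)
    return {"length": [len(t) for t in batch["text"]]}  # only "text" is accessed

ds = ds.map(add_length, batched=True)
print(ds.column_names)  # ['text', 'label', 'length']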
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
"""
simple_arrow_extractor = SimpleArrowExtractor
python_arrow_extractor = PythonArrowExtractor
numpy_arrow_extractor = NumpyArrowExtractor
pandas_arrow_extractor = PandasArrowExtractor
def __init__(
self,
features: Optional[Features] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
):
self.features = features
self.token_per_repo_id = token_per_repo_id
self.python_features_decoder = PythonFeaturesDecoder(self.features, self.token_per_repo_id)
self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
if query_type == "row":
return self.format_row(pa_table)
elif query_type == "column":
return self.format_column(pa_table)
elif query_type == "batch":
return self.format_batch(pa_table)
def format_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def format_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError | class_definition | 14,285 | 15,775 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 250 |
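# Illustrative usage sketch (not part of the library source): formatters are typically
# obtained by name and then called with a query type, mirroring __call__ above.
import pyarrow as pa
from datasets.formatting import get_formatter

table = pa.table({"a": [1, 2, 3]})
formatter = get_formatter("numpy")
row = formatter(table.slice(0, 1), query_type="row")  # e.g. {'a': 1} with numpy scalars
batch = formatter(table, query_type="batch")          # e.g. {'a': array([1, 2, 3])}
print(row, batch)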
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
def recursive_tensorize(self, data_struct: dict):
raise NotImplementedError | class_definition | 15,778 | 15,937 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 251 |
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
def format_row(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_row(pa_table)
def format_column(self, pa_table: pa.Table) -> pa.Array:
return self.simple_arrow_extractor().extract_column(pa_table)
def format_batch(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_batch(pa_table) | class_definition | 15,940 | 16,389 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 252 |
class PythonFormatter(Formatter[Mapping, list, Mapping]):
def __init__(self, features=None, lazy=False, token_per_repo_id=None):
super().__init__(features, token_per_repo_id)
self.lazy = lazy
def format_row(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyRow(pa_table, self)
row = self.python_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> list:
column = self.python_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyBatch(pa_table, self)
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return batch | class_definition | 16,392 | 17,399 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 253 |
class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_row(pa_table)
row = self.pandas_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> pd.Series:
column = self.pandas_arrow_extractor().extract_column(pa_table)
column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_batch(pa_table)
row = self.pandas_features_decoder.decode_batch(row)
return row | class_definition | 17,402 | 18,144 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 254 |
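# Illustrative usage sketch (not part of the library source): the formatter above backs
# the "pandas" dataset format.
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]}).with_format("pandas")
print(type(ds[:2]))   # pandas DataFrame
print(type(ds["a"]))  # pandas Series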
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
"""
A user-defined custom formatter function defined by a ``transform``.
    The transform must take as input a batch of data extracted from an arrow table using the python extractor,
and return a batch.
If the output batch is not a dict, then output_all_columns won't work.
    If the output batch has several fields, then querying a single column won't work since we don't know which field
to return.
"""
def __init__(self, transform: Callable[[dict], dict], features=None, token_per_repo_id=None, **kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.transform = transform
def format_row(self, pa_table: pa.Table) -> dict:
formatted_batch = self.format_batch(pa_table)
try:
return _unnest(formatted_batch)
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
) from exc
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
formatted_batch = self.format_batch(pa_table)
if hasattr(formatted_batch, "keys"):
if len(formatted_batch.keys()) > 1:
raise TypeError(
"Tried to query a column but the custom formatting function returns too many columns. "
f"Only one column was expected but got columns {list(formatted_batch.keys())}."
)
else:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}"
            )
try:
return formatted_batch[pa_table.column_names[0]]
except Exception as exc:
            raise TypeError(
                f"Custom formatting function must return a dict to be able to pick a column, but got {formatted_batch}"
            ) from exc
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return self.transform(batch) | class_definition | 18,147 | 20,392 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py | null | 255 |
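# Illustrative usage sketch (not part of the library source): Dataset.with_transform
# wraps a user function with the custom formatter above; the function receives a
# python-extracted batch and its output is unnested when a single row is queried.
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})

def double(batch):
    return {"a": [x * 2 for x in batch["a"]]}

ds = ds.with_transform(double)
print(ds[0])   # {'a': 2}
print(ds[:2])  # {'a': [2, 4]}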
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, token_per_repo_id=None, **torch_tensor_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
# Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
# np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
if value.dtype in [np.uint16, np.uint32]:
value = value.astype(np.int64)
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
if value.ndim == 2:
value = value[:, :, np.newaxis]
value = value.transpose((2, 0, 1))
if config.DECORD_AVAILABLE and "decord" in sys.modules:
from decord import VideoReader
from decord.bridge import to_torch
if isinstance(value, VideoReader):
value._hf_bridge_out = to_torch
return value
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import torch
# support for torch, tf, jax etc.
if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch | class_definition | 871 | 5,067 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py | null | 256 |
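# Illustrative usage sketch (not part of the library source): the formatter above backs
# with_format("torch"); integers default to int64 and equal-shaped rows are stacked.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
print(ds[0]["x"])   # tensor([1, 2])
print(ds[:2]["x"])  # tensor([[1, 2], [3, 4]]) since shapes and dtypes match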
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
def __init__(self, features=None, device=None, token_per_repo_id=None, **jnp_array_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
import jax
from jaxlib.xla_client import Device
if isinstance(device, Device):
raise ValueError(
f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`."
)
self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # use a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f"Device with string identifier {self.device} not listed among the available "
f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
f"device: {str(jax.devices()[0])}."
)
self.device = str(jax.devices()[0])
self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(device): device for device in jax.devices()}
def _consolidate(self, column):
import jax
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return jnp.stack(column, axis=0)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
# We need to import torch first, otherwise later it can cause issues
# e.g. "RuntimeError: random_device could not be read"
# when running `torch.tensor(value).share_memory_()`
if config.TORCH_AVAILABLE:
import torch # noqa
from decord import VideoReader
if isinstance(value, VideoReader):
value._hf_bridge_out = lambda x: jnp.array(np.asarray(x))
return value
        # use a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
DEVICE_MAPPING = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jax.Array":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch | class_definition | 1,004 | 7,445 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/jax_formatter.py | null | 257 |
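# Illustrative usage sketch (not part of the library source): the formatter above backs
# with_format("jax"); floating point data comes back as float32 jax arrays by default.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
print(ds[:2]["x"].dtype)  # float32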
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
def __init__(self, features=None, token_per_repo_id=None, **tf_tensor_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: F401 - import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
                # only ragged-stack 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
# We need to import torch first, otherwise later it can cause issues
# e.g. "RuntimeError: random_device could not be read"
# when running `torch.tensor(value).share_memory_()`
if config.TORCH_AVAILABLE:
import torch # noqa
from decord import VideoReader
from decord.bridge import to_tensorflow
if isinstance(value, VideoReader):
value._hf_bridge_out = to_tensorflow
return value
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct):
import tensorflow as tf
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch | class_definition | 882 | 5,294 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/tf_formatter.py | null | 258 |
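# Illustrative usage sketch (not part of the library source): the formatter above backs
# with_format("tf"); variable-length 1-D values are consolidated into a ragged tensor.
from datasets import Dataset

ds = Dataset.from_dict({"tokens": [[1, 2, 3], [4, 5]]}).with_format("tf")
print(type(ds[:2]["tokens"]))  # expected: tf.RaggedTensor, since the row lengths differ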
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, token_per_repo_id=None, **np_array_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
if config.DECORD_AVAILABLE and "decord" in sys.modules:
# We need to import torch first, otherwise later it can cause issues
# e.g. "RuntimeError: random_device could not be read"
# when running `torch.tensor(value).share_memory_()`
if config.TORCH_AVAILABLE:
import torch # noqa
from decord import VideoReader
if isinstance(value, VideoReader):
value._hf_bridge_out = np.asarray
return value
return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct):
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
if isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch | class_definition | 782 | 5,107 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/np_formatter.py | null | 259 |
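# Illustrative usage sketch (not part of the library source): the formatter above backs
# with_format("numpy"); equal-shaped rows are stacked into a single ndarray.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("numpy")
arr = ds[:2]["x"]
print(arr.shape, arr.dtype)  # (2, 2) int64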
class DeleteFromHubCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser):
parser: ArgumentParser = parser.add_parser("delete_from_hub", help="Delete dataset config from the Hub")
parser.add_argument(
"dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME"
)
parser.add_argument("config_name", help="config name to delete")
parser.add_argument("--token", help="access token to the Hugging Face Hub")
parser.add_argument("--revision", help="source revision")
parser.set_defaults(func=_command_factory)
def __init__(
self,
dataset_id: str,
config_name: str,
token: Optional[str],
revision: Optional[str],
):
self._dataset_id = dataset_id
self._config_name = config_name
self._token = token
self._revision = revision
def run(self) -> None:
_ = delete_from_hub(self._dataset_id, self._config_name, revision=self._revision, token=self._token) | class_definition | 324 | 1,395 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/delete_from_hub.py | null | 260 |
class EnvironmentCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env", help="Print relevant system environment info.")
download_parser.set_defaults(func=info_command_factory)
def run(self):
info = {
"`datasets` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"`huggingface_hub` version": huggingface_hub.__version__,
"PyArrow version": pyarrow.__version__,
"Pandas version": pandas.__version__,
"`fsspec` version": fsspec.__version__,
}
print("\nCopy-and-paste the text below in your GitHub issue.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n" | class_definition | 282 | 1,238 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/env.py | null | 261 |
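# Illustrative usage sketch (not part of the library source): this command is normally
# reached through the `datasets-cli env` entry point, but it can also be run directly.
from datasets.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()  # prints a copy-pasteable environment report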
class ConvertToParquetCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser):
parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet")
parser.add_argument(
"dataset_id", help="source dataset ID, e.g. USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME"
)
parser.add_argument("--token", help="access token to the Hugging Face Hub (defaults to logged-in user's one)")
parser.add_argument("--revision", help="source revision")
parser.add_argument(
"--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script"
)
parser.set_defaults(func=_command_factory)
def __init__(
self,
dataset_id: str,
token: Optional[str],
revision: Optional[str],
trust_remote_code: bool,
):
self._dataset_id = dataset_id
self._token = token
self._revision = revision
self._trust_remote_code = trust_remote_code
def run(self) -> None:
_ = convert_to_parquet(
self._dataset_id, revision=self._revision, token=self._token, trust_remote_code=self._trust_remote_code
) | class_definition | 336 | 1,592 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert_to_parquet.py | null | 262 |
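# Illustrative usage sketch (not part of the library source): the programmatic
# equivalent of `datasets-cli convert_to_parquet USERNAME/DATASET_NAME` with the
# arguments registered above; the dataset ID is a placeholder and the call contacts
# the Hugging Face Hub.
from datasets.commands.convert_to_parquet import ConvertToParquetCommand

cmd = ConvertToParquetCommand("USERNAME/DATASET_NAME", token=None, revision=None, trust_remote_code=False)
cmd.run()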
class ConvertCommand(BaseDatasetsCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the datasets-cli
Args:
parser: Root parser to register command-specific arguments
"""
train_parser = parser.add_parser(
"convert",
help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
)
train_parser.add_argument(
"--tfds_path",
type=str,
required=True,
help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
)
train_parser.add_argument(
"--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(self, tfds_path: str, datasets_directory: str, *args):
self._logger = get_logger("datasets-cli/converting")
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def run(self):
if os.path.isdir(self._tfds_path):
abs_tfds_path = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
abs_tfds_path = os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
abs_datasets_path = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
utils_files = []
with_manual_update = []
imports_to_builder_map = {}
if os.path.isdir(self._tfds_path):
file_names = os.listdir(abs_tfds_path)
else:
file_names = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
input_file = os.path.join(abs_tfds_path, f_name)
output_file = os.path.join(abs_datasets_path, f_name)
if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(input_file, encoding="utf-8") as f:
lines = f.readlines()
out_lines = []
is_builder = False
needs_manual_update = False
tfds_imports = []
for line in lines:
out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
out_line = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
out_line = ""
continue
elif "from absl import logging" in out_line:
out_line = "from datasets import logging\n"
elif "getLogger" in out_line:
out_line = out_line.replace("getLogger", "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
needs_manual_update = True
to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
out_lines.append(out_line)
out_lines.append(HIGHLIGHT_MESSAGE_POST)
continue
else:
for pattern, replacement in TO_CONVERT:
out_line = re.sub(pattern, replacement, out_line)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
out_line = "from . import " + match.group(1)
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line:
is_builder = True
out_lines.append(out_line)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
dir_name = f_name.replace(".py", "")
output_dir = os.path.join(abs_datasets_path, dir_name)
output_file = os.path.join(output_dir, f_name)
os.makedirs(output_dir, exist_ok=True)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(output_file)
if needs_manual_update:
with_manual_update.append(output_file)
with open(output_file, "w", encoding="utf-8") as f:
f.writelines(out_lines)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
f_name = os.path.basename(utils_file)
dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
shutil.copy(utils_file, dest_folder)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
) | class_definition | 1,477 | 7,879 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/convert.py | null | 263 |
class BaseDatasetsCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError() | class_definition | 74 | 311 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/__init__.py | null | 264 |
class TestCommand(BaseDatasetsCLICommand):
__test__ = False # to tell pytest it's not a test class
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("test", help="Test dataset implementation.")
test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored.",
)
test_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
test_parser.add_argument(
"--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
)
test_parser.add_argument(
"--ignore_verifications",
action="store_true",
help="Run the test without checksums and splits checks.",
)
test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
test_parser.add_argument(
"--clear_cache",
action="store_true",
help="Remove downloaded files and cached datasets after each config test",
)
test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes")
test_parser.add_argument(
"--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script"
)
# aliases
test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=_test_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
clear_cache: bool,
num_proc: int,
trust_remote_code: Optional[bool],
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._clear_cache = clear_cache
self._num_proc = num_proc
self._trust_remote_code = trust_remote_code
if clear_cache and not cache_dir:
print(
"When --clear_cache is used, specifying a cache directory is mandatory.\n"
"The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
"Please provide a --cache_dir that will be used to test the dataset script."
)
exit(1)
if save_infos:
self._ignore_verifications = True
def run(self):
logging.getLogger("filelock").setLevel(ERROR)
if self._name is not None and self._all_configs:
print("Both parameters `config` and `all_configs` can't be used at once.")
exit(1)
path, config_name = self._dataset, self._name
module = dataset_module_factory(path, trust_remote_code=self._trust_remote_code)
builder_cls = import_main_class(module.module_path)
n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
def get_builders() -> Generator[DatasetBuilder, None, None]:
if self._all_configs and builder_cls.BUILDER_CONFIGS:
for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
if "config_name" in module.builder_kwargs:
yield builder_cls(
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
yield builder_cls(
config_name=config.name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
if "config_name" in module.builder_kwargs:
yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
else:
yield builder_cls(
config_name=config_name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
for j, builder in enumerate(get_builders()):
print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
builder._record_infos = os.path.exists(
os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
) # record checksums only if we need to update a (deprecated) dataset_infos.json
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
num_proc=self._num_proc,
)
builder.as_dataset()
if self._save_infos:
builder._save_infos()
# If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
# The dataset_infos are saved in the YAML part of the README.md
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_readme_path = os.path.join(
builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
)
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
elif os.path.isdir(path): # for local directories containing only data files
dataset_dir = path
else: # in case of a remote dataset
dataset_dir = None
print(f"Dataset card saved at {dataset_readme_path}")
# Move dataset_info back to the user
if dataset_dir is not None:
user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
copyfile(dataset_readme_path, user_dataset_readme_path)
print(f"Dataset card saved at {user_dataset_readme_path}")
# If clear_cache=True, the download folder and the dataset builder cache directory are deleted
if self._clear_cache:
if os.path.isdir(builder._cache_dir):
logger.warning(f"Clearing cache at {builder._cache_dir}")
rmtree(builder._cache_dir)
download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
if os.path.isdir(download_dir):
logger.warning(f"Clearing cache at {download_dir}")
rmtree(download_dir)
print("Test successful.") | class_definition | 921 | 9,087 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/commands/test.py | null | 265 |
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
"""Read contents of compressed file as a filesystem with one file inside."""
root_marker = ""
protocol: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
compression: str = None # compression type in fsspec. ex: "gzip"
extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__(
self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
):
"""
The compressed file system can be instantiated from any compressed file.
        It reads the contents of the compressed file as a filesystem with one file inside, as if it was an archive.
        The single file inside the filesystem is named after the compressed file,
        without the compression extension at the end of the filename.
Args:
fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
mode (:obj:``str``): Currently, only 'rb' accepted
target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
"""
super().__init__(self, **kwargs)
self.fo = fo.__fspath__() if hasattr(fo, "__fspath__") else fo
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
self._open_with_fsspec = partial(
fsspec.open,
self.fo,
mode="rb",
protocol=target_protocol,
compression=self.compression,
client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
},
**(target_options or {}),
)
self.compressed_name = os.path.basename(self.fo.split("::")[0])
self.uncompressed_name = (
self.compressed_name[: self.compressed_name.rindex(".")]
if "." in self.compressed_name
else self.compressed_name
)
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
f = {**self._open_with_fsspec().fs.info(self.fo), "name": self.uncompressed_name}
self.dir_cache = {f["name"]: f}
def cat(self, path: str):
with self._open_with_fsspec().open() as f:
return f.read()
def _open(
self,
path: str,
mode: str = "rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.fo} opened with mode 'rb'")
return self._open_with_fsspec().open() | class_definition | 138 | 3,486 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 266 |
class Bz2FileSystem(BaseCompressedFileFileSystem):
"""Read contents of BZ2 file as a filesystem with one file inside."""
protocol = "bz2"
compression = "bz2"
extension = ".bz2" | class_definition | 3,489 | 3,682 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 267 |
class GzipFileSystem(BaseCompressedFileFileSystem):
"""Read contents of GZIP file as a filesystem with one file inside."""
protocol = "gzip"
compression = "gzip"
extension = ".gz" | class_definition | 3,685 | 3,881 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 268 |
class Lz4FileSystem(BaseCompressedFileFileSystem):
"""Read contents of LZ4 file as a filesystem with one file inside."""
protocol = "lz4"
compression = "lz4"
extension = ".lz4" | class_definition | 3,884 | 4,077 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 269 |
class XzFileSystem(BaseCompressedFileFileSystem):
"""Read contents of .xz (LZMA) file as a filesystem with one file inside."""
protocol = "xz"
compression = "xz"
extension = ".xz" | class_definition | 4,080 | 4,276 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 270 |
class ZstdFileSystem(BaseCompressedFileFileSystem):
"""
Read contents of .zstd file as a filesystem with one file inside.
"""
protocol = "zstd"
compression = "zstd"
extension = ".zst" | class_definition | 4,279 | 4,487 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/filesystems/compression.py | null | 271 |
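# Illustrative usage sketch (not part of the library source): once these protocols are
# registered with fsspec (importing `datasets` is assumed to take care of that), a
# compressed remote file can be read through a chained URL. The URL is a placeholder.
import datasets  # noqa: F401  assumption: the import registers gzip://, bz2://, ... with fsspec
import fsspec

with fsspec.open("gzip://data.json::https://example.com/data.json.gz", "rt") as f:
    print(f.readline())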