text (string, lengths 1–1.02k) | class_index (int64, 0–271) | source (string, 76 classes) |
---|---|---|
class GzipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x1f\x8b"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with gzip.open(input_path, "rb") as gzip_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file) | 219 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
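The extractors in this file all subclass `MagicNumberBaseExtractor`, which is not shown in this excerpt. Conceptually, detection amounts to reading the first few bytes of a file and comparing them against each extractor's known signatures; a minimal sketch of that idea (the helper name and file path below are hypothetical):

```py
# Conceptual sketch only: MagicNumberBaseExtractor itself is not shown above.
def starts_with_magic_number(path, magic_numbers):
    max_len = max(len(m) for m in magic_numbers)
    with open(path, "rb") as f:
        header = f.read(max_len)
    return any(header.startswith(m) for m in magic_numbers)

# e.g. starts_with_magic_number("data/file.gz", GzipExtractor.magic_numbers)
```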
class ZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if super().is_extractable(path, magic_number=magic_number):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
) | 220 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
with open(path, "rb") as fp:
endrec = _EndRecData(fp)
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir) # CD is where we expect it to be
if len(data) == sizeCentralDir:
centdir = struct.unpack(structCentralDir, data) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number | 220 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False | 220 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
with zipfile.ZipFile(input_path, "r") as zip_file:
zip_file.extractall(output_path) | 220 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class XzExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with lzma.open(input_path) as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file) | 221 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class RarExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile")
import rarfile
os.makedirs(output_path, exist_ok=True)
rf = rarfile.RarFile(input_path)
rf.extractall(output_path)
rf.close() | 222 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class ZstdExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x28\xb5\x2f\xfd"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard")
import zstandard as zstd
dctx = zstd.ZstdDecompressor()
with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
dctx.copy_stream(ifh, ofh) | 223 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class Bzip2Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x42\x5a\x68"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
with bz2.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file) | 224 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class SevenZipExtractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr")
import py7zr
os.makedirs(output_path, exist_ok=True)
with py7zr.SevenZipFile(input_path, "r") as archive:
archive.extractall(output_path) | 225 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class Lz4Extractor(MagicNumberBaseExtractor):
magic_numbers = [b"\x04\x22\x4d\x18"]
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4")
import lz4.frame
with lz4.frame.open(input_path, "rb") as compressed_file:
with open(output_path, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file) | 226 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
class Extractor:
# Check zip last, because a file can be wrongly detected as zip (e.g. when it is actually tar or gzip)
extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": Bzip2Extractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": Lz4Extractor, # <Added version="2.4.0"/>
}
@classmethod
def _get_magic_number_max_length(cls):
return max(
len(extractor_magic_number)
for extractor in cls.extractors.values()
if issubclass(extractor, MagicNumberBaseExtractor)
for extractor_magic_number in extractor.magic_numbers
) | 227 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
@staticmethod
def _read_magic_number(path: Union[Path, str], magic_number_length: int):
try:
return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
except OSError:
return b""
@classmethod
def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",
category=FutureWarning,
)
extractor_format = cls.infer_extractor_format(path)
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None) | 227 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
@classmethod
def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]: # <Added version="2.4.0"/>
magic_number_max_length = cls._get_magic_number_max_length()
magic_number = cls._read_magic_number(path, magic_number_max_length)
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(path, magic_number=magic_number):
return extractor_format
@classmethod
def extract(
cls,
input_path: Union[Path, str],
output_path: Union[Path, str],
extractor_format: str,
) -> None:
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# Prevent parallel extractions
lock_path = str(Path(output_path).with_suffix(".lock"))
with FileLock(lock_path):
shutil.rmtree(output_path, ignore_errors=True)
extractor = cls.extractors[extractor_format]
return extractor.extract(input_path, output_path) | 227 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/extract.py |
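A minimal usage sketch of the `Extractor` class above, assuming the module is importable as `datasets.utils.extract` (per the source path in this row) and using hypothetical file paths:

```py
from datasets.utils.extract import Extractor

archive = "data/train.json.gz"  # hypothetical input file
fmt = Extractor.infer_extractor_format(archive)  # e.g. "gzip", inferred from the magic number
if fmt is not None:
    # For single-file compression formats the output path is the decompressed file itself.
    Extractor.extract(archive, "cache/extracted/train.json", extractor_format=fmt)
```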
class SharedMemoryContext:
# This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
# The process that creates shared memory is always the one responsible for unlinking it in the end
def __init__(self):
self.created_shms = []
self.opened_shms = []
def get_shm(self, name, size, create):
shm = SharedMemory(size=int(size), name=name, create=create)
if create:
# We only unlink the ones we created in this context
self.created_shms.append(shm)
else:
# If we didn't create it, we only close it when done, we don't unlink it
self.opened_shms.append(shm)
return shm
def get_array(self, name, shape, dtype, create):
shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
def __enter__(self):
return self | 228 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
def __exit__(self, exc_type, exc_value, traceback):
for shm in self.created_shms:
shm.close()
shm.unlink()
for shm in self.opened_shms:
shm.close() | 228 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
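A minimal single-process sketch of the API above, assuming `SharedMemoryContext` is in scope; in the generator below, the creating and attaching contexts live in different processes, and the block name here is hypothetical (it must not already exist on the machine):

```py
import numpy as np

with SharedMemoryContext() as owner_ctx:
    # The creating context is responsible for unlinking the block on exit.
    arr = owner_ctx.get_array("demo_block", shape=(4,), dtype=np.int64, create=True)
    arr[:] = [1, 2, 3, 4]
    with SharedMemoryContext() as reader_ctx:
        # A consumer (normally another process) attaches by name only.
        view = reader_ctx.get_array("demo_block", shape=(4,), dtype=np.int64, create=False)
        assert view.tolist() == [1, 2, 3, 4]
        del view  # drop buffer-backed views before the context closes the segment
    del arr
```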
class NumpyMultiprocessingGenerator:
def __init__(
self,
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
output_signature,
shuffle,
batch_size,
drop_remainder,
num_workers,
):
self.dataset = dataset
self.cols_to_retain = cols_to_retain
self.collate_fn = collate_fn
self.collate_fn_args = collate_fn_args
self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype is np.str_]
# Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
self.columns_to_np_types = {
col: dtype if col not in self.string_columns else np.dtype("U1")
for col, dtype in columns_to_np_types.items()
}
self.output_signature = output_signature
self.shuffle = shuffle
self.batch_size = batch_size
self.drop_remainder = drop_remainder | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
self.num_workers = num_workers
# Because strings are converted to characters, we need to add one extra dimension to the shape
self.columns_to_ranks = {
col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
for col, spec in output_signature.items()
} | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
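A small sketch of the "U1" round-trip described in the comments above: fixed-width unicode strings are viewed as arrays of single characters (constant 4-byte itemsize) so they can travel through shared memory, then viewed back as strings on the other side:

```py
import numpy as np

batch = np.array(["hello", "hi"])                      # dtype "<U5", fixed width
chars = batch.view("U1").reshape(batch.shape + (-1,))  # shape (2, 5), one char per cell
restored = chars.view(f"U{chars.shape[-1]}").squeeze(-1)
assert restored.tolist() == ["hello", "hi"]
```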
def __iter__(self):
# Make sure we only spawn workers if they have work to do
num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
# Do the shuffling in iter so that it's done at the start of each epoch
per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
)
ctx = get_context("spawn")
names = []
shape_arrays = []
workers = []
array_ready_events = [ctx.Event() for _ in range(num_workers)]
array_loaded_events = [ctx.Event() for _ in range(num_workers)] | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
base_args = {
"dataset": self.dataset,
"cols_to_retain": self.cols_to_retain,
"collate_fn": self.collate_fn,
"collate_fn_args": self.collate_fn_args,
"columns_to_np_types": self.columns_to_np_types,
"columns_to_ranks": self.columns_to_ranks,
"string_columns": self.string_columns,
}
with SharedMemoryContext() as shm_ctx:
for i in range(num_workers):
worker_random_id = str(uuid4())
worker_name = f"dw_{i}_{worker_random_id}"[:10]
names.append(worker_name)
worker_shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
for col, rank in self.columns_to_ranks.items()
}
shape_arrays.append(worker_shape_arrays) | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
worker_indices = per_worker_batches[i]
if i == final_batch_worker and final_batch is not None:
final_batch_arg = final_batch
else:
final_batch_arg = None
worker_kwargs = {
"worker_name": worker_name,
"indices": worker_indices,
"extra_batch": final_batch_arg,
"array_ready_event": array_ready_events[i],
"array_loaded_event": array_loaded_events[i],
**base_args,
}
worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
worker.start()
workers.append(worker) | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
end_signal_received = False
while not end_signal_received:
for i in range(num_workers):
if not array_ready_events[i].wait(timeout=60):
raise TimeoutError("Data loading worker timed out!")
array_ready_events[i].clear()
array_shapes = shape_arrays[i]
if any(np.any(shape < 0) for shape in array_shapes.values()):
# Child processes send negative array shapes to indicate
# that no more data is going to be sent
end_signal_received = True
break
# Matt: Because array shapes are variable we recreate the shared memory each iteration.
# I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process. | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
# A future optimization, at the cost of some code complexity, could be to reuse shared memory
# between iterations, but this would require knowing in advance the maximum size, or having
# a system to only create a new memory block when a new maximum size is seen.
# Another potential optimization would be to figure out which memory copies are necessary,
# or whether we can yield objects straight out of shared memory.
with SharedMemoryContext() as batch_shm_ctx:
# This memory context only lasts long enough to copy everything out of the batch
arrays = {
col: batch_shm_ctx.get_array(
f"{names[i]}_{col}",
shape=shape,
dtype=self.columns_to_np_types[col], | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
create=False,
)
for col, shape in array_shapes.items()
}
# Copy everything out of shm because the memory
# will be unlinked by the child process at some point
arrays = {col: np.copy(arr) for col, arr in arrays.items()}
# Now we convert any unicode char arrays to strings
for string_col in self.string_columns:
arrays[string_col] = (
arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
)
yield arrays
array_loaded_events[i].set()
# Now we just do some cleanup
# Shared memory is cleaned up by the context manager, so we just make sure workers finish
for worker in workers:
worker.join() | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
def __call__(self):
return self
@staticmethod
def worker_loop(
dataset,
cols_to_retain,
collate_fn,
collate_fn_args,
columns_to_np_types,
columns_to_ranks,
string_columns,
indices,
extra_batch,
worker_name,
array_ready_event,
array_loaded_event,
):
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
def send_batch_to_parent(indices):
batch = np_get_batch(
indices=indices,
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
return_dict=True,
) | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
# Now begins the fun part where we start shovelling shared memory at the parent process
out_arrays = {}
with SharedMemoryContext() as batch_shm_ctx:
# The batch shared memory context exists only as long as it takes for the parent process
# to read everything, after which it cleans everything up again
for col, cast_dtype in columns_to_np_types.items():
# Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
array = batch[col]
if col in string_columns:
# We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
# which have a fixed width of 4 bytes. The parent process will convert these back to strings.
array = array.view("U1").reshape(array.shape + (-1,))
shape_arrays[col][:] = array.shape | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
out_arrays[col] = batch_shm_ctx.get_array(
f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
)
out_arrays[col][:] = array | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
array_ready_event.set()
array_loaded_event.wait()
array_loaded_event.clear()
with SharedMemoryContext() as shm_ctx:
shape_arrays = {
col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
for col, rank in columns_to_ranks.items()
}
for batch in indices:
send_batch_to_parent(batch)
if extra_batch is not None:
send_batch_to_parent(extra_batch)
# Now we send a batsignal to the parent process that we're done
for col, array in shape_arrays.items():
array[:] = -1
array_ready_event.set() | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
@staticmethod
def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
indices = np.arange(len(dataset))
if shuffle:
np.random.shuffle(indices)
num_samples = len(indices)
# We distribute the batches so that reading from the workers in round-robin order yields the exact
# order specified in indices. This is only important when shuffle is False, but we do it regardless.
incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
if drop_remainder or len(last_incomplete_batch) == 0:
last_incomplete_batch = None
indices = indices.reshape(-1, batch_size)
num_batches = len(indices)
final_batches_cutoff = num_batches - (num_batches % num_workers)
indices, final_batches = np.split(indices, [final_batches_cutoff])
indices = indices.reshape(-1, num_workers, batch_size) | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
per_worker_indices = np.split(indices, indices.shape[1], axis=1)
per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
# Distribute the final batches to the first workers
for i in range(len(final_batches)):
# len(final_batches) can be zero, and is always less than num_workers
per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
# Add the last incomplete batch to the next worker, which might be the first worker
if last_incomplete_batch is not None:
incomplete_batch_worker_idx = len(final_batches)
else:
incomplete_batch_worker_idx = None
return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx | 229 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/tf_utils.py |
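A worked example of the distribution above, for a hypothetical dataset of 10 rows split into batches of 3 across 2 workers (no shuffling, remainder kept). Reading the workers in round-robin order reproduces the original order: the leftover full batch goes to worker 0, and the incomplete final batch is passed separately as the extra batch of worker 1 (arrays shown as lists):

```py
per_worker, extra, extra_worker = NumpyMultiprocessingGenerator.distribute_batches(
    dataset=list(range(10)), batch_size=3, drop_remainder=False, num_workers=2, shuffle=False
)
# per_worker[0] -> [[0, 1, 2], [6, 7, 8]]  (regular batch + one leftover full batch)
# per_worker[1] -> [[3, 4, 5]]
# extra         -> [9]                     (incomplete batch, passed as extra_batch)
# extra_worker  -> 1                       (worker that should emit the extra batch)
```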
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
""" | 230 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/info_utils.py |
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks" | 230 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/info_utils.py |
class OnAccess(enum.EnumMeta):
"""
Enum metaclass that calls a user-specified function whenever a member is accessed.
"""
def __getattribute__(cls, name):
obj = super().__getattribute__(name)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj
def __getitem__(cls, name):
member = super().__getitem__(name)
if member._on_access:
member._on_access()
return member
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
if isinstance(obj, enum.Enum) and obj._on_access:
obj._on_access()
return obj | 231 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/deprecation_utils.py |
class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
"""
Enum class that calls `deprecate` method whenever a member is accessed.
"""
def __new__(cls, value):
member = object.__new__(cls)
member._value_ = value
member._on_access = member.deprecate
return member
@property
def help_message(self):
return ""
def deprecate(self):
help_message = f" {self.help_message}" if self.help_message else ""
warnings.warn(
f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ help_message,
FutureWarning,
stacklevel=3,
) | 232 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/deprecation_utils.py |
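A minimal sketch of how the two classes above are meant to be combined; the subclass name and message below are hypothetical:

```py
class OldVerificationMode(DeprecatedEnum):
    ALL_CHECKS = "all_checks"

    @property
    def help_message(self):
        return "Use 'VerificationMode' instead."

# Accessing any member goes through OnAccess.__getattribute__, which calls
# member.deprecate() and therefore emits a FutureWarning:
mode = OldVerificationMode.ALL_CHECKS
```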
class DownloadConfig:
"""Configuration for our cached path manager. | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (override the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
was already extracted, re-extract the archive and overwrite the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*): | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the progress bars of individual file downloads.
""" | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value) | 233 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_config.py |
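A sketch of the `__setattr__` hook above (assuming the dataclass decorator this class normally carries): setting `token` on an existing config mirrors it into the `"hf"` entry of `storage_options`, with the endpoint taken from `config.HF_ENDPOINT`. Note that the hook only takes effect once `storage_options` exists, i.e. after construction. The token value below is hypothetical:

```py
cfg = DownloadConfig(num_proc=4)
cfg.token = "hf_xxx"
print(cfg.storage_options)
# {'hf': {'token': 'hf_xxx', 'endpoint': config.HF_ENDPOINT}}
```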
class DownloadMode(enum.Enum):
"""`Enum` for how to treat pre-existing downloads and data.
The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
raw downloads and the prepared dataset if they exist.
The generations modes:
| | Downloads | Dataset |
|-------------------------------------|-----------|---------|
| `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
| `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
| `FORCE_REDOWNLOAD` | Fresh | Fresh |
"""
REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
FORCE_REDOWNLOAD = "force_redownload" | 234 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
class DownloadManager:
is_streaming = False
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
record_checksums=True,
):
"""Download manager constructor. | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
Args:
data_dir:
can be used to specify a manual directory to get the files from.
dataset_name (`str`):
name of the dataset this instance will be used for. If
provided, downloads will record which dataset they were used for.
download_config (`DownloadConfig`):
to specify the cache directory and other
download options
base_path (`str`):
base path that is used when relative paths are used to
download files. This can be a remote url.
record_checksums (`bool`, defaults to `True`):
Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
"""
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
# To record what is being used: {url: {num_bytes: int, checksum: str}} | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
self.record_checksums = record_checksums
self.download_config = download_config or DownloadConfig()
self.downloaded_paths = {}
self.extracted_paths = {} | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
@property
def manual_dir(self):
return self._data_dir
@property
def downloaded_size(self):
"""Returns the total size of downloaded files."""
return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
"""Record size/checksum of downloaded files."""
delay = 5
for url, path in hf_tqdm(
list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
delay=delay,
desc="Computing checksums",
):
# call str to support PathLike objects
self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
path, record_checksum=self.record_checksums
)
def download(self, url_or_urls):
"""Download given URL(s). | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download. Each URL is a `str`.
Returns:
`str` or `list` or `dict`:
The downloaded paths matching the given input `url_or_urls`.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = False
if download_config.download_desc is None:
download_config.download_desc = "Downloading data"
download_func = partial(self._download_batched, download_config=download_config) | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
start_time = datetime.now()
with stack_multiprocessing_download_progress_bars():
downloaded_path_or_paths = map_nested(
download_func,
url_or_urls,
map_tuple=True,
num_proc=download_config.num_proc,
desc="Downloading data files",
batched=True,
batch_size=-1,
)
duration = datetime.now() - start_time
logger.info(f"Downloading took {duration.total_seconds() // 60} min")
url_or_urls = NestedDataStructure(url_or_urls)
downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
start_time = datetime.now()
self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
duration = datetime.now() - start_time
logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
return downloaded_path_or_paths.data
def _download_batched(
self,
url_or_filenames: List[str],
download_config: DownloadConfig,
) -> List[str]:
if len(url_or_filenames) >= 16:
download_config = download_config.copy()
download_config.disable_tqdm = True
download_func = partial(self._download_single, download_config=download_config) | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
fs: fsspec.AbstractFileSystem
path = str(url_or_filenames[0])
if is_relative_path(path):
# append the relative path to the base_path
path = url_or_path_join(self._base_path, path)
fs, path = url_to_fs(path, **download_config.storage_options)
size = 0
try:
size = fs.info(path).get("size", 0)
except Exception:
pass
max_workers = (
config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1
) # enable multithreading if files are small | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
return thread_map(
download_func,
url_or_filenames,
desc=download_config.download_desc or "Downloading",
unit="files",
position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
and multiprocessing.current_process()._identity
else None,
max_workers=max_workers,
tqdm_class=tqdm,
)
else:
return [
self._download_single(url_or_filename, download_config=download_config)
for url_or_filename in url_or_filenames
] | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str:
url_or_filename = str(url_or_filename)
if is_relative_path(url_or_filename):
# append the relative path to the base_path
url_or_filename = url_or_path_join(self._base_path, url_or_filename)
out = cached_path(url_or_filename, download_config=download_config)
out = tracked_str(out)
out.set_origin(url_or_filename)
return out
def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
"""Iterate over files within an archive.
Args:
path_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object.
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example: | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(path_or_buf, "read"):
return ArchiveIterable.from_buf(path_or_buf)
else:
return ArchiveIterable.from_urlpath(path_or_buf)
def iter_files(self, paths: Union[str, List[str]]):
"""Iterate over file paths.
Args:
paths (`str` or `list` of `str`):
Root paths.
Yields:
`str`: File path.
Example:
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co./datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(paths)
def extract(self, path_or_paths):
"""Extract given path(s). | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
Args:
path_or_paths (path or `list` or `dict`):
Path of file to extract. Each path is a `str`.
Returns:
extracted_path(s): `str`, The extracted paths matching the given input
path_or_paths.
Example: | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
download_config = self.download_config.copy()
download_config.extract_compressed_file = True
extract_func = partial(self._download_single, download_config=download_config)
extracted_paths = map_nested(
extract_func,
path_or_paths,
num_proc=download_config.num_proc,
desc="Extracting data files",
)
path_or_paths = NestedDataStructure(path_or_paths)
extracted_paths = NestedDataStructure(extracted_paths)
self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
return extracted_paths.data
def download_and_extract(self, url_or_urls):
"""Download and extract given `url_or_urls`. | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
Is roughly equivalent to:
```
extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
Returns:
extracted_path(s): `str`, extracted paths of given URL(s).
"""
return self.extract(self.download(url_or_urls))
def get_recorded_sizes_checksums(self):
return self._recorded_sizes_checksums.copy()
def delete_extracted_files(self):
paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
for key, path in list(self.extracted_paths.items()):
if path in paths_to_delete and os.path.isfile(path):
os.remove(path)
del self.extracted_paths[key] | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
def manage_extracted_files(self):
if self.download_config.delete_extracted:
self.delete_extracted_files() | 235 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/download_manager.py |
class StreamingDownloadManager:
"""
Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download nor extract
data, but they rather return the path or url that could be opened using the `xopen` function which extends the
built-in `open` function to stream data from remote files.
"""
is_streaming = True
def __init__(
self,
dataset_name: Optional[str] = None,
data_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
base_path: Optional[str] = None,
):
self._dataset_name = dataset_name
self._data_dir = data_dir
self._base_path = base_path or os.path.abspath(".")
self.download_config = download_config or DownloadConfig()
self.downloaded_size = None
self.record_checksums = False | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
@property
def manual_dir(self):
return self._data_dir
def download(self, url_or_urls):
"""Normalize URL(s) of files to stream data from.
This is the lazy version of `DownloadManager.download` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
Example:
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
```
"""
url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
return url_or_urls | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
def _download_single(self, urlpath: str) -> str:
urlpath = str(urlpath)
if is_relative_path(urlpath):
# append the relative path to the base_path
urlpath = url_or_path_join(self._base_path, urlpath)
return urlpath
def extract(self, url_or_urls):
"""Add extraction protocol for given url(s) for streaming.
This is the lazy version of `DownloadManager.extract` for streaming.
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) of files to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
Example: | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
```py
>>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> extracted_files = dl_manager.extract(downloaded_files)
```
"""
urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
return urlpaths | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
def _extract(self, urlpath: str) -> str:
urlpath = str(urlpath)
protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
# get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
path = urlpath.split("::")[0]
extension = _get_path_extension(path)
if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
raise NotImplementedError(
f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
f"Please use `dl_manager.iter_archive` instead.\n\n"
f"Example usage:\n\n"
f"\turl = dl_manager.download(url)\n"
f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
f"\tfor filename, file in tar_archive_iterator:\n"
f"\t\t..."
)
if protocol is None:
# no extraction | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
return urlpath
elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
# there is one single file which is the uncompressed file
inner_file = os.path.basename(urlpath.split("::")[0])
inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
return f"{protocol}://{inner_file}::{urlpath}"
else:
return f"{protocol}://::{urlpath}" | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
def download_and_extract(self, url_or_urls):
"""Prepare given `url_or_urls` for streaming (add extraction protocol).
This is the lazy version of `DownloadManager.download_and_extract` for streaming.
Is equivalent to:
```
urls = dl_manager.extract(dl_manager.download(url_or_urls))
```
Args:
url_or_urls (`str` or `list` or `dict`):
URL(s) to stream data from. Each url is a `str`.
Returns:
url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
"""
return self.extract(self.download(url_or_urls))
def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
"""Iterate over files within an archive.
Args:
urlpath_or_buf (`str` or `io.BufferedReader`):
Archive path or archive binary file object. | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
Yields:
`tuple[str, io.BufferedReader]`:
2-tuple (path_within_archive, file_object).
File object is opened in binary mode.
Example:
```py
>>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
>>> files = dl_manager.iter_archive(archive)
```
"""
if hasattr(urlpath_or_buf, "read"):
return ArchiveIterable.from_buf(urlpath_or_buf)
else:
return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
"""Iterate over files.
Args:
urlpaths (`str` or `list` of `str`):
Root paths.
Yields:
str: File URL path.
Example: | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
```py
>>> files = dl_manager.download_and_extract('https://huggingface.co./datasets/beans/resolve/main/data/train.zip')
>>> files = dl_manager.iter_files(files)
```
"""
return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
def manage_extracted_files(self):
pass
def get_recorded_sizes_checksums(self):
pass | 236 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/download/streaming_download_manager.py |
class PolarsArrowExtractor(BaseArrowExtractor["pl.DataFrame", "pl.Series", "pl.DataFrame"]):
def extract_row(self, pa_table: pa.Table) -> "pl.DataFrame":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table.slice(length=1))
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
def extract_column(self, pa_table: pa.Table) -> "pl.Series":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]]
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") | 237 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
def extract_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
if config.POLARS_AVAILABLE:
if "polars" not in sys.modules:
import polars
else:
polars = sys.modules["polars"]
return polars.from_arrow(pa_table)
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") | 237 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
class PolarsFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
import polars as pl # noqa: F401 - import pl at initialization
def decode_row(self, row: "pl.DataFrame") -> "pl.DataFrame":
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.map_rows(decode)
return row | 238 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
def decode_column(self, column: "pl.Series", column_name: str) -> "pl.Series":
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.map_elements(decode)
return column
def decode_batch(self, batch: "pl.DataFrame") -> "pl.DataFrame":
return self.decode_row(batch) | 238 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
class PolarsFormatter(TensorFormatter[Mapping, "pl.DataFrame", Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
self.polars_arrow_extractor = PolarsArrowExtractor
self.polars_features_decoder = PolarsFeaturesDecoder(features)
import polars as pl # noqa: F401 - import pl at initialization
def format_row(self, pa_table: pa.Table) -> "pl.DataFrame":
row = self.polars_arrow_extractor().extract_row(pa_table)
row = self.polars_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> "pl.Series":
column = self.polars_arrow_extractor().extract_column(pa_table)
column = self.polars_features_decoder.decode_column(column, pa_table.column_names[0])
return column | 239 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
def format_batch(self, pa_table: pa.Table) -> "pl.DataFrame":
row = self.polars_arrow_extractor().extract_batch(pa_table)
row = self.polars_features_decoder.decode_batch(row)
return row | 239 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/polars_formatter.py |
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
Arrow extractors are used to extract data from pyarrow tables.
They make it possible to extract rows, columns and batches.
These three extraction types have to be implemented.
"""
def extract_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError | 240 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
def extract_row(self, pa_table: pa.Table) -> pa.Table:
return pa_table
def extract_column(self, pa_table: pa.Table) -> pa.Array:
return pa_table.column(0)
def extract_batch(self, pa_table: pa.Table) -> pa.Table:
return pa_table | 241 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(pa_table.to_pydict())
def extract_column(self, pa_table: pa.Table) -> list:
return pa_table.column(0).to_pylist()
def extract_batch(self, pa_table: pa.Table) -> dict:
return pa_table.to_pydict() | 242 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
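A quick illustration of the three extraction shapes on a tiny in-memory table; in practice the formatter receives a table already sliced to the queried rows, and `_unnest` (not shown in this excerpt) picks the first element of each column:

```py
import pyarrow as pa

table = pa.table({"text": ["a", "b"], "label": [0, 1]})
ext = PythonArrowExtractor()
ext.extract_row(table)     # {'text': 'a', 'label': 0}
ext.extract_column(table)  # ['a', 'b']
ext.extract_batch(table)   # {'text': ['a', 'b'], 'label': [0, 1]}
```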
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
def __init__(self, **np_array_kwargs):
self.np_array_kwargs = np_array_kwargs
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(self.extract_batch(pa_table))
def extract_column(self, pa_table: pa.Table) -> np.ndarray:
return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
def extract_batch(self, pa_table: pa.Table) -> dict:
return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} | 243 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
if isinstance(pa_array, pa.ChunkedArray):
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
if isinstance(pa_array.type, _ArrayXDExtensionType): | 243 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() | 243 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
if len(array) > 0:
if any(
(isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
or (isinstance(x, float) and np.isnan(x))
for x in array
):
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array, dtype=object)
return np.array(array, copy=False, dtype=object)
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array)
else:
return np.array(array, copy=False) | 243 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
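A sketch of the expected behaviour on a small table (`_is_zero_copy_only` and `_is_array_with_nulls` are not shown in this excerpt): rows with equal shapes are stacked into a regular ndarray, while ragged values fall back to an object-dtype array:

```py
import pyarrow as pa

table = pa.table({"x": [[1, 2], [3, 4]], "ragged": [[1], [2, 3]]})
ext = NumpyArrowExtractor()
batch = ext.extract_batch(table)
# batch["x"]      -> array([[1, 2], [3, 4]])   (equal-length rows stack into a 2-D int array)
# batch["ragged"] -> object-dtype array holding one variable-length array per row
```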
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
def extract_column(self, pa_table: pa.Table) -> pd.Series:
return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.to_pandas(types_mapper=pandas_types_mapper) | 244 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class PythonFeaturesDecoder:
def __init__(
self, features: Optional[Features], token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
):
self.features = features
self.token_per_repo_id = token_per_repo_id
def decode_row(self, row: dict) -> dict:
return self.features.decode_example(row, token_per_repo_id=self.token_per_repo_id) if self.features else row
def decode_column(self, column: list, column_name: str) -> list:
return self.features.decode_column(column, column_name) if self.features else column
def decode_batch(self, batch: dict) -> dict:
return self.features.decode_batch(batch) if self.features else batch | 245 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class PandasFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.transform(decode)
return row | 246 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.transform(decode)
return column
def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
return self.decode_row(batch) | 246 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class LazyDict(MutableMapping):
"""A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
self.pa_table = pa_table
self.formatter = formatter
self.data = {key: None for key in pa_table.column_names}
self.keys_to_format = set(self.data.keys())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
value = self.data[key]
if key in self.keys_to_format:
value = self.format(key)
self.data[key] = value
self.keys_to_format.remove(key)
return value
def __setitem__(self, key, value):
if key in self.keys_to_format:
self.keys_to_format.remove(key)
self.data[key] = value
def __delitem__(self, key) -> None:
if key in self.keys_to_format:
self.keys_to_format.remove(key)
del self.data[key] | 247 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
def __or__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = inst.data | other.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = inst.data | other
return inst
return NotImplemented | 247 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def __ror__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = other.data | inst.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = other | inst.data
return inst
return NotImplemented
def __ior__(self, other):
if isinstance(other, LazyDict):
other = other.copy()
other._format_all()
self.keys_to_format -= other.data.keys()
self.data |= other.data
else:
self.keys_to_format -= other.keys()
self.data |= other
return self | 247 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def __copy__(self):
# Identical to `UserDict.__copy__`
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
return inst
def copy(self):
import copy
return copy.copy(self)
@classmethod
def fromkeys(cls, iterable, value=None):
raise NotImplementedError
def format(self, key):
raise NotImplementedError
def _format_all(self):
for key in self.keys_to_format:
self.data[key] = self.format(key)
self.keys_to_format.clear() | 247 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class LazyRow(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))[0] | 248 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class LazyBatch(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key])) | 249 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
"""
simple_arrow_extractor = SimpleArrowExtractor
python_arrow_extractor = PythonArrowExtractor
numpy_arrow_extractor = NumpyArrowExtractor
pandas_arrow_extractor = PandasArrowExtractor
def __init__(
self,
features: Optional[Features] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
):
self.features = features
self.token_per_repo_id = token_per_repo_id
self.python_features_decoder = PythonFeaturesDecoder(self.features, self.token_per_repo_id)
self.pandas_features_decoder = PandasFeaturesDecoder(self.features) | 250 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
if query_type == "row":
return self.format_row(pa_table)
elif query_type == "column":
return self.format_column(pa_table)
elif query_type == "batch":
return self.format_batch(pa_table)
def format_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def format_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError | 250 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
def recursive_tensorize(self, data_struct: dict):
raise NotImplementedError | 251 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
def format_row(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_row(pa_table)
def format_column(self, pa_table: pa.Table) -> pa.Array:
return self.simple_arrow_extractor().extract_column(pa_table)
def format_batch(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_batch(pa_table) | 252 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class PythonFormatter(Formatter[Mapping, list, Mapping]):
def __init__(self, features=None, lazy=False, token_per_repo_id=None):
super().__init__(features, token_per_repo_id)
self.lazy = lazy
def format_row(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyRow(pa_table, self)
row = self.python_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> list:
column = self.python_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyBatch(pa_table, self)
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return batch | 253 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
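A sketch of the lazy path tying `PythonFormatter` to the `LazyRow` class above: with `lazy=True`, values are only materialized when their key is first accessed:

```py
import pyarrow as pa

table = pa.table({"text": ["a"], "label": [0]})
row = PythonFormatter(lazy=True).format_row(table)
type(row).__name__   # 'LazyRow'
row.keys_to_format   # {'text', 'label'} -- nothing formatted yet
row["text"]          # 'a' -- formatted on first access via LazyRow.format
row.keys_to_format   # {'label'}
```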
class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_row(pa_table)
row = self.pandas_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> pd.Series:
column = self.pandas_arrow_extractor().extract_column(pa_table)
column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_batch(pa_table)
row = self.pandas_features_decoder.decode_batch(row)
return row | 254 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
"""
A user-defined custom formatter function defined by a ``transform``.
The transform must take as input a batch of data extracted from an arrow table using the python extractor,
and return a batch.
If the output batch is not a dict, then output_all_columns won't work.
If the output batch has several fields, then querying a single column won't work since we don't know which field
to return.
"""
def __init__(self, transform: Callable[[dict], dict], features=None, token_per_repo_id=None, **kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.transform = transform | 255 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def format_row(self, pa_table: pa.Table) -> dict:
formatted_batch = self.format_batch(pa_table)
try:
return _unnest(formatted_batch)
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
) from exc | 255 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
formatted_batch = self.format_batch(pa_table)
if hasattr(formatted_batch, "keys"):
if len(formatted_batch.keys()) > 1:
raise TypeError(
"Tried to query a column but the custom formatting function returns too many columns. "
f"Only one column was expected but got columns {list(formatted_batch.keys())}."
)
else:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
)
try:
return formatted_batch[pa_table.column_names[0]]
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
) from exc | 255 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return self.transform(batch) | 255 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/formatting.py |
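A sketch of a transform that satisfies the constraints in the docstring above: it receives the python-extracted batch (a dict of lists) and returns a dict of equal-length sequences, so both batch and row queries work (a column query would fail here because two fields are returned). The function and column names below are hypothetical:

```py
import pyarrow as pa

def add_length(batch: dict) -> dict:
    return {**batch, "length": [len(t) for t in batch["text"]]}

table = pa.table({"text": ["a", "bbb"]})
formatter = CustomFormatter(transform=add_length)
formatter.format_batch(table)  # {'text': ['a', 'bbb'], 'length': [1, 3]}
formatter.format_row(table)    # {'text': 'a', 'length': 1}
```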
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, token_per_repo_id=None, **torch_tensor_kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {} | 256 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/formatting/torch_formatter.py |