class DatasetInfoMixin:
"""This base class exposes some attributes of DatasetInfo
at the base level of the Dataset for easy access.
"""
def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
self._info = info
self._split = split
@property
def info(self):
"""[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
return self._info
@property
def split(self):
"""[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
return self._split
@property
def builder_name(self) -> str:
return self._info.builder_name
@property
def citation(self) -> str:
return self._info.citation
@property
def config_name(self) -> str:
return self._info.config_name
@property
def dataset_size(self) -> Optional[int]:
return self._info.dataset_size
@property
def description(self) -> str:
return self._info.description
@property
def download_checksums(self) -> Optional[dict]:
return self._info.download_checksums
@property
def download_size(self) -> Optional[int]:
return self._info.download_size
@property
def features(self) -> Optional[Features]:
return self._info.features.copy() if self._info.features is not None else None
@property
def homepage(self) -> Optional[str]:
return self._info.homepage
@property
def license(self) -> Optional[str]:
return self._info.license
@property
def size_in_bytes(self) -> Optional[int]:
return self._info.size_in_bytes
@property
def supervised_keys(self):
return self._info.supervised_keys
@property
def version(self):
return self._info.version
class TensorflowDatasetMixin:
_TF_DATASET_REFS = set()
@staticmethod
def _get_output_signature(
dataset: "Dataset",
collate_fn: Callable,
collate_fn_args: dict,
cols_to_retain: Optional[List[str]] = None,
batch_size: Optional[int] = None,
num_test_batches: int = 20,
):
"""Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
it out just by inspecting the dataset.
Args:
dataset (`Dataset`): Dataset to load samples from.
collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
`collate_fn`.
batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
Can be None, which indicates that batch sizes can be variable.
num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
Returns:
`dict`: Dict mapping column names to tf.TensorSpec objects
`dict`: Dict mapping column names to np.dtype objects
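Example (a rough sketch; `ds` and `data_collator` are placeholders for an existing `Dataset` and collator, not defined here):
```py
>>> signature, np_dtypes = Dataset._get_output_signature(
...     ds, collate_fn=data_collator, collate_fn_args={}, batch_size=None
... )
```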
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
if len(dataset) == 0:
raise ValueError("Unable to get the output signature because the dataset is empty.")
if batch_size is not None:
batch_size = min(len(dataset), batch_size)
test_batch_size = 1
if cols_to_retain is not None:
cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
test_batches = []
for _ in range(num_test_batches):
indices = sample(range(len(dataset)), test_batch_size)
test_batch = dataset[indices]
if cols_to_retain is not None:
test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
test_batch = collate_fn(test_batch, **collate_fn_args)
test_batches.append(test_batch)
tf_columns_to_signatures = {}
np_columns_to_dtypes = {}
for column in test_batches[0].keys():
raw_arrays = [batch[column] for batch in test_batches]
# In case the collate_fn returns something strange
np_arrays = []
for array in raw_arrays:
if isinstance(array, np.ndarray):
np_arrays.append(array)
elif isinstance(array, tf.Tensor):
np_arrays.append(array.numpy())
else:
np_arrays.append(np.array(array))
if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
tf_dtype = tf.int64
np_dtype = np.int64
elif np.issubdtype(np_arrays[0].dtype, np.number):
tf_dtype = tf.float32
np_dtype = np.float32
elif np_arrays[0].dtype.kind == "U": # Unicode strings
np_dtype = np.str_
tf_dtype = tf.string
else:
raise RuntimeError(
f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
"Nested types and image/audio types are not supported yet."
)
shapes = [array.shape for array in np_arrays]
static_shape = []
for dim in range(len(shapes[0])):
sizes = {shape[dim] for shape in shapes}
if dim == 0:
static_shape.append(batch_size)
continue
if len(sizes) == 1: # This dimension looks constant
static_shape.append(sizes.pop())
else: # Use None for variable dimensions
static_shape.append(None)
tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
np_columns_to_dtypes[column] = np_dtype
return tf_columns_to_signatures, np_columns_to_dtypes
def to_tf_dataset(
self,
batch_size: Optional[int] = None,
columns: Optional[Union[str, List[str]]] = None,
shuffle: bool = False,
collate_fn: Optional[Callable] = None,
drop_remainder: bool = False,
collate_fn_args: Optional[Dict[str, Any]] = None,
label_cols: Optional[Union[str, List[str]]] = None,
prefetch: bool = True,
num_workers: int = 0,
num_test_batches: int = 20,
):
"""Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
`dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
`tf.Tensor` is yielded instead.
Args:
batch_size (`int`, *optional*):
Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
columns (`List[str]` or `str`, *optional*):
Dataset column(s) to load in the `tf.data.Dataset`.
Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
shuffle(`bool`, defaults to `False`):
Shuffle the dataset order when loading. Recommended `True` for training, `False` for
validation/evaluation.
drop_remainder(`bool`, defaults to `False`):
Drop the last incomplete batch when loading. Ensures
that all batches yielded by the dataset will have the same length on the batch dimension.
collate_fn(`Callable`, *optional*):
A function or callable object (such as a `DataCollator`) that will collate
lists of samples into a batch.
collate_fn_args (`Dict`, *optional*):
An optional `dict` of keyword arguments to be passed to the
`collate_fn`.
label_cols (`List[str]` or `str`, defaults to `None`):
Dataset column(s) to load as labels.
Note that many models compute loss internally rather than letting Keras do it, in which case
passing the labels here is optional, as long as they're in the input `columns`.
prefetch (`bool`, defaults to `True`):
Whether to run the dataloader in a separate thread and maintain
a small buffer of batches for training. Improves performance by allowing data to be loaded in the
background while the model is training.
num_workers (`int`, defaults to `0`):
Number of workers to use for loading the dataset.
num_test_batches (`int`, defaults to `20`):
Number of batches to use to infer the output signature of the dataset.
The higher this number, the more accurate the signature will be, but the longer it will take to
create the dataset.
Returns:
`tf.data.Dataset`
Example:
```py
>>> ds_train = ds["train"].to_tf_dataset(
... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
```
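If `label_cols` is set, the dataset yields `(features, labels)` tuples that can be passed directly to `model.fit()`. A second sketch, reusing the same (assumed) `ds` and `data_collator` as above:
```py
>>> ds_train = ds["train"].to_tf_dataset(
...     columns=['input_ids', 'token_type_ids', 'attention_mask'],
...     label_cols='label',
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
... )
```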
"""
if config.TF_AVAILABLE:
import tensorflow as tf
else:
raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
if (isinstance(columns, list) and len(columns) == 1) or (
isinstance(label_cols, list) and len(label_cols) == 1
):
warnings.warn(
"The output of `to_tf_dataset` will change when a passing single element list for `labels` or "
"`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
"single string.\n"
"Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
" : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
"New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
" : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
FutureWarning,
)
if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
logger.warning(
"Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
"pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
"try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
"Tensors instead of streaming with to_tf_dataset()."
)
if collate_fn is None:
# Set a very simple default collator that just stacks things together
collate_fn = minimal_tf_collate_fn
if collate_fn_args is None:
collate_fn_args = {}
if label_cols and not columns:
raise ValueError("Cannot specify label_cols without specifying columns!")
if label_cols is None:
label_cols = []
elif isinstance(label_cols, str):
label_cols = [label_cols]
if len(set(label_cols)) < len(label_cols):
raise ValueError("List of label_cols contains duplicates.")
if columns:
if isinstance(columns, str):
columns = [columns]
if len(set(columns)) < len(columns):
raise ValueError("List of columns contains duplicates.")
cols_to_retain = list(set(columns + label_cols))
else:
cols_to_retain = None # Indicates keeping all valid columns
columns = []
if self.format["type"] not in ["custom", "numpy"]:
dataset = self.with_format("numpy")
else:
dataset = self
# TODO(Matt, QL): deprecate the retention of label_ids and label
output_signature, columns_to_np_types = dataset._get_output_signature(
dataset,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
cols_to_retain=cols_to_retain,
batch_size=batch_size if drop_remainder else None,
num_test_batches=num_test_batches,
)
if "labels" in output_signature:
if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
for col in columns:
if col not in output_signature:
raise ValueError(f"Column {col} not found in dataset!")
for col in label_cols:
if col not in output_signature:
raise ValueError(f"Label column {col} not found in dataset!")
if num_workers == 0:
tf_dataset = dataset_to_tf(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
)
elif num_workers > 0:
if batch_size is None:
raise NotImplementedError(
"`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
"is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
)
tf_dataset = multiprocess_dataset_to_tf(
dataset=dataset,
cols_to_retain=cols_to_retain,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
columns_to_np_types=columns_to_np_types,
output_signature=output_signature,
shuffle=shuffle,
batch_size=batch_size,
drop_remainder=drop_remainder,
num_workers=num_workers,
)
else:
raise ValueError("num_workers must be >= 0")
def split_features_and_labels(input_batch):
# TODO(Matt, QL): deprecate returning the dict content when there's only one key
features = {key: tensor for key, tensor in input_batch.items() if key in columns}
labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
if len(features) == 1:
features = list(features.values())[0]
if len(labels) == 1:
labels = list(labels.values())[0]
if isinstance(labels, dict) and len(labels) == 0:
return features
else:
return features, labels
if cols_to_retain is not None:
tf_dataset = tf_dataset.map(split_features_and_labels)
if prefetch:
tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Remove a reference to the open Arrow file on delete
def cleanup_callback(ref):
dataset.__del__()
self._TF_DATASET_REFS.remove(ref)
self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
return tf_dataset
class DatasetTransformationNotAllowedError(Exception):
pass
class NonExistentDatasetError(Exception):
"""Used when we expect the existence of a dataset"""
pass
class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
"""A Dataset backed by an Arrow table."""
def __init__(
self,
arrow_table: Table,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_table: Optional[Table] = None,
fingerprint: Optional[str] = None,
):
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
IndexableMixin.__init__(self)
self._data: Table = _check_table(arrow_table)
self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
maybe_register_dataset_for_temp_dir_deletion(self)
self._format_type: Optional[str] = None
self._format_kwargs: dict = {}
self._format_columns: Optional[list] = None
self._output_all_columns: bool = False
self._fingerprint: str = fingerprint
# Read metadata
if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
if (
"fingerprint" in metadata and self._fingerprint is None
): # try to load fingerprint from the arrow file metadata
self._fingerprint = metadata["fingerprint"]
# Infer features if None
inferred_features = Features.from_arrow_schema(arrow_table.schema)
if self.info.features is None:
self.info.features = inferred_features
else: # make sure the nested columns are in the right order
try:
self.info.features = self.info.features.reorder_fields_as(inferred_features)
except ValueError as e:
raise ValueError(
f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
)
# In case there are types like pa.dictionary that we need to convert to the underlying type
if self.data.schema != self.info.features.arrow_schema:
self._data = self.data.cast(self.info.features.arrow_schema)
# Infer fingerprint if None
if self._fingerprint is None:
self._fingerprint = generate_fingerprint(self)
# Sanity checks
if self._info.features is None:
raise ValueError("Features can't be None in a Dataset object")
if self._fingerprint is None:
raise ValueError("Fingerprint can't be None in a Dataset object")
if self.info.features.type != inferred_features.type:
raise ValueError(
f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
)
if self._indices is not None:
if not pa.types.is_unsigned_integer(self._indices.column(0).type):
raise ValueError(
f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
)
_check_column_names(self._data.column_names)
self._data = update_metadata_with_features(self._data, self._info.features)
@property
def features(self) -> Features:
features = super().features
if features is None: # this is already checked in __init__
raise ValueError("Features can't be None in a Dataset object")
return features
@classmethod
def from_file(
cls,
filename: str,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_filename: Optional[str] = None,
in_memory: bool = False,
) -> "Dataset":
"""Instantiate a Dataset backed by an Arrow table at filename.
Args:
filename (`str`):
File name of the dataset.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
indices_filename (`str`, *optional*):
File names of the indices.
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
[`Dataset`]
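Example:
```py
>>> ds = Dataset.from_file("path/to/dataset.arrow")
```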
"""
table = ArrowReader.read_table(filename, in_memory=in_memory)
if indices_filename is not None:
indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
else:
indices_pa_table = None
return cls(
arrow_table=table,
info=info,
split=split,
indices_table=indices_pa_table,
)
@classmethod
def from_buffer(
cls,
buffer: pa.Buffer,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
indices_buffer: Optional[pa.Buffer] = None,
) -> "Dataset":
"""Instantiate a Dataset backed by an Arrow buffer.
Args:
buffer (`pyarrow.Buffer`):
Arrow buffer.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
indices_buffer (`pyarrow.Buffer`, *optional*):
Indices Arrow buffer.
Returns:
[`Dataset`]
"""
table = InMemoryTable.from_buffer(buffer)
if indices_buffer is not None:
indices_table = InMemoryTable.from_buffer(indices_buffer)
else:
indices_table = None
return cls(table, info=info, split=split, indices_table=indices_table)
@classmethod
def from_pandas(
cls,
df: pd.DataFrame,
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
preserve_index: Optional[bool] = None,
) -> "Dataset":
"""
Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
features and passing it to this function.
Important: a dataset created with from_pandas() lives in memory
and therefore doesn't have an associated cache directory.
This may change in the future, but in the meantime if you
want to reduce memory usage you should write it back on disk
and reload using e.g. save_to_disk / load_from_disk.
Args:
df (`pandas.DataFrame`):
Dataframe that contains the dataset.
features ([`Features`], *optional*):
Dataset features.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
preserve_index (`bool`, *optional*):
Whether to store the index as an additional column in the resulting Dataset.
The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
Use `preserve_index=True` to force it to be stored as a column.
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_pandas(df)
```
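Passing explicit features avoids the `null` type inference described above. A short sketch (the column names are illustrative):
```py
>>> from datasets import Features, Value
>>> features = Features({"text": Value("string"), "label": Value("int64")})
>>> ds = Dataset.from_pandas(df, features=features)
```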
"""
if info is not None and features is not None and info.features != features:
raise ValueError(
f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
)
features = features if features is not None else info.features if info is not None else None
if info is None:
info = DatasetInfo()
info.features = features
table = InMemoryTable.from_pandas(
df=df,
preserve_index=preserve_index,
)
if features is not None:
# more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
# needed to support the str to Audio conversion for instance
table = table.cast(features.arrow_schema)
return cls(table, info=info, split=split)
@classmethod
def from_polars(
cls,
df: "pl.DataFrame",
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
) -> "Dataset":
"""
Collect the underlying arrow arrays in an Arrow Table.
This operation is mostly zero copy.
Data types that do copy:
* CategoricalType
Args:
df (`polars.DataFrame`): DataFrame to convert to Arrow Table
features (`Features`, optional): Dataset features.
info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (`NamedSplit`, optional): Name of the dataset split.
Examples:
```py
>>> ds = Dataset.from_polars(df)
```
"""
if info is not None and features is not None and info.features != features:
raise ValueError(
f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
)
features = features if features is not None else info.features if info is not None else None
if info is None:
info = DatasetInfo()
info.features = features
table = InMemoryTable(df.to_arrow())
if features is not None:
# more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema)
# needed to support the str to Audio conversion for instance
table = table.cast(features.arrow_schema)
return cls(table, info=info, split=split)
@classmethod
def from_dict(
cls,
mapping: dict,
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
) -> "Dataset":
"""
Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
Important: a dataset created with from_dict() lives in memory
and therefore doesn't have an associated cache directory.
This may change in the future, but in the meantime if you
want to reduce memory usage you should write it back on disk
and reload using e.g. save_to_disk / load_from_disk.
Args:
mapping (`Mapping`):
Mapping of strings to Arrays or Python lists.
features ([`Features`], *optional*):
Dataset features.
info (`DatasetInfo`, *optional*):
Dataset information, like description, citation, etc.
split (`NamedSplit`, *optional*):
Name of the dataset split.
Returns:
[`Dataset`]
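Example (column names and values are illustrative):
```py
>>> ds = Dataset.from_dict({"text": ["Good", "Bad"], "label": [0, 1]})
```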
"""
if info is not None and features is not None and info.features != features:
raise ValueError(
f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
)
features = features if features is not None else info.features if info is not None else None
arrow_typed_mapping = {}
for col, data in mapping.items():
if isinstance(data, (pa.Array, pa.ChunkedArray)):
data = cast_array_to_feature(data, features[col]) if features is not None else data
else:
data = OptimizedTypedSequence(
features.encode_column(data, col) if features is not None else data,
type=features[col] if features is not None else None,
col=col,
)
arrow_typed_mapping[col] = data
mapping = arrow_typed_mapping
pa_table = InMemoryTable.from_pydict(mapping=mapping)
if info is None:
info = DatasetInfo()
info.features = features
if info.features is None:
info.features = Features(
{
col: generate_from_arrow_type(data.type)
if isinstance(data, (pa.Array, pa.ChunkedArray))
else data.get_inferred_type()
for col, data in mapping.items()
}
)
return cls(pa_table, info=info, split=split)
@classmethod
def from_list(
cls,
mapping: List[dict],
features: Optional[Features] = None,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
) -> "Dataset":
"""
Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
Note that the keys of the first entry will be used to determine the dataset columns,
regardless of what is passed to features.
Important: a dataset created with from_list() lives in memory
and therefore doesn't have an associated cache directory.
This may change in the future, but in the meantime if you
want to reduce memory usage you should write it back on disk
and reload using e.g. save_to_disk / load_from_disk.
Args:
mapping (`List[dict]`): A list of mappings of strings to row values.
features (`Features`, optional): Dataset features.
info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
split (`NamedSplit`, optional): Name of the dataset split.
Returns:
[`Dataset`]
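Example (column names and values are illustrative):
```py
>>> ds = Dataset.from_list([{"text": "Good", "label": 0}, {"text": "Bad", "label": 1}])
```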
"""
# for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
return cls.from_dict(mapping, features, info, split)
@staticmethod
def from_csv(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from CSV file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the CSV file(s).
split ([`NamedSplit`], *optional*):
Split name to be assigned to the dataset.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_csv('path/to/dataset.csv')
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_generator(
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
**kwargs,
):
"""Create a Dataset from a generator.
Args:
generator (`Callable`):
A generator function that `yields` examples.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
gen_kwargs(`dict`, *optional*):
Keyword arguments to be passed to the `generator` callable.
You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
<Added version="2.7.0"/>
split ([`NamedSplit`], defaults to `Split.TRAIN`):
Split name to be assigned to the dataset.
<Added version="2.21.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`GeneratorConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> def gen():
... yield {"text": "Good", "label": 0}
... yield {"text": "Bad", "label": 1}
...
>>> ds = Dataset.from_generator(gen)
```
```py
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
```
"""
from .io.generator import GeneratorDatasetInputStream
return GeneratorDatasetInputStream(
generator=generator,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
gen_kwargs=gen_kwargs,
num_proc=num_proc,
split=split,
**kwargs,
).read()
@staticmethod
def from_json(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from JSON or JSON Lines file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the JSON or JSON Lines file(s).
split ([`NamedSplit`], *optional*):
Split name to be assigned to the dataset.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
field (`str`, *optional*):
Field name of the JSON file where the dataset is contained in.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_json('path/to/dataset.json')
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
field=field,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_parquet(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from Parquet file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the Parquet file(s).
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_parquet('path/to/dataset.parquet')
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Union[PathLike, List[PathLike]],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
"""Create Dataset from text file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the text file(s).
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
<Added version="2.8.0"/>
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> ds = Dataset.from_text('path/to/dataset.txt')
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
num_proc=num_proc,
**kwargs,
).read()
@staticmethod
def from_spark(
df: "pyspark.sql.DataFrame",
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
keep_in_memory: bool = False,
cache_dir: str = None,
working_dir: str = None,
load_from_cache_file: bool = True,
**kwargs,
):
"""Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
Args:
df (`pyspark.sql.DataFrame`):
The DataFrame containing the desired data.
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
workers and the driver.
keep_in_memory (`bool`):
Whether to copy the data in-memory.
working_dir (`str`, *optional*):
Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
a non-NFS intermediate directory may improve performance.
load_from_cache_file (`bool`):
Whether to load the dataset from the cache if possible.
Returns:
[`Dataset`]
Example:
```py
>>> df = spark.createDataFrame(
>>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
>>> columns=["id", "name"],
>>> )
>>> ds = Dataset.from_spark(df)
```
"""
# Dynamic import to avoid circular dependency
from .io.spark import SparkDatasetReader
if sys.platform == "win32":
raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
return SparkDatasetReader(
df,
split=split,
features=features,
streaming=False,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
working_dir=working_dir,
load_from_cache_file=load_from_cache_file,
**kwargs,
).read()
@staticmethod
def from_sql(
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
"""Create Dataset from SQL query or database table.
Args:
sql (`str` or `sqlalchemy.sql.Selectable`):
SQL query to be executed or a table name.
con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`SqlConfig`].
Returns:
[`Dataset`]
Example:
```py
>>> # Fetch a database table
>>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
>>> # Execute a SQL query on the table
>>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
>>> # Use a Selectable object to specify the query
>>> from sqlalchemy import select, text
>>> stmt = select([text("sentence")]).select_from(text("test_data"))
>>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
```
<Tip>
The returned dataset can only be cached if `con` is specified as URI string.
</Tip>
"""
from .io.sql import SqlDatasetReader
return SqlDatasetReader(
sql,
con,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
**kwargs,
).read()
def __setstate__(self, state):
self.__dict__.update(state)
maybe_register_dataset_for_temp_dir_deletion(self)
return self
def __del__(self):
if hasattr(self, "_data"):
del self._data
if hasattr(self, "_indices"):
del self._indices
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
self.__del__()
def save_to_disk(
self,
dataset_path: PathLike,
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
For [`Image`], [`Audio`] and [`Video`] data:
All the Image(), Audio() and Video() data are stored in the arrow files.
If you want to store paths or urls, please use the Value("string") type.
Args:
dataset_path (`path-like`):
Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
of the dataset directory where the dataset will be saved to.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be written to disk. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`int`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
<Added version="2.8.0"/>
num_proc (`int`, *optional*):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```py
>>> ds.save_to_disk("path/to/dataset/directory")
>>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
```
"""
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
if self.list_indexes():
raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
if num_shards is None:
dataset_nbytes = self._estimate_nbytes()
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, num_proc or 1)
num_proc = num_proc if num_proc is not None else 1
num_shards = num_shards if num_shards is not None else num_proc
fs: fsspec.AbstractFileSystem
fs, _ = url_to_fs(dataset_path, **(storage_options or {}))
if not is_remote_filesystem(fs):
parent_cache_files_paths = {
Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
}
# Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
raise PermissionError(
f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
)
fs.makedirs(dataset_path, exist_ok=True)
# Get json serializable state
state = {
key: self.__dict__[key]
for key in [
"_fingerprint",
"_format_columns",
"_format_kwargs",
"_format_type",
"_output_all_columns",
]
}
state["_split"] = str(self.split) if self.split is not None else self.split
state["_data_files"] = [
{"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
]
for k in state["_format_kwargs"].keys():
try:
json.dumps(state["_format_kwargs"][k])
except TypeError as e:
raise TypeError(
str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
) from None
# Get json serializable dataset info
dataset_info = asdict(self._info)
shards_done = 0
pbar = hf_tqdm(
unit=" examples",
total=len(self),
desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
)
kwargs_per_job = (
{
"job_id": shard_idx,
"shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
"fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
"storage_options": storage_options,
}
for shard_idx in range(num_shards)
)
shard_lengths = [None] * num_shards
shard_sizes = [None] * num_shards
if num_proc > 1:
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
):
if done:
shards_done += 1
pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
shard_lengths[job_id], shard_sizes[job_id] = content
else:
pbar.update(content)
else:
with pbar:
for kwargs in kwargs_per_job:
for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
if done:
shards_done += 1
pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
shard_lengths[job_id], shard_sizes[job_id] = content
else:
pbar.update(content)
with fs.open(
posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8"
) as state_file:
json.dump(state, state_file, indent=2, sort_keys=True)
with fs.open(
posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
) as dataset_info_file:
# Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
@staticmethod
def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
batch_size = config.DEFAULT_MAX_BATCH_SIZE
num_examples_progress_update = 0
writer = ArrowWriter(
features=shard.features,
path=fpath,
storage_options=storage_options,
embed_local_files=True,
)
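# Yields (job_id, False, num_examples_since_last_update) progress updates while writing, then (job_id, True, (num_examples, num_bytes)) once the shard is finalized.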
try:
_time = time.time()
for pa_table in shard.with_format("arrow").iter(batch_size):
writer.write_table(pa_table)
num_examples_progress_update += len(pa_table)
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_examples, num_bytes = writer.finalize()
writer.close()
yield job_id, True, (num_examples, num_bytes)
@staticmethod
def _build_local_temp_path(uri_or_path: str) -> Path:
"""
Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative
path extracted from the uri) passed.
Args:
uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
`"s3://my-bucket/dataset/train"`) to concatenate.
Returns:
:class:`Path`: the concatenated path (temp dir + path)
"""
src_dataset_path = Path(uri_or_path)
tmp_dir = get_temporary_cache_files_directory()
return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
@staticmethod
def load_from_disk(
dataset_path: PathLike,
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "Dataset":
"""
Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
Args:
dataset_path (`path-like`):
Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
of the dataset directory where the dataset will be loaded from.
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`Dataset`] or [`DatasetDict`]:
- If `dataset_path` is a path of a dataset directory, the dataset requested.
- If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
Example:
```py
>>> ds = load_from_disk("path/to/dataset/directory")
```
"""
fs: fsspec.AbstractFileSystem
fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {}))
dest_dataset_path = dataset_path
dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
dataset_info_is_file = fs.isfile(dataset_info_path)
dataset_state_is_file = fs.isfile(dataset_state_json_path)
if not dataset_info_is_file and not dataset_state_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
if not dataset_info_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
if not dataset_state_is_file:
if dataset_dict_is_file:
raise FileNotFoundError(
f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
)
# copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies
if is_remote_filesystem(fs):
src_dataset_path = dest_dataset_path
dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
with open(dataset_state_json_path, encoding="utf-8") as state_file:
state = json.load(state_file)
with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
dataset_size = estimate_dataset_size(
Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
)
keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
arrow_table = concat_tables(
thread_map(
table_cls.from_file,
[posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]],
tqdm_class=hf_tqdm,
desc="Loading dataset from disk",
# set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
disable=len(state["_data_files"]) <= 16 or None,
)
)
split = state["_split"]
split = Split(split) if split is not None else split
dataset = Dataset(
arrow_table=arrow_table,
info=dataset_info,
split=split,
fingerprint=state["_fingerprint"],
)
format = {
"type": state["_format_type"],
"format_kwargs": state["_format_kwargs"],
"columns": state["_format_columns"],
"output_all_columns": state["_output_all_columns"],
}
dataset = dataset.with_format(**format)
return dataset
@property
def data(self) -> Table:
"""The Apache Arrow table backing the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.data
MemoryMappedTable
text: string
label: int64
----
text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
```
"""
return self._data
@property
def cache_files(self) -> List[dict]:
"""The cache files containing the Apache Arrow table backing the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.cache_files
[{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
```
"""
cache_files = list_table_cache_files(self._data)
if self._indices is not None:
cache_files += list_table_cache_files(self._indices)
return [{"filename": cache_filename} for cache_filename in cache_files]
@property
def num_columns(self) -> int:
"""Number of columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.num_columns
2
```
"""
return self._data.num_columns
@property
def num_rows(self) -> int:
"""Number of rows in the dataset (same as [`Dataset.__len__`]).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.num_rows
1066
```
"""
if self._indices is not None:
return self._indices.num_rows
return self._data.num_rows
@property
def column_names(self) -> List[str]:
"""Names of the columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.column_names
['text', 'label']
```
"""
return self._data.column_names
@property
def shape(self) -> Tuple[int, int]:
"""Shape of the dataset (number of columns, number of rows).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.shape
(1066, 2)
```
"""
if self._indices is not None:
return (self._indices.num_rows, self._data.num_columns)
return self._data.shape
def unique(self, column: str) -> List:
"""Return a list of the unique elements in a column.
This is implemented in the low-level backend and as such, very fast.
Args:
column (`str`):
Column name (list all the column names with [`~datasets.Dataset.column_names`]).
Returns:
`list`: List of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.unique('label')
[1, 0]
```
"""
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
if self._indices is not None and self._indices.num_rows != self._data.num_rows:
dataset = self.flatten_indices()
else:
dataset = self
return dataset._data.column(column).unique().to_pylist()
def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
Args:
column (`str`):
The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq", split="validation")
>>> ds.features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column('answer')
>>> ds.features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
# Sanity checks
if column not in self._data.column_names:
raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
src_feat = self._info.features[column]
if not isinstance(src_feat, Value):
raise ValueError(
f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
)
if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
def stringify_column(batch):
batch[column] = [
str(sample) if include_nulls or sample is not None else None for sample in batch[column]
]
return batch
dset = self.map(
stringify_column,
batched=True,
desc="Stringifying the column",
)
else:
dset = self
# Create the new feature
class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
dst_feat = ClassLabel(names=class_names)
def cast_to_class_labels(batch):
batch[column] = [
dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
for sample in batch[column]
]
return batch
new_features = dset.features.copy()
new_features[column] = dst_feat
dset = dset.map(
cast_to_class_labels,
batched=True,
features=new_features,
desc="Casting to class labels",
)
return dset
@fingerprint_transform(inplace=False)
def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
"""Flatten the table.
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Args:
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with flattened columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
```
"""
dataset = copy.deepcopy(self)
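# Flatten the Arrow table one level at a time until no struct columns remain (or `max_depth` is reached).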
for depth in range(1, max_depth):
if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
dataset._data = dataset._data.flatten()
else:
break
dataset.info.features = self._info.features.flatten(max_depth=max_depth)
dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
logger.info(f"Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else 'unknown'}.")
dataset._fingerprint = new_fingerprint
return dataset
def cast(
self,
features: Features,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
num_proc: Optional[int] = None,
) -> "Dataset":
"""
Cast the dataset to a new set of features.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset.
batch_size (`int`, defaults to `1000`):
Number of examples per batch provided to cast.
If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running [`~datasets.Dataset.map`].
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
Returns:
[`Dataset`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds.features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds.features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
if sorted(features) != sorted(self._data.column_names):
raise ValueError(
f"The columns in features ({list(features)}) must be identical "
f"as the columns in the dataset: {self._data.column_names}"
)
schema = features.arrow_schema
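# Remember the current output format so it can be restored after the Arrow-level cast below.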
format = self.format
dataset = self.with_format("arrow")
# use the module-level `table_cast` via `partial` (rather than a lambda) so the mapped function can be pickled, e.g. for multiprocessing on Windows
dataset = dataset.map(
partial(table_cast, schema=schema),
batched=True,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
num_proc=num_proc,
features=features,
desc="Casting the dataset",
)
dataset = dataset.with_format(**format)
return dataset
@fingerprint_transform(inplace=False)
def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature (`FeatureType`):
Target feature.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds.features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
if hasattr(feature, "decode_example"):
dataset = copy.deepcopy(self)
dataset._info.features[column] = feature
dataset._fingerprint = new_fingerprint
dataset._data = dataset._data.cast(dataset.features.arrow_schema)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
return dataset
else:
features = self.features
features[column] = feature
return self.cast(features)
@transmit_format
@fingerprint_transform(inplace=False)
def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
"""
Remove one or several column(s) in the dataset and the features associated to them.
You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method
doesn't copy the data of the remaining columns and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.remove_columns('label')
Dataset({
features: ['text'],
num_rows: 1066
})
>>> ds = ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
Dataset({
features: [],
num_rows: 0
})
```
"""
dataset = copy.deepcopy(self)
if isinstance(column_names, str):
column_names = [column_names]
missing_columns = set(column_names) - set(self._data.column_names)
if missing_columns:
raise ValueError(
f"Column name {list(missing_columns)} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
for column_name in column_names:
del dataset._info.features[column_name]
dataset._data = dataset._data.drop(column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@fingerprint_transform(inplace=False)
def rename_column(
self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
) -> "Dataset":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.rename_column('label', 'label_new')
Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
```
"""
dataset = copy.deepcopy(self)
if original_column_name not in dataset._data.column_names:
raise ValueError(
f"Original column name {original_column_name} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
if new_column_name in dataset._data.column_names:
raise ValueError(
f"New column name {new_column_name} already in the dataset. "
f"Please choose a column name which is not already in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
if not new_column_name:
raise ValueError("New column name is empty.")
def rename(columns):
return [new_column_name if col == original_column_name else col for col in columns]
new_column_names = rename(self._data.column_names)
if self._format_columns is not None:
dataset._format_columns = rename(self._format_columns)
dataset._info.features = Features(
{
new_column_name if col == original_column_name else col: feature
for col, feature in self._info.features.items()
}
)
dataset._data = dataset._data.rename_columns(new_column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@fingerprint_transform(inplace=False)
def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
```
"""
dataset = copy.deepcopy(self)
extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
if extra_columns:
raise ValueError(
f"Original column names {extra_columns} not in the dataset. "
f"Current columns in the dataset: {dataset._data.column_names}"
)
number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
if number_of_duplicates_in_new_columns != 0:
raise ValueError(
"New column names must all be different, but this column mapping "
f"has {number_of_duplicates_in_new_columns} duplicates"
)
empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
if empty_new_columns:
raise ValueError(f"New column names {empty_new_columns} are empty.")
def rename(columns):
return [column_mapping[col] if col in column_mapping else col for col in columns]
new_column_names = rename(self._data.column_names)
if self._format_columns is not None:
dataset._format_columns = rename(self._format_columns)
dataset._info.features = Features(
{
column_mapping[col] if col in column_mapping else col: feature
for col, feature in (self._info.features or {}).items()
}
)
dataset._data = dataset._data.rename_columns(new_column_names)
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
@transmit_format
@fingerprint_transform(inplace=False)
def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
"""Select one or several column(s) in the dataset and the features
associated to them.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform. If `None`,
the new fingerprint is computed using a hash of the previous
fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A copy of the dataset object which only consists of
selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.select_columns(['text'])
Dataset({
features: ['text'],
num_rows: 1066
})
```
"""
if isinstance(column_names, str):
column_names = [column_names]
missing_columns = set(column_names) - set(self._data.column_names)
if missing_columns:
raise ValueError(
f"Column name {list(missing_columns)} not in the "
"dataset. Current columns in the dataset: "
f"{self._data.column_names}."
)
dataset = copy.deepcopy(self)
dataset._data = dataset._data.select(column_names)
dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
dataset._data = update_metadata_with_features(dataset._data, dataset.features)
dataset._fingerprint = new_fingerprint
return dataset
def __len__(self):
"""Number of rows in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.__len__
<bound method Dataset.__len__ of Dataset({
features: ['text', 'label'],
num_rows: 1066
})>
```
"""
return self.num_rows
def __iter__(self):
"""Iterate through the examples.
If a formatting is set with [`Dataset.set_format`] rows will be returned with the
selected format.
"""
if self._indices is None:
# Fast iteration
# Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
for pa_subtable in table_iter(self.data, batch_size=batch_size):
for i in range(pa_subtable.num_rows):
pa_subtable_ex = pa_subtable.slice(i, 1)
formatted_output = format_table(
pa_subtable_ex,
0,
formatter=formatter,
format_columns=self._format_columns,
output_all_columns=self._output_all_columns,
)
yield formatted_output
else:
for i in range(self.num_rows):
yield self._getitem(
i,
)
def iter(self, batch_size: int, drop_last_batch: bool = False):
"""Iterate through the batches of size `batch_size`.
If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the
selected format.
Args:
batch_size (`int`): Size of each batch to yield.
drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the `batch_size` should be dropped.
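Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> for batch in ds.iter(batch_size=8):
...     pass  # each `batch` is a dict of lists, e.g. len(batch["text"]) == 8 for full batches
```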
"""
if self._indices is None:
# Fast iteration
# Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
formatted_batch = format_table(
pa_subtable,
range(pa_subtable.num_rows),
formatter=formatter,
format_columns=self._format_columns,
output_all_columns=self._output_all_columns,
)
yield formatted_batch
else:
num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
for i in range(0, num_rows, batch_size):
yield self._getitem(
slice(i, i + batch_size),
)
def __repr__(self):
return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
@property
def format(self):
return {
"type": self._format_type,
"format_kwargs": self._format_kwargs,
"columns": self.column_names if self._format_columns is None else self._format_columns,
"output_all_columns": self._output_all_columns,
}
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
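Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> with ds.formatted_as(type="numpy", columns=["label"]):
...     mean_label = ds["label"].mean()  # `ds["label"]` is a NumPy array inside the block
```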
"""
old_format_type = self._format_type
old_format_kwargs = self._format_kwargs
old_format_columns = self._format_columns
old_output_all_columns = self._output_all_columns
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
@fingerprint_transform(inplace=True)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as:
```
new formatted columns = (all columns - previously unformatted columns)
```
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.set_format(type='numpy', columns=['text', 'label'])
>>> ds.format
{'type': 'numpy',
'format_kwargs': {},
'columns': ['text', 'label'],
'output_all_columns': False}
```
"""
format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format)
# Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
type = get_format_type_from_alias(type)
get_formatter(type, features=self._info.features, **format_kwargs)
# Check filter column
if isinstance(columns, str):
columns = [columns]
if isinstance(columns, tuple):
columns = list(columns)
if columns is not None:
missing_columns = set(columns) - set(self._data.column_names)
if missing_columns:
raise ValueError(
f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
if columns is not None:
columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
self._format_type = type
self._format_kwargs = format_kwargs
self._format_columns = columns
self._output_all_columns = output_all_columns
logger.debug(
"Set __getitem__(key) output type to %s for %s columns "
" (when key is int or slice) and %s output other (un-formatted) columns.",
"python objects" if type is None else type,
"no" if columns is None else str(columns),
"do" if output_all_columns else "don't",
)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds.format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds.format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
If set to True, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> def encode(batch):
... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
>>> ds.set_transform(encode)
>>> ds[0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1]),
'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
5637, 1998, 11690, 2336, 1012, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0])}
```
"""
self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
Args:
type (`str`, *optional*):
Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds.format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format("torch")
>>> ds.format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'torch'}
>>> ds[0]
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
Args:
transform (`Callable`, `optional`):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, `optional`):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
>>> ds = ds.with_transform(encode)
>>> ds[0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
"""
Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
"""
if isinstance(key, bool):
raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
output_all_columns = (
kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
)
format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
format_kwargs = format_kwargs if format_kwargs is not None else {}
formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
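# Query the underlying Arrow table for the requested key (resolving the indices mapping if any) and format the result.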
pa_subtable = query_table(self._data, key, indices=self._indices)
formatted_output = format_table(
pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
)
return formatted_output
@overload
def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
...
@overload
def __getitem__(self, key: str) -> List: # noqa: F811
...
def __getitem__(self, key): # noqa: F811
"""Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
return self._getitem(key)
def __getitems__(self, keys: List) -> List:
"""Can be used to get a batch using a list of integers indices."""
batch = self.__getitem__(keys)
n_examples = len(batch[next(iter(batch))])
return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
def cleanup_cache_files(self) -> int:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is
one.
Be careful when running this command that no other process is currently using other cache files.
Returns:
`int`: Number of removed files.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.cleanup_cache_files()
10
```
"""
current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
if not current_cache_files:
return 0
cache_directory = os.path.dirname(current_cache_files[0])
logger.info(f"Listing files in {cache_directory}")
files: List[str] = os.listdir(cache_directory)
files_to_remove = []
for f_name in files:
full_name = os.path.abspath(os.path.join(cache_directory, f_name))
if f_name.startswith("cache-") and f_name.endswith(".arrow"):
if full_name in current_cache_files:
logger.info(f"Keeping currently used cache file at {full_name}")
continue
files_to_remove.append(full_name)
for file_path in files_to_remove:
logger.info(f"Removing {file_path}")
os.remove(file_path)
return len(files_to_remove)
def _get_cache_file_path(self, fingerprint):
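# Cache files are named after the fingerprint and stored next to the existing cache files; fall back to a random name in a temporary directory when caching is disabled or the dataset lives in memory.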
if is_caching_enabled() and self.cache_files:
cache_file_name = "cache-" + fingerprint + ".arrow"
cache_directory = os.path.dirname(self.cache_files[0]["filename"])
else:
cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
cache_directory = get_temporary_cache_files_directory()
cache_file_path = os.path.join(cache_directory, cache_file_name)
return cache_file_path
@transmit_format
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
new_fingerprint: Optional[str] = None,
desc: Optional[str] = None,
) -> "Dataset":
"""
Apply a function to all the examples in the table (individually or in batches) and update the table.
If your function returns a column that already exists, then it overwrites it.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`): Function with one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific Features to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially.
suffix_template (`str`):
If `cache_file_name` is specified, then this suffix
will be added at the end of the base name of each shard's cache file. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for
`rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds[0:3]["text"]
['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'Review: the soundtrack alone is worth the price of admission .',
'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
if keep_in_memory and cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
if num_proc is not None and num_proc <= 0:
raise ValueError("num_proc must be an integer > 0.")
# If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
if len(self) == 0:
if self._indices is not None: # empty indices mapping
self = Dataset(
self.data.slice(0, 0),
info=self.info.copy(),
split=self.split,
fingerprint=new_fingerprint,
)
if remove_columns:
return self.remove_columns(remove_columns)
else:
return self
if function is None:
function = lambda x: x # noqa: E731
if isinstance(input_columns, str):
input_columns = [input_columns]
if input_columns is not None:
missing_columns = set(input_columns) - set(self._data.column_names)
if missing_columns:
raise ValueError(
f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
if remove_columns is not None:
missing_columns = set(remove_columns) - set(self._data.column_names)
if missing_columns:
raise ValueError(
f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if fn_kwargs is None:
fn_kwargs = {}
if num_proc is not None and num_proc > len(self):
num_proc = len(self)
logger.warning(
f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
)
dataset_kwargs = {
"shard": self,
"function": function,
"with_indices": with_indices,
"with_rank": with_rank,
"input_columns": input_columns,
"batched": batched,
"batch_size": batch_size,
"drop_last_batch": drop_last_batch,
"remove_columns": remove_columns,
"keep_in_memory": keep_in_memory,
"writer_batch_size": writer_batch_size,
"features": features,
"disable_nullable": disable_nullable,
"fn_kwargs": fn_kwargs,
}
if new_fingerprint is None:
# we create a unique hash from the function,
# current dataset file and the mapping args
transform = format_transform_for_fingerprint(Dataset._map_single)
kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
else:
validate_fingerprint(new_fingerprint)
dataset_kwargs["new_fingerprint"] = new_fingerprint
if self.cache_files:
if cache_file_name is None:
cache_file_name = self._get_cache_file_path(new_fingerprint)
dataset_kwargs["cache_file_name"] = cache_file_name
def load_processed_shard_from_cache(shard_kwargs):
"""Load a processed shard from cache if it exists, otherwise throw an error."""
shard = shard_kwargs["shard"]
# Check if we've already cached this computation (indexed by a hash)
if shard_kwargs["cache_file_name"] is not None:
if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
info = shard.info.copy()
info.features = features
return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
raise NonExistentDatasetError
num_shards = num_proc if num_proc is not None else 1
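# With `drop_last_batch`, the trailing incomplete batch of each shard is skipped, so it is excluded from the progress bar total.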
if batched and drop_last_batch:
pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
else:
pbar_total = len(self)
shards_done = 0
if num_proc is None or num_proc == 1:
transformed_dataset = None
try:
transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
except NonExistentDatasetError:
pass
if transformed_dataset is None:
with hf_tqdm(
unit=" examples",
total=pbar_total,
desc=desc or "Map",
) as pbar:
for rank, done, content in Dataset._map_single(**dataset_kwargs):
if done:
shards_done += 1
logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
transformed_dataset = content
else:
pbar.update(content)
assert transformed_dataset is not None, "Failed to retrieve the result from map"
# update fingerprint if the dataset changed
if transformed_dataset._fingerprint != self._fingerprint:
transformed_dataset._fingerprint = new_fingerprint
return transformed_dataset
else:
def format_cache_file_name(
cache_file_name: Optional[str],
rank: Union[int, Literal["*"]], # noqa: F722
) -> Optional[str]:
if not cache_file_name:
return cache_file_name
sep = cache_file_name.rindex(".")
base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
if isinstance(rank, int):
cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
logger.info(f"Process #{rank} will write at {cache_file_name}")
else:
cache_file_name = (
base_name
+ suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ extension
)
return cache_file_name
def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
validate_fingerprint(new_fingerprint)
return new_fingerprint
prev_env = deepcopy(os.environ)
# check if parallelism is off
# from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
"",
"off",
"false",
"f",
"no",
"n",
"0",
):
logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
shards = [
self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
for rank in range(num_proc)
]
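# One job per shard: each job gets its own cache file, fingerprint and an `offset` so that `with_indices` receives indices into the full dataset.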
kwargs_per_job = [
{
**dataset_kwargs,
"shard": shards[rank],
"cache_file_name": format_cache_file_name(cache_file_name, rank),
"rank": rank,
"offset": sum(len(s) for s in shards[:rank]),
"new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
}
for rank in range(num_shards)
]
transformed_shards = [None] * num_shards
for rank in range(num_shards):
try:
transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
kwargs_per_job[rank] = None
except NonExistentDatasetError:
pass
kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
# We try to create a pool with as many workers as there are shards not yet cached.
if kwargs_per_job:
if len(kwargs_per_job) < num_shards:
logger.info(
f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
)
with Pool(len(kwargs_per_job)) as pool:
os.environ = prev_env
logger.info(f"Spawning {num_proc} processes")
with hf_tqdm(
unit=" examples",
total=pbar_total,
desc=(desc or "Map") + f" (num_proc={num_proc})",
) as pbar:
for rank, done, content in iflatmap_unordered(
pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
):
if done:
shards_done += 1
logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
transformed_shards[rank] = content
else:
pbar.update(content)
# Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
for kwargs in kwargs_per_job:
del kwargs["shard"]
else:
logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
assert None not in transformed_shards, (
f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
)
logger.info(f"Concatenating {num_proc} shards")
result = _concatenate_map_style_datasets(transformed_shards)
# update fingerprint if the dataset changed
if any(
transformed_shard._fingerprint != shard._fingerprint
for transformed_shard, shard in zip(transformed_shards, shards)
):
result._fingerprint = new_fingerprint
else:
result._fingerprint = self._fingerprint
return result
@staticmethod
def _map_single(
shard: "Dataset",
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[List[str]] = None,
keep_in_memory: bool = False,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
new_fingerprint: Optional[str] = None,
rank: Optional[int] = None,
offset: int = 0,
) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
"""Apply a function to all the elements in the table (individually or in batches)
and update the table (if function does update examples).
Args:
shard (`datasets.Dataset`): Dataset to map the transform on.
function (`Callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: lambda x: x
with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`): Provide batch of examples to `function`
batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`
`batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`
drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing
offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
"""
if fn_kwargs is None:
fn_kwargs = {}
# If we do batch computation but no batch size is provided, default to the full dataset
if batched and (batch_size is None or batch_size <= 0):
batch_size = shard.num_rows
# We set this variable to True after processing the first example/batch in
# `apply_function_on_filtered_inputs` if the map function returns a dict.
# If set to False, no new arrow table will be created
update_data = None
format_kwargs = shard._format_kwargs.copy()
# Lazy formatting is only available for the default format (None/python)
if not input_columns and shard._format_type is None:
format_kwargs["lazy"] = True
input_formatter = get_formatter(
shard._format_type,
features=shard.features,
**format_kwargs,
)
class NumExamplesMismatchError(Exception):
pass
def validate_function_output(processed_inputs, indices):
"""Validate output of the map function."""
allowed_processed_inputs_types = (Mapping, pa.Table, pd.DataFrame)
if config.POLARS_AVAILABLE and "polars" in sys.modules:
import polars as pl
allowed_processed_inputs_types += (pl.DataFrame,)
if processed_inputs is not None and not isinstance(processed_inputs, allowed_processed_inputs_types):
raise TypeError(
f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
)
elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
allowed_batch_return_types = (list, np.ndarray, pd.Series)
if config.POLARS_AVAILABLE and "polars" in sys.modules:
import polars as pl
allowed_batch_return_types += (pl.Series, pl.DataFrame)
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
import tensorflow as tf
allowed_batch_return_types += (tf.Tensor,)
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
allowed_batch_return_types += (torch.Tensor,)
if config.JAX_AVAILABLE and "jax" in sys.modules:
import jax.numpy as jnp
allowed_batch_return_types += (jnp.ndarray,)
all_dict_values_are_lists = all(
isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
)
if all_dict_values_are_lists is False:
raise TypeError(
f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
)
def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
"""Utility to apply the function on a selection of columns."""
nonlocal update_data
inputs = format_table(
pa_inputs,
0 if not batched else range(pa_inputs.num_rows),
format_columns=input_columns,
formatter=input_formatter,
)
fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
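# Shift the indices by the shard offset so that `with_indices` sees indices into the full (unsharded) dataset.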
if offset == 0:
effective_indices = indices
else:
effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
additional_args = ()
if with_indices:
additional_args += (effective_indices,)
if with_rank:
additional_args += (rank,)
processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
if isinstance(processed_inputs, LazyDict):
processed_inputs = {
k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
}
returned_lazy_dict = True
else:
returned_lazy_dict = False
if update_data is None:
# Check if the function returns updated examples
updatable_types = (Mapping, pa.Table, pd.DataFrame)
if config.POLARS_AVAILABLE and "polars" in sys.modules:
import polars as pl
updatable_types += (pl.DataFrame,)
update_data = isinstance(processed_inputs, updatable_types)
validate_function_output(processed_inputs, indices)
if not update_data:
return None # Nothing to update, let's move on
if shard._format_type or input_columns:
# TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
elif isinstance(inputs, LazyDict):
inputs_to_merge = {
k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
}
else:
inputs_to_merge = inputs
if remove_columns is not None:
for column in remove_columns:
# `function` can modify input in-place causing column to be already removed.
if column in inputs_to_merge:
inputs_to_merge.pop(column)
if returned_lazy_dict and column in processed_inputs:
processed_inputs.pop(column)
if check_same_num_examples:
input_num_examples = len(pa_inputs)
processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
if input_num_examples != processed_inputs_num_examples:
raise NumExamplesMismatchError()
if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
# The .map() transform *updates* the dataset:
# the output dictionary contains both the input data and the output data.
# The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
return {**inputs_to_merge, **processed_inputs}
else:
return processed_inputs
def init_buffer_and_writer():
# Prepare output buffer and batched writer in memory or on file if we update the table
writer_features = features
if writer_features is None:
writer_features = shard.features
update_features = True
else:
update_features = False
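# Write to an in-memory buffer when `keep_in_memory` is set or there is no cache file; otherwise write to a temporary file in the cache directory.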
if keep_in_memory or cache_file_name is None:
buf_writer = pa.BufferOutputStream()
tmp_file = None
writer = ArrowWriter(
features=writer_features,
stream=buf_writer,
writer_batch_size=writer_batch_size,
update_features=update_features,
fingerprint=new_fingerprint,
disable_nullable=disable_nullable,
)
else:
buf_writer = None
logger.info(f"Caching processed dataset at {cache_file_name}")
cache_dir = os.path.dirname(cache_file_name)
os.makedirs(cache_dir, exist_ok=True)
tmp_file = tempfile.NamedTemporaryFile("wb", dir=cache_dir, delete=False)
writer = ArrowWriter(
features=writer_features,
path=tmp_file.name,
writer_batch_size=writer_batch_size,
update_features=update_features,
fingerprint=new_fingerprint,
disable_nullable=disable_nullable,
)
return buf_writer, writer, tmp_file
num_examples_progress_update = 0
        # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
buf_writer, writer, tmp_file = None, None, None
# Check if Polars is available and import it if so
if config.POLARS_AVAILABLE and "polars" in sys.modules:
import polars as pl
# Optionally initialize the writer as a context manager
with contextlib.ExitStack() as stack:
try:
arrow_formatted_shard = shard.with_format("arrow")
# Loop over single examples or batches and write to buffer/file if examples are to be updated
if not batched:
shard_iterable = enumerate(arrow_formatted_shard)
else:
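                    # When dropping the last incomplete batch, iterate only over a multiple of `batch_size` rows.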
num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
shard_iterable = zip(
range(0, num_rows, batch_size),
arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
)
if not batched:
_time = time.time()
for i, example in shard_iterable:
example = apply_function_on_filtered_inputs(example, i, offset=offset)
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
if isinstance(example, pa.Table):
writer.write_row(example)
elif isinstance(example, pd.DataFrame):
writer.write_row(pa.Table.from_pandas(example))
elif (
config.POLARS_AVAILABLE
and "polars" in sys.modules
and isinstance(example, pl.DataFrame)
):
writer.write_row(example.to_arrow())
else:
writer.write(example)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
else:
_time = time.time()
for i, batch in shard_iterable:
num_examples_in_batch = len(batch)
indices = list(
range(*(slice(i, i + batch_size).indices(shard.num_rows)))
) # Something simpler?
try:
batch = apply_function_on_filtered_inputs(
batch,
indices,
check_same_num_examples=len(shard.list_indexes()) > 0,
offset=offset,
)
except NumExamplesMismatchError:
raise DatasetTransformationNotAllowedError(
"Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it."
) from None
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
if isinstance(batch, pa.Table):
writer.write_table(batch)
elif isinstance(batch, pd.DataFrame):
writer.write_table(pa.Table.from_pandas(batch))
elif (
config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame)
):
writer.write_table(batch.to_arrow())
else:
writer.write_batch(batch)
num_examples_progress_update += num_examples_in_batch
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
if update_data and writer is not None:
writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
except (Exception, KeyboardInterrupt):
yield rank, False, num_examples_progress_update
if update_data:
if writer is not None:
writer.finalize()
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
yield rank, False, num_examples_progress_update
if update_data and tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, cache_file_name)
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_file_name, 0o666 & ~umask)
if update_data:
# Create new Dataset from buffer or file
info = shard.info.copy()
info.features = writer._features
if buf_writer is None:
yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
else:
yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
else:
yield rank, True, shard
@transmit_format
@fingerprint_transform(inplace=False)
def batch(
self,
batch_size: int,
drop_last_batch: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""
Group samples from the dataset into batches.
Args:
batch_size (`int`):
The number of samples in each batch.
drop_last_batch (`bool`, defaults to `False`):
Whether to drop the last incomplete batch.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A new Dataset where each item is a batch of multiple samples from the original dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> batched_ds = ds.batch(batch_size=4)
>>> batched_ds[0]
{'text': ['compassionately explores the seemingly irreconcilable situation...', ...], # 4 items
'label': [1, 1, 1, 1]}
```
"""
def batch_fn(example):
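            # With `batched=True`, `example` is a dict of column lists; wrapping each list in another list turns the whole batch into a single output row.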
return {k: [v] for k, v in example.items()}
return self.map(
batch_fn,
batched=True,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
desc="Batching examples",
)
@transmit_format
@fingerprint_transform(
inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
new_fingerprint: Optional[str] = None,
desc: Optional[str] = None,
) -> "Dataset":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if
`batched = True`. If `batched = False`, one example per batch is passed to `function`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`dict`, *optional*):
Keyword arguments to be passed to `function`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
suffix_template (`str`):
                If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each per-process cache file.
For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
`_{rank:05d}_of_{num_proc:05d}`).
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.filter(lambda x: x["label"] == 1)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`"
)
if function is None:
function = lambda x: True # noqa: E731
if len(self) == 0:
return self
indices = self.map(
function=partial(
get_indices_from_mask_function,
function,
batched,
with_indices,
with_rank,
input_columns,
self._indices,
),
with_indices=True,
with_rank=True,
features=Features({"indices": Value("uint64")}),
batched=True,
batch_size=batch_size,
remove_columns=self.column_names,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
suffix_template=suffix_template,
new_fingerprint=new_fingerprint,
input_columns=input_columns,
desc=desc or "Filter",
)
new_dataset = copy.deepcopy(self)
new_dataset._indices = indices.data
new_dataset._fingerprint = new_fingerprint
return new_dataset
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Allow null values in the table.
            num_proc (`int`, *optional*, defaults to `None`):
                Max number of processes when generating cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
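        Example (illustrative; reuses the `rotten_tomatoes` dataset from the other examples):
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> shuffled_ds = ds.shuffle(seed=42)  # creates an indices mapping
        >>> flat_ds = shuffled_ds.flatten_indices()  # rewrites the rows contiguously and drops the mapping
        ```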
"""
return self.map(
batched=True, # for speed
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
new_fingerprint=new_fingerprint,
desc="Flattening the indices",
num_proc=num_proc,
)
def _new_dataset_with_indices(
self,
indices_cache_file_name: Optional[str] = None,
indices_buffer: Optional[pa.Buffer] = None,
fingerprint: Optional[str] = None,
) -> "Dataset":
"""Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
current Dataset.
"""
if indices_cache_file_name is None and indices_buffer is None:
raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
if fingerprint is None:
raise ValueError("please specify a fingerprint for the dataset with indices")
if indices_cache_file_name is not None:
indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
else:
indices_table = InMemoryTable.from_buffer(indices_buffer)
# Return new Dataset object
# don't forget to copy the objects
return Dataset(
self._data,
info=self.info.copy(),
split=self.split,
indices_table=indices_table,
fingerprint=fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def select(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices.
Args:
indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
Range, list or 1D-array of integer indices for indexing.
If the indices correspond to a contiguous range, the Arrow table is simply sliced.
                However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
but still faster than recreating an Arrow table made of the requested rows.
keep_in_memory (`bool`, defaults to `False`):
Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.select(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# If indices is a PyArrow array, we convert to NumPy
if isinstance(indices, (pa.Array, pa.ChunkedArray)):
indices = indices.to_numpy().astype(np.int64)
# Convert generator objects to lists
if isinstance(indices, Iterator):
indices = list(indices)
# If the indices are contiguous, simply slice the arrow table
if isinstance(indices, range):
if _is_range_contiguous(indices) and indices.start >= 0:
start, length = indices.start, indices.stop - indices.start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
else:
try:
start = next(iter(indices))
except StopIteration:
# if `indices` is an empty iterable, we return an empty dataset
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
if start >= 0:
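                # Compare the indices against an incrementing counter: if they all match, the selection is a contiguous ascending run that can be sliced directly.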
counter_from_start = itertools.count(start=start)
if all(i == j for i, j in zip(indices, counter_from_start)):
length = next(counter_from_start) - start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
# If not contiguous, we need to create a new indices mapping
return self._select_with_indices_mapping(
indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False)
def _select_contiguous(
self,
start: int,
length: int,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows from a contiguous slice of data.
        The slice is defined by its start index and its length.
Args:
start (`int`): start index.
length (`int`): length of the slice to select.
new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_contiguous(0, 4)
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
_check_valid_indices_value(start, len(self))
_check_valid_indices_value(start + length - 1, len(self))
if self._indices is None or length == 0:
return Dataset(
self.data.slice(start, length),
info=self.info.copy(),
split=self.split,
fingerprint=new_fingerprint,
)
else:
return Dataset(
self.data,
info=self.info.copy(),
split=self.split,
indices_table=self._indices.slice(start, length),
fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def _select_with_indices_mapping(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices.
The new dataset is made by creating a new indices mapping on top of the main arrow table.
Args:
indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_with_indices_mapping(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Prepare the writer for our indices arrow table
if keep_in_memory or indices_cache_file_name is None:
buf_writer = pa.BufferOutputStream()
tmp_file = None
writer = ArrowWriter(
stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
else:
buf_writer = None
logger.info(f"Caching indices mapping at {indices_cache_file_name}")
cache_dir = os.path.dirname(indices_cache_file_name)
os.makedirs(cache_dir, exist_ok=True)
tmp_file = tempfile.NamedTemporaryFile("wb", dir=cache_dir, delete=False)
writer = ArrowWriter(
path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
indices = indices if isinstance(indices, list) else list(indices)
size = len(self)
if indices:
_check_valid_indices_value(int(max(indices)), size=size)
_check_valid_indices_value(int(min(indices)), size=size)
else:
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
indices_array = pa.array(indices, type=pa.uint64())
# Check if we need to convert indices
if self._indices is not None:
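            # Compose with the existing indices mapping so the new indices refer to rows of the underlying Arrow table.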
indices_array = self._indices.column(0).take(indices_array)
indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
with writer:
try:
writer.write_table(indices_table)
writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
except (Exception, KeyboardInterrupt):
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
if tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, indices_cache_file_name)
umask = os.umask(0o666)
os.umask(umask)
os.chmod(indices_cache_file_name, 0o666 & ~umask)
# Return new Dataset object
if buf_writer is None:
return self._new_dataset_with_indices(
indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
)
else:
return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
def skip(self, n: int) -> "Dataset":
"""
Create a new [`Dataset`] that skips the first `n` elements.
Args:
n (`int`):
Number of elements to skip.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.skip(1)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'},
{'label': 1,
'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
```
"""
return self.select(range(n, len(self)))
def take(self, n: int) -> "Dataset":
"""
Create a new [`Dataset`] with only the first `n` elements.
Args:
n (`int`):
Number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
"""
return self.select(range(n))
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
def sort(
self,
column_names: Union[str, Sequence_[str]],
reverse: Union[bool, Sequence_[bool]] = False,
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
<Added version="1.14.2"/>
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
sorted indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
                A higher value gives smaller cache files, a lower value consumes less temporary memory.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes', split='validation')
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Check proper format of and for duplicates in column_names
if isinstance(column_names, str):
column_names = [column_names]
# Check proper format and length of reverse
if not isinstance(reverse, bool):
if len(reverse) != len(column_names):
raise ValueError(
"Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
)
else:
reverse = [reverse] * len(column_names)
# Check whether column name(s) exist in dataset
for column in column_names:
if not isinstance(column, str) or column not in self._data.column_names:
raise ValueError(
f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
)
# Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatability
if null_placement not in ["at_start", "at_end"]:
if null_placement == "first":
null_placement = "at_start"
elif null_placement == "last":
null_placement = "at_end"
else:
raise ValueError(
f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
sort_table = query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
)
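        # Build the (column, order) pairs expected by pyarrow.compute.sort_indices.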
sort_keys = [
(col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
]
indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
)
def shuffle(
self,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new Dataset where the rows are shuffled.
Currently shuffling uses numpy random generators.
        You can either supply a NumPy BitGenerator to use, or a seed to initialize NumPy's default random generator (PCG64).
Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
        However, as soon as your [`Dataset`] has an indices mapping, reading rows can become up to 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
        This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
        for example in my_iterable_dataset:  # fast
pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
        for example in shuffled_iterable_dataset:  # as fast as before
pass
```
Args:
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, default `False`):
Keep the shuffled indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the shuffled indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
shuffled indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds['label'][:10]
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if seed is not None and generator is not None:
raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
if generator is not None and not isinstance(generator, np.random.Generator):
raise ValueError("The provided generator must be an instance of numpy.random.Generator")
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None:
if seed is None:
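                # No seed and no generator provided: derive a seed from NumPy's global RNG state and advance that state by one step.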
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
permutation = generator.permutation(len(self))
return self.select(
indices=permutation,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False,
randomized_function=True,
fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
)
def train_test_split(
self,
test_size: Union[float, int, None] = None,
train_size: Union[float, int, None] = None,
shuffle: bool = True,
stratify_by_column: Optional[str] = None,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
train_indices_cache_file_name: Optional[str] = None,
test_indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
train_new_fingerprint: Optional[str] = None,
test_new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
This method is similar to scikit-learn `train_test_split`.
Args:
            test_size (`float` or `int`, *optional*):
                Size of the test split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
If `int`, represents the absolute number of test samples.
If `None`, the value is set to the complement of the train size.
If `train_size` is also `None`, it will be set to `0.25`.
            train_size (`float` or `int`, *optional*):
                Size of the train split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
If `int`, represents the absolute number of train samples.
If `None`, the value is automatically set to the complement of the test size.
shuffle (`bool`, *optional*, defaults to `True`):
Whether or not to shuffle the data before splitting.
stratify_by_column (`str`, *optional*, defaults to `None`):
The column name of labels to be used to perform stratified split of data.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, defaults to `False`):
Keep the splits indices in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the splits indices
can be identified, use it instead of recomputing.
            train_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
train split indices instead of the automatically generated cache file name.
            test_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
test split indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
train_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the train set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
test_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the test set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 852
})
test: Dataset({
features: ['text', 'label'],
num_rows: 214
})
})
# set a seed
>>> ds = ds.train_test_split(test_size=0.2, seed=42)
# stratified split
>>> ds = load_dataset("imdb",split="train")
Dataset({
features: ['text', 'label'],
num_rows: 25000
})
>>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 20000
})
test: Dataset({
features: ['text', 'label'],
num_rows: 5000
})
})
```
"""
from .dataset_dict import DatasetDict # import here because of circular dependency
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return DatasetDict({"train": self, "test": self})
if test_size is None and train_size is None:
test_size = 0.25
# Safety checks similar to scikit-learn's ones.
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
n_samples = len(self)
if (
isinstance(test_size, int)
and (test_size >= n_samples or test_size <= 0)
or isinstance(test_size, float)
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
f"test_size={test_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if (
isinstance(train_size, int)
and (train_size >= n_samples or train_size <= 0)
or isinstance(train_size, float)
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
f"train_size={train_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if train_size is not None and not isinstance(train_size, (int, float)):
raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
if test_size is not None and not isinstance(test_size, (int, float)):
raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
raise ValueError(
f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size."
)
if isinstance(test_size, float):
n_test = ceil(test_size * n_samples)
elif isinstance(test_size, int):
n_test = float(test_size)
if isinstance(train_size, float):
n_train = floor(train_size * n_samples)
elif isinstance(train_size, int):
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
f"The sum of train_size and test_size = {n_train + n_test}, "
"should be smaller than the number of "
f"samples {n_samples}. Reduce test_size and/or "
"train_size."
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None and shuffle is True:
if seed is None:
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
if train_indices_cache_file_name is None:
train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
if test_indices_cache_file_name is None:
test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
if (
os.path.exists(train_indices_cache_file_name)
and os.path.exists(test_indices_cache_file_name)
and load_from_cache_file
):
logger.info(
f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
)
return DatasetDict(
{
"train": self._new_dataset_with_indices(
fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
),
"test": self._new_dataset_with_indices(
fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
),
}
)
if not shuffle:
if stratify_by_column is not None:
raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
train_indices = np.arange(n_train)
test_indices = np.arange(n_train, n_train + n_test)
else:
# stratified partition
if stratify_by_column is not None:
if stratify_by_column not in self._info.features.keys():
raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
if not isinstance(self._info.features[stratify_by_column], ClassLabel):
raise ValueError(
f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
)
try:
train_indices, test_indices = next(
stratified_shuffle_split_generate_indices(
self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
)
)
except Exception as error:
if str(error) == "Minimum class count error":
raise ValueError(
f"The least populated class in {stratify_by_column} column has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2."
)
else:
raise error
# random partition
else:
permutation = generator.permutation(len(self))
test_indices = permutation[:n_test]
train_indices = permutation[n_test : (n_test + n_train)]
train_split = self.select(
indices=train_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=train_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=train_new_fingerprint,
)
test_split = self.select(
indices=test_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=test_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=test_new_fingerprint,
)
return DatasetDict({"train": train_split, "test": test_split})
def shard(
self,
num_shards: int,
index: int,
contiguous: bool = True,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
) -> "Dataset":
"""Return the `index`-nth shard from dataset split into `num_shards` pieces.
This shards deterministically. `dataset.shard(n, i)` splits the dataset into contiguous chunks,
so it can be easily concatenated back together after processing. If `len(dataset) % n == l`, then the
        first `l` shards each have length `(len(dataset) // n) + 1`, and the remaining shards have length `(len(dataset) // n)`.
`datasets.concatenate_datasets([dset.shard(n, i) for i in range(n)])` returns a dataset with the same order as the original.
        Note: `n` should be less than or equal to the number of elements in the dataset `len(dataset)`.
        On the other hand, `dataset.shard(n, i, contiguous=False)` contains all elements of the dataset whose index modulo `n` equals `i`.
Be sure to shard before using any randomizing operator (such as `shuffle`).
It is best if the shard operator is used early in the dataset pipeline.
Args:
num_shards (`int`):
How many shards to split the dataset into.
index (`int`):
Which shard to select and return.
            contiguous (`bool`, defaults to `True`):
Whether to select contiguous blocks of indices for shards.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices of each shard instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
This only concerns the indices mapping.
Number of indices per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 1066
})
>>> ds.shard(num_shards=2, index=0)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if not 0 <= index < num_shards:
raise ValueError("index should be in [0, num_shards-1]")
if contiguous:
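            # Spread the remainder `len(self) % num_shards` over the first shards so shard sizes differ by at most one row.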
div = len(self) // num_shards
mod = len(self) % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
indices = range(start, end)
else:
indices = np.arange(index, len(self), num_shards)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
)
def to_csv(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_csv_kwargs,
) -> int:
"""Exports the dataset to csv
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`),
or a BinaryIO, where the dataset will be saved to in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**to_csv_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
<Changed version="2.10.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_csv("path/to/dataset/directory")
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetWriter
return CsvDatasetWriter(
self,
path_or_buf,
batch_size=batch_size,
num_proc=num_proc,
storage_options=storage_options,
**to_csv_kwargs,
).write()
def to_dict(self, batch_size: Optional[int] = None) -> Union[dict, Iterator[dict]]:
"""Returns the dataset as a Python dict. Can also return a generator for large datasets.
Args:
            batch_size (`int`, *optional*): The size (number of rows) of the batches.
                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`dict` or `Iterator[dict]`
Example:
```py
>>> ds.to_dict()
```
"""
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pydict()
def to_list(self) -> list:
"""Returns the dataset as a Python list.
Returns:
`list`
Example:
```py
>>> ds.to_list()
```
"""
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pylist()
def to_json(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_json_kwargs,
) -> int:
"""Export the dataset to JSON Lines or JSON.
The default output format is [JSON Lines](https://jsonlines.org/).
To export to [JSON](https://www.json.org), pass `lines=False` argument and the desired `orient`.
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`),
or a BinaryIO, where the dataset will be saved to in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default, it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**to_json_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
                Default arguments are `lines=True` and `orient="records"`.
<Changed version="2.11.0">
The parameter `index` defaults to `False` if `orient` is `"split"` or `"table"`.
If you would like to write the index, pass `index=True`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_json("path/to/dataset/directory/filename.jsonl")
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetWriter
return JsonDatasetWriter(
self,
path_or_buf,
batch_size=batch_size,
num_proc=num_proc,
storage_options=storage_options,
**to_json_kwargs,
).write()
def to_pandas(
self, batch_size: Optional[int] = None, batched: bool = False
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
                of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
batch_size (`int`, *optional*):
The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`pandas.DataFrame` or `Iterator[pandas.DataFrame]`
Example:
```py
>>> ds.to_pandas()
```
"""
if not batched:
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pandas(types_mapper=pandas_types_mapper)
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
return (
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices,
).to_pandas(types_mapper=pandas_types_mapper)
for offset in range(0, len(self), batch_size)
)
def to_polars(
self,
batch_size: Optional[int] = None,
batched: bool = False,
schema_overrides: Optional[dict] = None,
rechunk: bool = True,
) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]:
"""Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets.
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
                of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
batch_size (`int`, *optional*):
The size (number of rows) of the batches if `batched` is `True`.
                Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
schema_overrides (`dict`, *optional*):
Support type specification or override of one or more columns; note that
any dtypes inferred from the schema param will be overridden.
rechunk (`bool`):
Make sure that all data is in contiguous memory. Defaults to `True`.
Returns:
`polars.DataFrame` or `Iterator[polars.DataFrame]`
Example:
```py
>>> ds.to_polars()
```
"""
if config.POLARS_AVAILABLE:
import polars as pl
if not batched:
return pl.from_arrow(
query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices if self._indices is not None else None,
),
schema_overrides=schema_overrides,
rechunk=rechunk,
)
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
return (
pl.from_arrow(
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices if self._indices is not None else None,
),
schema_overrides=schema_overrides,
rechunk=rechunk,
)
for offset in range(0, len(self), batch_size)
)
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
def to_parquet(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
storage_options: Optional[dict] = None,
**parquet_writer_kwargs,
) -> int:
"""Exports the dataset to parquet
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`),
                or a BinaryIO, where the dataset will be saved in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**parquet_writer_kwargs (additional keyword arguments):
Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
Returns:
`int`: The number of characters or bytes written.
Example:
```py
        >>> ds.to_parquet("path/to/dataset/directory/filename.parquet")
```
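        Extra keyword arguments are forwarded to `pyarrow.parquet.ParquetWriter`, for example to change
        the compression codec (a sketch, assuming your PyArrow build supports zstd):
        ```py
        >>> ds.to_parquet("path/to/dataset/directory/filename.parquet", compression="zstd")
        ```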
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetWriter
return ParquetDatasetWriter(
self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs
).write()
def to_sql(
self,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
**sql_writer_kwargs,
) -> int:
"""Exports the dataset to a SQL database.
Args:
name (`str`):
Name of SQL table.
            con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
**sql_writer_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
<Changed version="2.11.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of records written.
Example:
```py
>>> # con provided as a connection URI string
>>> ds.to_sql("data", "sqlite:///my_own_db.sql")
>>> # con provided as a sqlite3 connection object
>>> import sqlite3
>>> con = sqlite3.connect("my_own_db.sql")
>>> with con:
... ds.to_sql("data", con)
```
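        By default the index is not written; to include it, pass the corresponding `pandas.DataFrame.to_sql`
        arguments through (a sketch; the `index_label` name is illustrative):
        ```py
        >>> ds.to_sql("data", "sqlite:///my_own_db.sql", index=True, index_label="idx")
        ```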
"""
# Dynamic import to avoid circular dependency
from .io.sql import SqlDatasetWriter
return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
def _estimate_nbytes(self) -> int:
dataset_nbytes = self.data.nbytes
# Find decodable columns, because if there are any, we need to
# adjust the dataset size computation (needed for sharding) to account for possible external files
decodable_columns = [
k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
]
if decodable_columns:
# Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
extra_nbytes = 0
def extra_nbytes_visitor(array, feature):
nonlocal extra_nbytes
if isinstance(feature, (Audio, Image, Video)):
for x in array.to_pylist():
if x is not None and x["bytes"] is None and x["path"] is not None:
size = xgetsize(x["path"])
extra_nbytes += size
extra_nbytes -= array.field("path").nbytes
table = self.with_format("arrow")[:1000]
table_visitor(table, extra_nbytes_visitor)
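            # Extrapolate the extra bytes measured on the (at most) first 1000 examples to the full table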
extra_nbytes = extra_nbytes * len(self.data) / len(table)
dataset_nbytes = dataset_nbytes + extra_nbytes
if self._indices is not None:
dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
return dataset_nbytes
@staticmethod
def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
for shard_idx, shard in enumerate(shards):
for pa_table in shard.with_format("arrow").iter(batch_size):
yield shard_idx, pa_table
@staticmethod
def _generate_tables_from_cache_file(filename: str):
for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
yield batch_idx, pa.Table.from_batches([batch])
def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
"""Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
        Unlike map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
To get the best speed performance, make sure your dataset doesn't have an indices mapping.
If this is the case, the data are not read contiguously, which can be slow sometimes.
You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
Args:
            num_shards (`int`, defaults to `1`):
Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
Returns:
[`datasets.IterableDataset`]
Example:
Basic usage:
```python
>>> ids = ds.to_iterable_dataset()
>>> for example in ids:
... pass
```
With lazy filtering and processing:
```python
>>> ids = ds.to_iterable_dataset()
>>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
>>> for example in ids:
... pass
```
With sharding to enable efficient shuffling:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.filter(filter_fn).map(process_fn)
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
>>> for example in ids:
... pass
```
With a PyTorch DataLoader and shuffling:
```python
>>> import torch
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling
```python
>>> from datasets.distributed import split_dataset_by_node
>>> ids = ds.to_iterable_dataset(num_shards=512)
>>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
        >>> ids = split_dataset_by_node(ids, world_size=8, rank=0)  # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
>>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
>>> for example in ids:
... pass
```
With shuffling and multiple epochs:
```python
>>> ids = ds.to_iterable_dataset(num_shards=64)
>>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
>>> for epoch in range(n_epochs):
... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
... for example in ids:
... pass
```
Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
"""
from .iterable_dataset import ArrowExamplesIterable, IterableDataset
if self._format_type is not None:
raise NotImplementedError(
"Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
)
if num_shards > len(self):
raise ValueError(
f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
)
if self._indices is not None:
logger.info(
"Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
"You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
)
shards = (
[copy.deepcopy(self)]
if num_shards == 1
else [
self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
]
)
ex_iterable = ArrowExamplesIterable(
Dataset._generate_tables_from_shards,
kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
)
return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
def _push_parquet_shards_to_hub(
self,
repo_id: str,
data_dir: str = "data",
split: Optional[str] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
    ) -> Tuple[List[CommitOperationAdd], int, int]:
"""Pushes the dataset shards as Parquet files to the hub.
Returns:
additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards
uploaded_size (`int`): number of uploaded bytes to the repository
            dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
"""
# Find decodable columns, because if there are any, we need to:
# embed the bytes from the files in the shards
decodable_columns = (
[k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
if embed_external_files
else []
)
dataset_nbytes = self._estimate_nbytes()
if num_shards is None:
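            # Derive the number of shards from the estimated dataset size and the maximum shard size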
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
if decodable_columns:
from .io.parquet import get_writer_batch_size
def shards_with_embedded_external_files(shards: Iterator[Dataset]) -> Iterator[Dataset]:
for shard in shards:
format = shard.format
shard = shard.with_format("arrow")
shard = shard.map(
embed_table_storage,
batched=True,
batch_size=get_writer_batch_size(shard.features),
keep_in_memory=True,
)
shard = shard.with_format(**format)
yield shard
shards = shards_with_embedded_external_files(shards)
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
uploaded_size = 0
additions = []
for index, shard in hf_tqdm(
enumerate(shards),
desc="Uploading the dataset shards",
total=num_shards,
):
shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
buffer = BytesIO()
shard.to_parquet(buffer)
uploaded_size += buffer.tell()
shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
api.preupload_lfs_files(
repo_id=repo_id,
additions=[shard_addition],
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
additions.append(shard_addition)
return additions, uploaded_size, dataset_nbytes
def push_to_hub(
self,
repo_id: str,
config_name: str = "default",
set_default: Optional[bool] = None,
split: Optional[str] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[int] = None,
embed_external_files: bool = True,
) -> CommitInfo:
"""Pushes the dataset to the hub as a Parquet dataset.
        The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
The resulting Parquet files are self-contained by default. If your dataset contains [`Image`], [`Audio`] or [`Video`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to `False`.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`, defaults to "default"):
The configuration name (or subset) of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
split (`str`, *optional*):
The name of the split that will be given to that dataset. Defaults to `self.split`.
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, it needs to be digits followed by
                a unit (like `"5MB"`).
num_shards (`int`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
- [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
        Returns:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024)
```
If your dataset has multiple splits (e.g. train/validation/test):
```python
>>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train")
>>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation")
>>> # later
>>> dataset = load_dataset("<organization>/<dataset_id>")
>>> train_dataset = dataset["train"]
>>> val_dataset = dataset["validation"]
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if "Video(" in str(self.features):
raise NotImplementedError(
"push_to_hub is not implemented for video datasets, instead you should upload the video files "
"using e.g. the huggingface_hub library and optionally upload a metadata.csv or metadata.jsonl "
"file containing other information like video captions, features or labels. More information "
"at https://huggingface.co./docs/datasets/main/en/video_load#videofolder"
)
if config_name == "data":
raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
if max_shard_size is not None and num_shards is not None:
raise ValueError(
"Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
)
if split is None:
split = str(self.split) if self.split is not None else "train"
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
repo_id=repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
max_shard_size=max_shard_size,
num_shards=num_shards,
create_pr=create_pr,
embed_external_files=embed_external_files,
)
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
deletions, deleted_size = [], 0
repo_splits = [] # use a list to keep the order of the splits
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
deleted_size += repo_file.size
elif fnmatch.fnmatch(
repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
):
repo_split = string_to_dict(
repo_file.rfilename,
glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
)["split"]
if repo_split not in repo_splits:
repo_splits.append(repo_split)
organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
info_to_dump = self.info.copy()
info_to_dump.download_checksums = None
info_to_dump.download_size = uploaded_size
info_to_dump.dataset_size = dataset_nbytes
info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict(
{split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
if dataset_infos and config_name in dataset_infos:
repo_info = dataset_infos[config_name]
else:
repo_info = None
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
repo_info = None
# update the total info to dump from existing info
if repo_info is not None:
logger.info("Updating downloaded metadata with the new split.")
if repo_info.splits and list(repo_info.splits) != [split]:
if self._info.features != repo_info.features:
raise ValueError(
f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
)
if split in repo_info.splits:
repo_info.download_size -= deleted_size
repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
repo_info.download_checksums = None
repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
repo_info.splits.pop(split, None)
repo_info.splits[split] = SplitInfo(
split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
)
info_to_dump = repo_info
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
# update the metadata configs
if config_name in metadata_configs:
metadata_config = metadata_configs[config_name]
if "data_files" in metadata_config:
data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
else:
data_files_to_dump = {}
# add the new split
data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
metadata_config_to_dump = {
"data_files": [
{
"split": _split,
"path": _pattern[0] if len(_pattern) == 1 else _pattern,
}
for _split, _pattern in data_files_to_dump.items()
]
}
else:
metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
if set_default and config_name != "default":
if metadata_configs:
default_config_name = metadata_configs.get_default_config_name()
if default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
                elif default_config_name:
_ = metadata_configs[default_config_name].pop("default")
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
additions.append(
CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
commit_info = api.create_commit(
repo_id,
operations=additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
else:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
] + (deletions if i == 0 else [])
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
return commit_info
@transmit_format
@fingerprint_transform(inplace=False)
def add_column(
self, name: str, column: Union[list, np.array], new_fingerprint: str, feature: Optional[FeatureType] = None
):
"""Add column to Dataset.
<Added version="1.7"/>
Args:
name (`str`):
Column name.
column (`list` or `np.array`):
Column data to be added.
feature (`FeatureType` or `None`, defaults to `None`):
Column datatype.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> more_text = ds["text"]
>>> ds.add_column(name="text_2", column=more_text)
Dataset({
features: ['text', 'label', 'text_2'],
num_rows: 1066
})
```
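        If you want to control the column type, pass `feature` explicitly
        (a sketch reusing `more_text` from above; the `Value("large_string")` type is illustrative):
        ```py
        >>> from datasets import Value
        >>> ds.add_column(name="text_2", column=more_text, feature=Value("large_string"))
        ```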
"""
if feature:
pyarrow_schema = Features({name: feature}).arrow_schema
else:
pyarrow_schema = None
column_table = InMemoryTable.from_pydict({name: column}, schema=pyarrow_schema)
_check_column_names(self._data.column_names + column_table.column_names)
dataset = self.flatten_indices() if self._indices is not None else self
# Concatenate tables horizontally
table = concat_tables([dataset._data, column_table], axis=1)
# Update features
info = dataset.info.copy()
info.features.update(Features.from_arrow_schema(column_table.schema))
table = update_metadata_with_features(table, info.features)
return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
        By default the index is built over the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
column (`str`):
The column of the vectors to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
By default it corresponds to `column`.
device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (`faiss.Index`, *optional*):
Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (`int`):
Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
<Added version="2.4.0"/>
train_size (`int`, *optional*):
If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to `False`):
Enable the verbosity of the Faiss index.
dtype (`data-type`):
The dtype of the numpy arrays that are indexed.
Default is `np.float32`.
Example:
```python
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
        >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
>>> ds_with_embeddings.add_faiss_index(column='embeddings')
>>> # query
>>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
>>> # save index
>>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> # load index
>>> ds.load_faiss_index('embeddings', 'my_index.faiss')
>>> # query
>>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
```
"""
with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
super().add_faiss_index(
column=column,
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
return self
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[int] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None, # noqa: F821
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
dtype=np.float32,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
Args:
external_arrays (`np.array`):
If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`):
The `index_name`/identifier of the index.
This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
            device (`Union[int, List[int]]`, *optional*):
If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (`str`, *optional*):
This is passed to the index factory of Faiss to create the index.
Default index class is `IndexFlat`.
metric_type (`int`, *optional*):
                Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (`faiss.Index`, *optional*):
Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (`int`, *optional*):
Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (`int`, *optional*):
If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False):
Enable the verbosity of the Faiss index.
dtype (`numpy.dtype`):
The dtype of the numpy arrays that are indexed. Default is np.float32.
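        Example (a minimal sketch; `embeddings` is an illustrative array of shape `(num_rows, dim)`):
        ```python
        >>> import numpy as np
        >>> embeddings = np.random.rand(len(ds), 128).astype(np.float32)
        >>> ds.add_faiss_index_from_external_arrays(external_arrays=embeddings, index_name='embeddings')
        >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embeddings[0], k=10)
        ```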
"""
super().add_faiss_index_from_external_arrays(
external_arrays=external_arrays.astype(dtype),
index_name=index_name,
device=device,
string_factory=string_factory,
metric_type=metric_type,
custom_index=custom_index,
batch_size=batch_size,
train_size=train_size,
faiss_verbose=faiss_verbose,
)
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval. This is done in-place.
Args:
column (`str`):
The column of the documents to add to the index.
index_name (`str`, *optional*):
The `index_name`/identifier of the index.
This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`].
By default it corresponds to `column`.
host (`str`, *optional*, defaults to `localhost`):
                Host where ElasticSearch is running.
            port (`int`, *optional*, defaults to `9200`):
                Port where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_name (`str`, *optional*):
The elasticsearch index name used to create the index.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
Example:
```python
>>> es_client = elasticsearch.Elasticsearch()
>>> ds = datasets.load_dataset('crime_and_punish', split='train')
>>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
>>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
```
"""
with self.formatted_as(type=None, columns=[column]):
super().add_elasticsearch_index(
column=column,
index_name=index_name,
host=host,
port=port,
es_client=es_client,
es_index_name=es_index_name,
es_index_config=es_index_config,
)
return self
@transmit_format
@fingerprint_transform(inplace=False)
def add_item(self, item: dict, new_fingerprint: str):
"""Add item to Dataset.
<Added version="1.7"/>
Args:
item (`dict`):
Item data to be added.
Returns:
[`Dataset`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
>>> ds = ds.add_item(new_review)
>>> ds[-1]
{'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
```
"""
item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
# We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
dset_features, item_features = _align_features(
[self._info.features, Features.from_arrow_schema(item_table.schema)]
)
# Cast to align the schemas of the tables and concatenate the tables
table = concat_tables(
[
self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
item_table.cast(item_features.arrow_schema),
]
)
if self._indices is None:
indices_table = None
else:
item_indices_array = pa.array([len(self._data)], type=pa.uint64())
item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
indices_table = concat_tables([self._indices, item_indices_table])
info = self.info.copy()
info.features.update(item_features)
table = update_metadata_with_features(table, info.features)
return Dataset(
table,
info=info,
split=self.split,
indices_table=indices_table,
fingerprint=new_fingerprint,
)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
"""Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
        The alignment is done using the lowercase label names.
Args:
label2id (`dict`):
The label name to ID mapping to align the dataset with.
label_column (`str`):
The column name of labels to align on.
Example:
```python
>>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
>>> ds = load_dataset("glue", "mnli", split="train")
>>> # mapping to align with
>>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
>>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
```
"""
# Sanity checks
if label_column not in self._data.column_names:
raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
label_feature = self._info.features[label_column]
if not (
isinstance(label_feature, ClassLabel)
or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
):
raise ValueError(
f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
)
# Sort input mapping by ID value to ensure the label names are aligned
label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
label_names = list(label2id.keys())
# Some label mappings use uppercase label names so we lowercase them during alignment
label2id = {k.lower(): v for k, v in label2id.items()}
int2str_function = (
label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
)
if isinstance(label_feature, ClassLabel):
def process_label_ids(batch):
dset_label_names = [
int2str_function(label_id).lower() if label_id is not None else None
for label_id in batch[label_column]
]
batch[label_column] = [
label2id[label_name] if label_name is not None else None for label_name in dset_label_names
]
return batch
else:
def process_label_ids(batch):
dset_label_names = [
[int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
for seq in batch[label_column]
]
batch[label_column] = [
[label2id[label_name] if label_name is not None else None for label_name in seq]
for seq in dset_label_names
]
return batch
features = self.features
features[label_column] = (
ClassLabel(num_classes=len(label_names), names=label_names)
if isinstance(label_feature, ClassLabel)
else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
)
return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels") | class_definition | 25,039 | 277,916 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py | null | 4 |
class NumExamplesMismatchError(Exception):
pass | class_definition | 146,059 | 146,118 | 1 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py | Dataset | 5 |
class Url(str):
pass | class_definition | 928 | 952 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 6 |
class EmptyDatasetError(FileNotFoundError):
pass | class_definition | 955 | 1,007 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 7 |
class DataFilesList(List[str]):
"""
List of data files (absolute local paths or URLs).
It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
Moreover, DataFilesList has an additional attribute ``origin_metadata``.
It can store:
- the last modified time of local files
- ETag of remote files
- commit sha of a dataset repository
Thanks to this additional attribute, it is possible to hash the list
and get a different hash if and only if at least one file changed.
This is useful for caching Dataset objects that are obtained from a list of data files.
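    Example (a sketch; the pattern is illustrative):
    ```python
    >>> from datasets.data_files import DataFilesList
    >>> data_files = DataFilesList.from_local_or_remote(["data/train-*.parquet"])
    ```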
"""
def __init__(self, data_files: List[str], origin_metadata: List[SingleOriginMetadata]) -> None:
super().__init__(data_files)
self.origin_metadata = origin_metadata
def __add__(self, other: "DataFilesList") -> "DataFilesList":
return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
@classmethod
def from_hf_repo(
cls,
patterns: List[str],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_local_or_remote(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
return cls.from_patterns(
patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
)
@classmethod
def from_patterns(
cls,
patterns: List[str],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
data_files = []
for pattern in patterns:
try:
data_files.extend(
resolve_pattern(
pattern,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
except FileNotFoundError:
if not has_magic(pattern):
raise
origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
return cls(data_files, origin_metadata)
def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
pattern = "|".join("\\" + ext for ext in extensions)
pattern = re.compile(f".*({pattern})(\\..+)?$")
return DataFilesList(
[data_file for data_file in self if pattern.match(data_file)],
origin_metadata=self.origin_metadata,
) | class_definition | 22,243 | 25,785 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 8 |
class DataFilesDict(Dict[str, DataFilesList]):
"""
Dict of split_name -> list of data files (absolute local paths or URLs).
    It has two construction methods given the user's data files patterns:
- ``from_hf_repo``: resolve patterns inside a dataset repository
- ``from_local_or_remote``: resolve patterns from a local path
Moreover, each list is a DataFilesList. It is possible to hash the dictionary
and get a different hash if and only if at least one file changed.
For more info, see [`DataFilesList`].
This is useful for caching Dataset objects that are obtained from a list of data files.
Changing the order of the keys of this dictionary also doesn't change its hash.
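    Example (a sketch; the patterns are illustrative):
    ```python
    >>> from datasets.data_files import DataFilesDict
    >>> data_files = DataFilesDict.from_local_or_remote(
    ...     {"train": ["data/train-*.csv"], "test": ["data/test-*.csv"]}
    ... )
    ```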
"""
@classmethod
def from_local_or_remote(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_local_or_remote(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
@classmethod
def from_hf_repo(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
dataset_info: huggingface_hub.hf_api.DatasetInfo,
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_hf_repo(
patterns_for_key,
dataset_info=dataset_info,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
@classmethod
def from_patterns(
cls,
patterns: Dict[str, Union[List[str], DataFilesList]],
base_path: Optional[str] = None,
allowed_extensions: Optional[List[str]] = None,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesList)
else DataFilesList.from_patterns(
patterns_for_key,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
return out
def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
out = type(self)()
for key, data_files_list in self.items():
out[key] = data_files_list.filter_extensions(extensions)
return out | class_definition | 25,788 | 29,225 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 9 |
class DataFilesPatternsList(List[str]):
"""
List of data files patterns (absolute local paths or URLs).
    For each pattern there should also be a list of allowed extensions
    to keep, or None to keep all the files matched by the pattern.
"""
def __init__(
self,
patterns: List[str],
allowed_extensions: List[Optional[List[str]]],
):
super().__init__(patterns)
self.allowed_extensions = allowed_extensions
    def __add__(self, other: "DataFilesPatternsList") -> "DataFilesPatternsList":
        return DataFilesPatternsList([*self, *other], self.allowed_extensions + other.allowed_extensions)
@classmethod
def from_patterns(
cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
) -> "DataFilesPatternsList":
return cls(patterns, [allowed_extensions] * len(patterns))
def resolve(
self,
base_path: str,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesList":
base_path = base_path if base_path is not None else Path().resolve().as_posix()
data_files = []
for pattern, allowed_extensions in zip(self, self.allowed_extensions):
try:
data_files.extend(
resolve_pattern(
pattern,
base_path=base_path,
allowed_extensions=allowed_extensions,
download_config=download_config,
)
)
except FileNotFoundError:
if not has_magic(pattern):
raise
origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
return DataFilesList(data_files, origin_metadata)
def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList":
return DataFilesPatternsList(
self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
) | class_definition | 29,228 | 31,193 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 10 |
class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
"""
Dict of split_name -> list of data files patterns (absolute local paths or URLs).
"""
@classmethod
def from_patterns(
cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
) -> "DataFilesPatternsDict":
out = cls()
for key, patterns_for_key in patterns.items():
out[key] = (
patterns_for_key
if isinstance(patterns_for_key, DataFilesPatternsList)
else DataFilesPatternsList.from_patterns(
patterns_for_key,
allowed_extensions=allowed_extensions,
)
)
return out
def resolve(
self,
base_path: str,
download_config: Optional[DownloadConfig] = None,
) -> "DataFilesDict":
out = DataFilesDict()
for key, data_files_patterns_list in self.items():
out[key] = data_files_patterns_list.resolve(base_path, download_config)
return out
def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
out = type(self)()
for key, data_files_patterns_list in self.items():
out[key] = data_files_patterns_list.filter_extensions(extensions)
return out | class_definition | 31,196 | 32,536 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py | null | 11 |
class _BaseExamplesIterable:
"""Base class for the examples iterable used by an IterableDataset"""
def __init__(self) -> None:
self._state_dict: Optional[Union[list, dict]] = None
def __iter__(self) -> Iterator[Tuple[Key, dict]]:
"""An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
@property
def iter_arrow(self) -> Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]]:
return None
@property
def is_typed(self) -> bool:
return False
@property
def features(self) -> Optional[Features]:
return None
def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
"""
Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
"""
raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "_BaseExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
def split_shard_indices_by_worker(self, num_shards: int, index: int, contiguous=True) -> List[int]:
if contiguous:
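            # Split self.num_shards shards into num_shards contiguous groups as evenly as possible:
            # the first (self.num_shards % num_shards) groups get one extra shard.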
div = self.num_shards // num_shards
mod = self.num_shards % num_shards
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
return list(range(start, end))
else:
return list(range(index, self.num_shards, num_shards))
@property
def num_shards(self) -> int:
raise NotImplementedError(f"{type(self)} doesn't implement num_shards yet")
def _init_state_dict(self) -> dict:
raise NotImplementedError(f"{type(self)} doesn't implement _init_state_dict yet")
def load_state_dict(self, state_dict: dict) -> dict:
def _inner_load_state_dict(state, new_state):
if new_state is not None and isinstance(state, dict):
for key in new_state:
state[key] = _inner_load_state_dict(state[key], new_state[key])
return state
elif new_state is not None and isinstance(state, list):
for i in range(len(state)):
state[i] = _inner_load_state_dict(state[i], new_state[i])
return state
return new_state
return _inner_load_state_dict(self._state_dict, state_dict)
def state_dict(self) -> dict:
if self._state_dict:
return copy.deepcopy(self._state_dict)
raise RuntimeError("State dict is not initialized, please call ex_iterable._init_state_dict() first.") | class_definition | 4,646 | 7,694 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py | null | 12 |
class ExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
super().__init__()
self.generate_examples_fn = generate_examples_fn
self.kwargs = kwargs
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict
def __iter__(self):
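        # If a state dict is set (e.g. when resuming from a checkpoint), skip the shards and examples
        # that were already yielded before the checkpoint.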
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None):
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key_example
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards)
shard_indices = self.split_shard_indices_by_worker(num_shards, index, contiguous=contiguous)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
@property
def num_shards(self) -> int:
return _number_of_shards_in_gen_kwargs(self.kwargs) | class_definition | 7,697 | 9,617 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py | null | 13 |
class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
def __init__(
self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
):
super().__init__(generate_examples_fn, kwargs)
self.generator = deepcopy(generator)
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None):
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key_example
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
num_shards, index, contiguous=contiguous
) | class_definition | 9,620 | 11,419 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py | null | 14 |
class ArrowExamplesIterable(_BaseExamplesIterable):
def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
super().__init__()
self.generate_tables_fn = generate_tables_fn
self.kwargs = kwargs
@property
def iter_arrow(self):
return self._iter_arrow
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict
def __iter__(self):
formatter = PythonFormatter()
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
if shard_example_idx + len(pa_table) <= shard_example_idx_start:
shard_example_idx += len(pa_table)
continue
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
if shard_example_idx >= shard_example_idx_start:
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key, example
shard_example_idx += 1
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def _iter_arrow(self):
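        # Resume at table granularity: tables fully consumed before the recorded example offset are skipped,
        # and a table that overlaps the offset is yielded in full.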
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
shard_example_idx += len(pa_table)
if shard_example_idx <= shard_example_idx_start:
continue
if self._state_dict:
self._state_dict["shard_example_idx"] += len(pa_table)
yield key, pa_table
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ArrowExamplesIterable":
"""Keep only the requested shard."""
gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards)
shard_indices = self.split_shard_indices_by_worker(num_shards, index, contiguous=contiguous)
requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
@property
def num_shards(self) -> int:
        return _number_of_shards_in_gen_kwargs(self.kwargs)
class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
def __init__(
self,
generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
kwargs: dict,
generator: np.random.Generator,
):
super().__init__(generate_tables_fn, kwargs)
self.generator = deepcopy(generator)
def _init_state_dict(self) -> dict:
self._state_dict = {"shard_idx": 0, "shard_example_idx": 0}
return self._state_dict
def __iter__(self):
"""Shuffle the kwargs order to shuffle shards"""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
formatter = PythonFormatter()
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
if shard_example_idx + len(pa_table) <= shard_example_idx_start:
shard_example_idx += len(pa_table)
continue
for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
formatted_batch = formatter.format_batch(pa_subtable)
for example in _batch_to_examples(formatted_batch):
if shard_example_idx >= shard_example_idx_start:
if self._state_dict:
self._state_dict["shard_example_idx"] += 1
yield key, example
shard_example_idx += 1
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def _iter_arrow(self):
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0
for gen_kwags in islice(
_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.num_shards), shard_idx_start, None
):
shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict else 0
shard_example_idx = 0
for key, pa_table in self.generate_tables_fn(**gen_kwags):
shard_example_idx += len(pa_table)
if shard_example_idx <= shard_example_idx_start:
continue
if self._state_dict:
self._state_dict["shard_example_idx"] += len(pa_table)
yield key, pa_table
if self._state_dict:
self._state_dict["shard_idx"] += 1
self._state_dict["shard_example_idx"] = 0
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ArrowExamplesIterable":
"""Keep only the requested shard."""
rng = deepcopy(self.generator)
kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
num_shards, index, contiguous=contiguous
        )
class RebatchedArrowExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, batch_size: Optional[int], drop_last_batch: bool = False):
super().__init__()
self.ex_iterable = ex_iterable
self.batch_size = batch_size
self.drop_last_batch = drop_last_batch
@property
def iter_arrow(self):
return self._iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = {
"ex_iterable": self.ex_iterable._init_state_dict(),
"previous_state": None,
"batch_idx": 0,
"num_chunks_since_previous_state": 0,
"cropped_chunk_length": 0,
}
return self._state_dict
def __iter__(self):
yield from self.ex_iterable
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
"""Iterate over sub-tables of size `batch_size`."""
if self._state_dict and self._state_dict["previous_state"]:
self.ex_iterable.load_state_dict(self._state_dict["previous_state"])
if self.ex_iterable.iter_arrow:
iterator = self.ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(self.ex_iterable, batch_size=1)
if self.batch_size is None or self.batch_size <= 0:
if self._state_dict and self._state_dict["batch_idx"] > 0:
return
all_pa_table = pa.concat_tables([pa_table for _, pa_table in iterator])
if self._state_dict:
self._state_dict["batch_idx"] = 1
yield "all", all_pa_table
return
keys_buffer = []
chunks_buffer = []
chunks_buffer_size = 0
num_chunks_to_skip = self._state_dict["num_chunks_since_previous_state"] if self._state_dict else 0
chunk_length_to_crop = self._state_dict["cropped_chunk_length"] if self._state_dict else 0
if self._state_dict:
previous_state = self.ex_iterable.state_dict()
self._state_dict["previous_state"] = previous_state
for key, pa_table in iterator:
for num_chunks_since_previous_state, chunk in enumerate(pa_table.to_reader(max_chunksize=self.batch_size)):
if num_chunks_to_skip > 1:
num_chunks_to_skip -= 1
continue
elif num_chunks_to_skip == 1 and chunk_length_to_crop == 0:
num_chunks_to_skip -= 1
continue
elif num_chunks_to_skip == 1 and chunk_length_to_crop > 0:
chunk = chunk.slice(chunk_length_to_crop, len(chunk) - chunk_length_to_crop)
num_chunks_to_skip = 0
chunk_length_to_crop = 0
if len(chunk) == 0:
continue
if chunks_buffer_size + len(chunk) < self.batch_size:
keys_buffer.append(key)
chunks_buffer.append(chunk)
chunks_buffer_size += len(chunk)
continue
elif chunks_buffer_size + len(chunk) == self.batch_size:
keys_buffer.append(key)
chunks_buffer.append(chunk)
new_key = "_".join(str(_key) for _key in keys_buffer)
if self._state_dict:
self._state_dict["batch_idx"] += 1
self._state_dict["num_chunks_since_previous_state"] += len(chunks_buffer)
self._state_dict["cropped_chunk_length"] = 0
yield new_key, pa.Table.from_batches(chunks_buffer)
keys_buffer = []
chunks_buffer = []
chunks_buffer_size = 0
if self._state_dict:
self._state_dict["previous_state"] = previous_state
self._state_dict["num_chunks_since_previous_state"] = num_chunks_since_previous_state + 1
else:
cropped_chunk_length = self.batch_size - chunks_buffer_size
keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
new_key = "_".join(str(_key) for _key in keys_buffer)
if self._state_dict:
self._state_dict["batch_idx"] += 1
self._state_dict["num_chunks_since_previous_state"] += len(chunks_buffer)
self._state_dict["cropped_chunk_length"] = cropped_chunk_length
yield new_key, pa.Table.from_batches(chunks_buffer)
keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
chunks_buffer_size = len(chunk) - cropped_chunk_length
if self._state_dict:
self._state_dict["previous_state"] = previous_state
self._state_dict["num_chunks_since_previous_state"] = num_chunks_since_previous_state
if self._state_dict:
previous_state = self.ex_iterable.state_dict()
if not self.drop_last_batch and chunks_buffer:
new_key = "_".join(str(_key) for _key in keys_buffer)
if self._state_dict:
self._state_dict["previous_state"] = previous_state
self._state_dict["batch_idx"] += 1
self._state_dict["num_chunks_since_previous_state"] = 0
self._state_dict["cropped_chunk_length"] = 0
yield new_key, pa.Table.from_batches(chunks_buffer)
def shuffle_data_sources(self, generator: np.random.Generator) -> "RebatchedArrowExamplesIterable":
return RebatchedArrowExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), self.batch_size, self.drop_last_batch
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "RebatchedArrowExamplesIterable":
return RebatchedArrowExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
self.batch_size,
self.drop_last_batch,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class SelectColumnsIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
super().__init__()
self.ex_iterable = ex_iterable
self.column_names = column_names
@property
def iter_arrow(self):
if self.ex_iterable.iter_arrow:
return self._iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = self.ex_iterable._init_state_dict()
return self._state_dict
def __iter__(self):
for idx, row in self.ex_iterable:
yield idx, {c: row[c] for c in self.column_names}
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
for idx, pa_table in self.ex_iterable.iter_arrow():
if len(pa_table) > 0: # empty tables have no schema
yield idx, pa_table.select(self.column_names)
def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SelectColumnsIterable":
return SelectColumnsIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), self.column_names
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class StepExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
super().__init__()
self.ex_iterable = ex_iterable
self.step = step
self.offset = offset
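        # e.g. step=3, offset=1 keeps examples 1, 4, 7, ...: one example per group of
        # `step` consecutive examples, taken at position `offset` within each group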
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = self.ex_iterable._init_state_dict()
return self._state_dict
def __iter__(self):
ex_iterator = iter(self.ex_iterable)
while True:
batch = list(islice(ex_iterator, self.step))
if len(batch) > self.offset:
yield batch[self.offset]
else:
break
def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
return StepExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "StepExamplesIterable":
return StepExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
step=self.step,
offset=self.offset,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterables: List[_BaseExamplesIterable],
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
super().__init__()
self.ex_iterables = ex_iterables
self.stopping_strategy = stopping_strategy
# if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
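        # e.g. with two sources of sizes 2 and 4 and alternating picks:
        # "first_exhausted" stops after 4 examples (as soon as the small source runs out),
        # while "all_exhausted" stops after 8 examples (the small source is restarted and
        # oversampled until the large one is exhausted too)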
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterables[0].is_typed
@property
def features(self):
return self.ex_iterables[0].features
def _get_indices_iterator(self):
# this is an infinite iterator to keep track of which iterator we want to pick examples from
ex_iterable_idx = self._state_dict["ex_iterable_idx"] if self._state_dict else 0
for next_ex_iterable_idx in islice(cycle(range(len(self.ex_iterables))), ex_iterable_idx + 1, None):
if self._state_dict:
self._state_dict["ex_iterable_idx"] = next_ex_iterable_idx
yield ex_iterable_idx
ex_iterable_idx = next_ex_iterable_idx
def _init_state_dict(self) -> dict:
self._state_dict = {
"ex_iterable_idx": 0,
"ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables],
"previous_states": [None] * len(self.ex_iterables),
"is_exhausted": [False] * len(self.ex_iterables),
}
return self._state_dict
def __iter__(self):
# we use this to buffer one example of each iterator to know if an iterator is exhausted
nexts = [None] * len(self.ex_iterables)
# because of that, we need to rewind 1 example when reloading the state dict
if self._state_dict:
for i in range(len(self.ex_iterables)):
if self._state_dict["previous_states"][i] is not None:
self.ex_iterables[i].load_state_dict(self._state_dict["previous_states"][i])
iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
indices_iterator = self._get_indices_iterator()
is_exhausted = (
np.array(self._state_dict["is_exhausted"]) if self._state_dict else np.full(len(self.ex_iterables), False)
)
for i in indices_iterator:
# if the stopping criteria is met, break the main for loop
if self.bool_strategy_func(is_exhausted):
break
# let's pick one example from the iterator at index i
if nexts[i] is None:
nexts[i] = next(iterators[i], False)
result = nexts[i]
if self._state_dict:
self._state_dict["previous_states"][i] = deepcopy(self._state_dict["ex_iterables"][i])
nexts[i] = next(iterators[i], False)
# the iterator is exhausted
if nexts[i] is False:
is_exhausted[i] = True
if self._state_dict:
self._state_dict["is_exhausted"][i] = True
                # we reset it in case the stopping criterion isn't met yet
nexts[i] = None
if self._state_dict:
self._state_dict["ex_iterables"][i] = self.ex_iterables[i]._init_state_dict()
self._state_dict["previous_states"][i] = None
iterators[i] = iter(self.ex_iterables[i])
if result is not False:
yield result
def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
"""Shuffle each underlying examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
@property
def num_shards(self) -> int:
return min(ex_iterable.num_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(
self, num_shards: int, index: int, contiguous=True
) -> "CyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return CyclingMultiSourcesExamplesIterable(
[iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables],
stopping_strategy=self.stopping_strategy,
        )
class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
It doesn't require the examples iterables to always yield the same columns.
Instead, this is handled by the `IterableDataset` class or `FormattedExamplesIterable`.
For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `FormattedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types_on_example`.
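    Example (illustrative sketch): two plain `ExamplesIterable` sources with different
    columns are simply chained. The `gen_a` / `gen_b` generators below are hypothetical
    stand-ins for real shard generators.
    ```py
    >>> def gen_a():
    ...     yield from enumerate([{"text": "foo"}, {"text": "bar"}])
    >>> def gen_b():
    ...     yield from enumerate([{"label": 0}])
    >>> ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(
    ...     [ExamplesIterable(gen_a, {}), ExamplesIterable(gen_b, {})]
    ... )
    >>> list(ex_iterable)
    [(0, {'text': 'foo'}), (1, {'text': 'bar'}), (0, {'label': 0})]
    ```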
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
super().__init__()
self.ex_iterables = ex_iterables
@property
def is_typed(self):
return self.ex_iterables[0].is_typed
@property
def features(self):
return self.ex_iterables[0].features
@property
def iter_arrow(self):
if all(ex_iterable.iter_arrow is not None for ex_iterable in self.ex_iterables):
return self._iter_arrow
def _init_state_dict(self) -> dict:
self._state_dict = {
"ex_iterable_idx": 0,
"ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables],
}
return self._state_dict
def __iter__(self):
ex_iterable_idx_start = self._state_dict["ex_iterable_idx"] if self._state_dict else 0
for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None):
yield from ex_iterable
if self._state_dict:
self._state_dict["ex_iterable_idx"] += 1
def _iter_arrow(self):
ex_iterable_idx_start = self._state_dict["ex_iterable_idx"] if self._state_dict else 0
for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None):
yield from ex_iterable.iter_arrow()
if self._state_dict:
self._state_dict["ex_iterable_idx"] += 1
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Shuffle the list of examples iterable, as well as each underlying examples iterable."""
rng = deepcopy(generator)
ex_iterables = list(self.ex_iterables)
rng.shuffle(ex_iterables)
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
@property
def num_shards(self) -> int:
return min(ex_iterable.num_shards for ex_iterable in self.ex_iterables)
def shard_data_sources(
self, num_shards: int, index: int, contiguous=True
) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return VerticallyConcatenatedMultiSourcesExamplesIterable(
[iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables]
        )
class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
"""
HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
This check is done once when yielding the first example.
However it doesn't fill missing columns with None.
Instead, this is handled by the `IterableDataset` class or `FormattedExamplesIterable`.
For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
Then for each example, `IterableDataset` and `FormattedExamplesIterable` automatically fill missing columns with None.
This is done with `_apply_feature_types_on_example`.
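    Example (illustrative sketch): the hypothetical `gen_text` / `gen_label` generators
    stand in for real shard generators; examples are merged column-wise and their keys
    are joined with "_".
    ```py
    >>> def gen_text():
    ...     yield from enumerate([{"text": "foo"}, {"text": "bar"}])
    >>> def gen_label():
    ...     yield from enumerate([{"label": 0}, {"label": 1}])
    >>> ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(
    ...     [ExamplesIterable(gen_text, {}), ExamplesIterable(gen_label, {})]
    ... )
    >>> list(ex_iterable)
    [('0_0', {'text': 'foo', 'label': 0}), ('1_1', {'text': 'bar', 'label': 1})]
    ```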
"""
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
super().__init__()
self.ex_iterables = ex_iterables
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterables[0].is_typed
@property
def features(self):
return self.ex_iterables[0].features
def _init_state_dict(self) -> dict:
self._state_dict = {"ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables]}
return self._state_dict
def __iter__(self):
ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
for i in itertools.count():
keys = []
examples = []
for ex_iterator in list(ex_iterators):
try:
key, example = next(ex_iterator)
keys.append(key)
examples.append(example)
except StopIteration:
ex_iterators.remove(ex_iterator)
if ex_iterators:
if i == 0:
_check_column_names([column_name for example in examples for column_name in example])
new_example = {}
for example in examples:
new_example.update(example)
new_key = "_".join(str(key) for key in keys)
yield new_key, new_example
else:
break
def shuffle_data_sources(
self, generator: np.random.Generator
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
return self
@property
def num_shards(self) -> int:
return 1
def shard_data_sources(
self, num_shards: int, index: int, contiguous=True
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return HorizontallyConcatenatedMultiSourcesExamplesIterable(
[iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables]
        )
class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
def __init__(
self,
ex_iterables: List[_BaseExamplesIterable],
generator: np.random.Generator,
probabilities: Optional[List[float]] = None,
stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
):
super().__init__(ex_iterables, stopping_strategy)
self.generator = deepcopy(generator)
self.probabilities = probabilities
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterables[0].is_typed
@property
def features(self):
return self.ex_iterables[0].features
def _get_indices_iterator(self):
rng = deepcopy(self.generator)
num_sources = len(self.ex_iterables)
random_batch_size = 1000
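        # indices are drawn in blocks of `random_batch_size` so that resuming from a
        # state_dict only requires the RNG state at the start of the current block
        # plus an offset into it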
# this is an infinite iterator that randomly samples the index of the source to pick examples from
index_offset = self._state_dict["bit_generator_index_offset"] if self._state_dict else 0
if self._state_dict:
rng.bit_generator.state = self._state_dict["bit_generator_state"]
if self.probabilities is None:
while True:
for i in islice(rng.integers(0, num_sources, size=random_batch_size), index_offset, None):
index_offset = (index_offset + 1) % random_batch_size
if self._state_dict:
self._state_dict["bit_generator_index_offset"] = index_offset
if index_offset == 0:
self._state_dict["bit_generator_state"] = rng.bit_generator.state
yield int(i)
else:
while True:
for i in islice(
rng.choice(num_sources, size=random_batch_size, p=self.probabilities), index_offset, None
):
index_offset = (index_offset + 1) % random_batch_size
if self._state_dict:
self._state_dict["bit_generator_index_offset"] = index_offset
if index_offset == 0:
self._state_dict["bit_generator_state"] = rng.bit_generator.state
yield int(i)
def _init_state_dict(self) -> dict:
self._state_dict = {
"bit_generator_state": self.generator.bit_generator.state,
"bit_generator_index_offset": 0,
"ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables],
"previous_states": [None] * len(self.ex_iterables),
"is_exhausted": [False] * len(self.ex_iterables),
}
return self._state_dict
def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Shuffle the data sources of each wrapped examples iterable."""
ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
return RandomlyCyclingMultiSourcesExamplesIterable(
ex_iterables,
generator=generator,
probabilities=self.probabilities,
stopping_strategy=self.stopping_strategy,
)
def shard_data_sources(
self, num_shards: int, index: int, contiguous=True
) -> "RandomlyCyclingMultiSourcesExamplesIterable":
"""Either keep only the requested shard, or propagate the request to the underlying iterable."""
return RandomlyCyclingMultiSourcesExamplesIterable(
[iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables],
self.generator,
self.probabilities,
self.stopping_strategy,
        )
class MappedExamplesIterable(_BaseExamplesIterable):
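    """Wraps another examples iterable and applies `function` to its examples on the fly.
    This is the iterable counterpart of `Dataset.map`: it supports batched mapping, column
    removal, formatting, and checkpointing through `state_dict`/`load_state_dict`.
    """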
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[List[str]] = None,
fn_kwargs: Optional[dict] = None,
formatting: Optional["FormattingConfig"] = None,
features: Optional[Features] = None,
):
super().__init__()
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.drop_last_batch = drop_last_batch
self.remove_columns = remove_columns
self.with_indices = with_indices
self.input_columns = input_columns
self.fn_kwargs = fn_kwargs or {}
self.formatting = formatting # required for iter_arrow
self._features = features
# sanity checks
if formatting and formatting.format_type == "arrow":
# batch_size should match for iter_arrow
if not isinstance(ex_iterable, RebatchedArrowExamplesIterable):
raise ValueError(
"The Arrow-formatted MappedExamplesIterable has underlying iterable"
f"that is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable."
)
elif ex_iterable.batch_size != (batch_size if batched else 1):
raise ValueError(
f"The Arrow-formatted MappedExamplesIterable has batch_size={batch_size if batched else 1} which is"
f"different from {ex_iterable.batch_size=} from its underlying iterable."
)
@property
def iter_arrow(self):
if self.formatting and self.formatting.format_type == "arrow":
return self._iter_arrow
@property
def is_typed(self):
return self.features is not None # user has extracted features
@property
def features(self):
return self._features
def _init_state_dict(self) -> dict:
self._state_dict = {
"ex_iterable": self.ex_iterable._init_state_dict(),
"previous_state": None,
"num_examples_since_previous_state": 0,
"previous_state_example_idx": 0,
}
return self._state_dict
def __iter__(self):
if self.formatting and self.formatting.format_type == "arrow":
formatter = PythonFormatter()
for key, pa_table in self._iter_arrow(max_chunksize=1):
yield key, formatter.format_row(pa_table)
else:
yield from self._iter()
def _iter(self):
current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0
if self._state_dict and self._state_dict["previous_state"]:
self.ex_iterable.load_state_dict(self._state_dict["previous_state"])
num_examples_to_skip = self._state_dict["num_examples_since_previous_state"]
else:
num_examples_to_skip = 0
iterator = iter(self.ex_iterable)
if self.formatting:
formatter = get_formatter(self.formatting.format_type)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self.batched:
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] = current_idx
for key, example in iterator:
                # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
iterator_batch = (
iterator
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
key_examples_list = [(key, example)] + list(iterator_batch)
keys, examples = zip(*key_examples_list)
if (
self.drop_last_batch
and self.batch_size is not None
and self.batch_size > 0
and len(examples) < self.batch_size
): # ignore last batch
return
batch = _examples_to_batch(examples)
batch = format_dict(batch) if format_dict else batch
# then apply the transform
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
inputs_to_merge = dict(batch)
processed_inputs = self.function(*function_args, **self.fn_kwargs)
# this logic mimics the one in Dataset.map
if self.remove_columns:
for c in self.remove_columns:
if c in inputs_to_merge:
del inputs_to_merge[c]
if processed_inputs is inputs and c in processed_inputs:
del processed_inputs[c]
transformed_batch = {**inputs_to_merge, **processed_inputs}
if transformed_batch:
first_col = next(iter(transformed_batch))
bad_cols = [
col
for col in transformed_batch
if len(transformed_batch[col]) != len(transformed_batch[first_col])
]
if bad_cols:
raise ValueError(
f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} "
f"while {first_col} has length {len(transformed_batch[first_col])}."
)
if self.features:
for c in self.features.keys():
if c not in transformed_batch:
transformed_batch[c] = [None] * len(transformed_batch[first_col])
transformed_batch = self.features.decode_batch(transformed_batch)
# the new key is the concatenation of the examples keys from the batch
new_key = "_".join(str(key) for key in keys)
# yield one example at a time from the transformed batch
for example in _batch_to_examples(transformed_batch):
current_idx += 1
if self._state_dict:
self._state_dict["num_examples_since_previous_state"] += 1
if num_examples_to_skip > 0:
num_examples_to_skip -= 1
continue
yield new_key, example
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] = current_idx
else:
for key, example in iterator:
# If not batched, we can apply the transform and yield the example directly
# first copy the example, since we might drop some keys
example = dict(example)
example = format_dict(example) if format_dict else example
# then apply the transform
inputs = example
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
processed_inputs = self.function(*function_args, **self.fn_kwargs)
inputs_to_merge = dict(example)
# this logic mimics the one in Dataset.map
if self.remove_columns:
for c in self.remove_columns:
if c in inputs_to_merge:
del inputs_to_merge[c]
if processed_inputs is inputs and c in processed_inputs:
del processed_inputs[c]
transformed_example = {**inputs_to_merge, **processed_inputs}
if self.features:
for c in self.features.keys():
if c not in transformed_example:
transformed_example[c] = None
transformed_example = self.features.decode_example(transformed_example)
current_idx += 1
if self._state_dict:
self._state_dict["previous_state_example_idx"] += 1
yield key, transformed_example
def _iter_arrow(self, max_chunksize: Optional[int] = None) -> Iterator[Tuple[Key, pa.Table]]:
if self.ex_iterable.iter_arrow:
iterator = self.ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(
self.ex_iterable,
batch_size=self.batch_size if self.batched else 1,
drop_last_batch=self.drop_last_batch,
)
if self._state_dict and self._state_dict["previous_state"]:
self.ex_iterable.load_state_dict(self._state_dict["previous_state"])
num_examples_to_skip = self._state_dict["num_examples_since_previous_state"]
else:
num_examples_to_skip = 0
if self._state_dict and max_chunksize is not None:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0
for key, pa_table in iterator:
if (
self.batched
and self.batch_size is not None
and len(pa_table) < self.batch_size
and self.drop_last_batch
):
return
# first build the batch
function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
if self.with_indices:
if self.batched:
function_args.append([current_idx + i for i in range(len(pa_table))])
else:
function_args.append(current_idx)
# then apply the transform
output_table = self.function(*function_args, **self.fn_kwargs)
if not isinstance(output_table, pa.Table):
raise TypeError(
f"Provided `function` which is applied to pyarrow tables returns a variable of type "
f"{type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset."
)
            # we don't need to merge results for consistency with Dataset.map which merges iff both input and output are dicts
# then remove the unwanted columns
if self.remove_columns:
for column in self.remove_columns:
if column in output_table.column_names:
output_table = output_table.remove_column(output_table.column_names.index(column))
# return output
if max_chunksize is None:
current_idx += len(pa_table)
if self._state_dict:
self._state_dict["previous_state_example_idx"] += len(pa_table)
yield key, output_table
else:
for i, pa_subtable in enumerate(output_table.to_reader(max_chunksize=max_chunksize)):
current_idx += 1
if self._state_dict:
self._state_dict["num_examples_since_previous_state"] += 1
if num_examples_to_skip > 0:
num_examples_to_skip -= 1
continue
yield f"{key}_{i}", pa_subtable
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] += len(pa_table)
def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return MappedExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
drop_last_batch=self.drop_last_batch,
remove_columns=self.remove_columns,
fn_kwargs=self.fn_kwargs,
formatting=self.formatting,
features=self.features,
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "MappedExamplesIterable":
"""Keep only the requested shard."""
return MappedExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
drop_last_batch=self.drop_last_batch,
remove_columns=self.remove_columns,
fn_kwargs=self.fn_kwargs,
formatting=self.formatting,
features=self.features,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class FilteredExamplesIterable(_BaseExamplesIterable):
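    """Wraps another examples iterable and only yields the examples for which `function`
    returns a truthy value. This is the iterable counterpart of `Dataset.filter`.
    """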
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
function: Callable,
with_indices: bool = False,
input_columns: Optional[List[str]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
formatting: Optional["FormattingConfig"] = None,
):
super().__init__()
self.ex_iterable = ex_iterable
self.function = function
self.batched = batched
self.batch_size = batch_size
self.with_indices = with_indices
self.input_columns = input_columns
self.fn_kwargs = fn_kwargs or {}
self.formatting = formatting # required for iter_arrow
# sanity checks
if formatting and formatting.format_type == "arrow":
# batch_size should match for iter_arrow
if not isinstance(ex_iterable, RebatchedArrowExamplesIterable):
raise ValueError(
"The Arrow-formatted FilteredExamplesIterable has underlying iterable"
f"that is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable."
)
elif ex_iterable.batch_size != (batch_size if batched else 1):
raise ValueError(
f"The Arrow-formatted FilteredExamplesIterable has batch_size={batch_size if batched else 1} which is"
f"different from {ex_iterable.batch_size=} from its underlying iterable."
)
@property
def iter_arrow(self):
if self.formatting and self.formatting.format_type == "arrow":
return self._iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = {
"ex_iterable": self.ex_iterable._init_state_dict(),
"previous_state": None,
"num_examples_since_previous_state": 0,
"previous_state_example_idx": 0,
}
return self._state_dict
def __iter__(self):
if self.formatting and self.formatting.format_type == "arrow":
formatter = PythonFormatter()
for key, pa_table in self._iter_arrow(max_chunksize=1):
yield key, formatter.format_row(pa_table)
else:
yield from self._iter()
def _iter(self):
current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0
if self._state_dict and self._state_dict["previous_state"]:
self.ex_iterable.load_state_dict(self._state_dict["previous_state"])
num_examples_to_skip = self._state_dict["num_examples_since_previous_state"]
else:
num_examples_to_skip = 0
iterator = iter(self.ex_iterable)
if self.formatting:
formatter = get_formatter(self.formatting.format_type)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self.batched:
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] = current_idx
for key, example in iterator:
                # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
iterator_batch = (
iterator
if self.batch_size is None or self.batch_size <= 0
else islice(iterator, self.batch_size - 1)
)
key_examples_list = [(key, example)] + list(iterator_batch)
keys, examples = zip(*key_examples_list)
batch = _examples_to_batch(examples)
batch = format_dict(batch) if format_dict else batch
# then compute the mask for the batch
inputs = batch
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append([current_idx + i for i in range(len(key_examples_list))])
mask = self.function(*function_args, **self.fn_kwargs)
# yield one example at a time from the batch
for key_example, to_keep in zip(key_examples_list, mask):
current_idx += 1
if self._state_dict:
self._state_dict["num_examples_since_previous_state"] += 1
if num_examples_to_skip > 0:
num_examples_to_skip -= 1
continue
if to_keep:
yield key_example
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] = current_idx
else:
for key, example in iterator:
            # If not batched, we can apply the filtering function directly
example = dict(example)
inputs = format_dict(example) if format_dict else example
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
if self.with_indices:
function_args.append(current_idx)
to_keep = self.function(*function_args, **self.fn_kwargs)
current_idx += 1
if self._state_dict:
self._state_dict["previous_state_example_idx"] += 1
if to_keep:
yield key, example
def _iter_arrow(self, max_chunksize: Optional[int] = None):
if self.ex_iterable.iter_arrow:
iterator = self.ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
if self._state_dict and self._state_dict["previous_state"]:
self.ex_iterable.load_state_dict(self._state_dict["previous_state"])
num_examples_to_skip = self._state_dict["num_examples_since_previous_state"]
else:
num_examples_to_skip = 0
if self._state_dict and max_chunksize is not None:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0
for key, pa_table in iterator:
if (
self.batched
and self.batch_size is not None
and len(pa_table) < self.batch_size
and self.drop_last_batch
):
return
function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
if self.with_indices:
if self.batched:
function_args.append([current_idx + i for i in range(len(pa_table))])
else:
function_args.append(current_idx)
# then apply the transform
mask = self.function(*function_args, **self.fn_kwargs)
# return output
if self.batched:
output_table = pa_table.filter(mask)
elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
output_table = pa_table
else:
output_table = pa_table.slice(0, 0)
if max_chunksize is None:
current_idx += len(pa_table)
if self._state_dict:
self._state_dict["previous_state_example_idx"] += len(pa_table)
if len(output_table) > 0:
yield key, output_table
else:
for i, pa_subtable in enumerate(output_table.to_reader(max_chunksize=max_chunksize)):
current_idx += 1
if self._state_dict:
self._state_dict["num_examples_since_previous_state"] += 1
if num_examples_to_skip > 0:
num_examples_to_skip -= 1
continue
yield f"{key}_{i}", pa_subtable
if self._state_dict:
self._state_dict["previous_state"] = self.ex_iterable.state_dict()
self._state_dict["num_examples_since_previous_state"] = 0
self._state_dict["previous_state_example_idx"] += len(pa_table)
def shuffle_data_sources(self, seed: Optional[int]) -> "FilteredExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return FilteredExamplesIterable(
self.ex_iterable.shuffle_data_sources(seed),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
formatting=self.formatting,
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "FilteredExamplesIterable":
"""Keep only the requested shard."""
return FilteredExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
function=self.function,
with_indices=self.with_indices,
input_columns=self.input_columns,
batched=self.batched,
batch_size=self.batch_size,
formatting=self.formatting,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class BufferShuffledExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
super().__init__()
self.ex_iterable = ex_iterable
self.buffer_size = buffer_size
self.generator = generator
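        # Note: this is an approximate shuffle based on a fixed-size buffer: each new
        # example replaces a randomly picked buffered one, so a larger `buffer_size`
        # gives a better shuffle (a buffer at least as large as the dataset is exact).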
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = self.ex_iterable._init_state_dict()
self._original_state_dict = self.state_dict()
return self._state_dict
def load_state_dict(self, state_dict: dict) -> dict:
if self._state_dict:
if state_dict != self._original_state_dict:
logger.warning(
"Loading a state dict of a shuffle buffer of a dataset without the buffer content."
"The shuffle buffer will be refilled before starting to yield new examples."
)
return super().load_state_dict(state_dict)
@staticmethod
def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
while True:
yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
def __iter__(self):
buffer_size = self.buffer_size
rng = deepcopy(self.generator)
indices_iterator = self._iter_random_indices(rng, buffer_size)
# this is the shuffle buffer that we keep in memory
mem_buffer = []
for x in self.ex_iterable:
            if len(mem_buffer) == buffer_size:  # if the buffer is full, pick an example from it
i = next(indices_iterator)
yield mem_buffer[i]
mem_buffer[i] = x # replace the picked example by a new one
else: # otherwise, keep filling the buffer
mem_buffer.append(x)
# when we run out of examples, we shuffle the remaining examples in the buffer and yield them
rng.shuffle(mem_buffer)
yield from mem_buffer
def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
"""Shuffle the wrapped examples iterable as well as the shuffling buffer."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "BufferShuffledExamplesIterable":
"""Keep only the requested shard."""
return BufferShuffledExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
buffer_size=self.buffer_size,
generator=self.generator,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class SkipExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
n: int,
block_sources_order_when_shuffling: bool = True,
split_when_sharding: bool = True,
):
super().__init__()
self.ex_iterable = ex_iterable
self.n = n
self.block_sources_order_when_shuffling = block_sources_order_when_shuffling
self.split_when_sharding = split_when_sharding
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = {"skipped": False, "ex_iterable": self.ex_iterable._init_state_dict()}
return self._state_dict
def __iter__(self):
ex_iterable_idx_start = 0 if self._state_dict and self._state_dict["skipped"] else self.n
if self._state_dict:
self._state_dict["skipped"] = True
yield from islice(self.ex_iterable, ex_iterable_idx_start, None)
@staticmethod
def split_number(num, n):
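        # distribute `num` as evenly as possible over `n` parts, e.g. split_number(10, 3) -> [4, 3, 3]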
quotient = num // n
remainder = num % n
result = [quotient] * n
for i in range(remainder):
result[i] += 1
return result
def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
"""May not shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
if self.block_sources_order_when_shuffling:
return self
else:
return SkipExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
n=self.n,
block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
split_when_sharding=self.split_when_sharding,
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SkipExamplesIterable":
"""Keep only the requested shard."""
if self.split_when_sharding:
return SkipExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
n=self.split_number(self.n, num_shards)[index],
block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
split_when_sharding=self.split_when_sharding,
)
else:
return self
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
class TakeExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
n: int,
block_sources_order_when_shuffling: bool = True,
split_when_sharding: bool = True,
):
super().__init__()
self.ex_iterable = ex_iterable
self.n = n
self.block_sources_order_when_shuffling = block_sources_order_when_shuffling
self.split_when_sharding = split_when_sharding
# TODO(QL): implement iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed
@property
def features(self):
return self.ex_iterable.features
def _init_state_dict(self) -> dict:
self._state_dict = {"num_taken": 0, "ex_iterable": self.ex_iterable._init_state_dict()}
return self._state_dict
def __iter__(self):
ex_iterable_num_taken = self._state_dict["num_taken"] if self._state_dict else 0
for key_example in islice(self.ex_iterable, self.n - ex_iterable_num_taken):
if self._state_dict:
self._state_dict["num_taken"] += 1
yield key_example
@staticmethod
def split_number(num, n):
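        # distribute `num` as evenly as possible over `n` parts, e.g. split_number(10, 3) -> [4, 3, 3]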
quotient = num // n
remainder = num % n
result = [quotient] * n
for i in range(remainder):
result[i] += 1
return result
def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
"""May not shuffle the wrapped examples iterable since it would take examples from other shards instead."""
if self.block_sources_order_when_shuffling:
return self
else:
return TakeExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
n=self.n,
block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
split_when_sharding=self.split_when_sharding,
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "TakeExamplesIterable":
"""Keep only the requested shard."""
if self.split_when_sharding:
return TakeExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
n=self.split_number(self.n, num_shards)[index],
block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
split_when_sharding=self.split_when_sharding,
)
else:
return TakeExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
n=self.n,
block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
split_when_sharding=self.split_when_sharding,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
@dataclass
class FormattingConfig:
format_type: Optional[str]
def __post_init__(self):
if self.format_type == "pandas":
raise NotImplementedError(
"The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
            )
class FormattedExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
formatting: Optional[FormattingConfig],
features: Optional[Features],
token_per_repo_id: Dict[str, Union[str, bool, None]],
):
super().__init__()
self.ex_iterable = ex_iterable
self._features = features
self.formatting = formatting
self.token_per_repo_id = token_per_repo_id
@property
def iter_arrow(self):
if self.ex_iterable.iter_arrow and (not self.formatting or self.formatting.format_type == "arrow"):
return self._iter_arrow
@property
def is_typed(self):
return self.ex_iterable.is_typed or self._features is not None
@property
def features(self):
return self._features
def _init_state_dict(self) -> dict:
self._state_dict = self.ex_iterable._init_state_dict()
return self._state_dict
def __iter__(self):
if not self.formatting or self.formatting.format_type == "arrow":
formatter = PythonFormatter()
else:
formatter = get_formatter(
self.formatting.format_type,
features=self._features if not self.ex_iterable.is_typed else None,
token_per_repo_id=self.token_per_repo_id,
)
if self.ex_iterable.iter_arrow:
# feature casting (inc column addition) handled within self._iter_arrow()
for key, pa_table in self._iter_arrow():
batch = formatter.format_batch(pa_table)
for example in _batch_to_examples(batch):
yield key, example
else:
format_dict = (
formatter.recursive_tensorize
if isinstance(formatter, TensorFormatter)
else cast_to_python_objects # cast in case features is None
)
for key, example in self.ex_iterable:
# don't apply feature types if already applied by ex_iterable (e.g. in case of chained with_format)
if self.features and not self.ex_iterable.is_typed:
example = _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self.token_per_repo_id
)
yield key, format_dict(example)
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
        if not self.features:
            # no features to cast or columns to add: forward the underlying Arrow tables as-is
            yield from self.ex_iterable._iter_arrow()
            return
for key, pa_table in self.ex_iterable._iter_arrow():
columns = set(pa_table.column_names)
schema = self.features.arrow_schema
# add missing columns
for column_name in self.features:
if column_name not in columns:
col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
pa_table = pa_table.append_column(column_name, col)
if pa_table.schema != schema:
pa_table = cast_table_to_features(pa_table, self.features)
yield key, pa_table
def shuffle_data_sources(self, generator: np.random.Generator) -> "FormattedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
return FormattedExamplesIterable(
self.ex_iterable.shuffle_data_sources(generator),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
formatting=self.formatting,
)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "FormattedExamplesIterable":
"""Keep only the requested shard."""
return FormattedExamplesIterable(
self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
features=self.features,
token_per_repo_id=self.token_per_repo_id,
formatting=self.formatting,
)
@property
def num_shards(self) -> int:
        return self.ex_iterable.num_shards
@dataclass
class ShufflingConfig:
generator: np.random.Generator
    _original_seed: Optional[int] = None
@dataclass
class DistributedConfig:
rank: int
    world_size: int
class IterableDataset(DatasetInfoMixin):
"""A Dataset backed by an iterable."""
def __init__(
self,
ex_iterable: _BaseExamplesIterable,
info: Optional[DatasetInfo] = None,
split: Optional[NamedSplit] = None,
formatting: Optional[FormattingConfig] = None,
shuffling: Optional[ShufflingConfig] = None,
distributed: Optional[DistributedConfig] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
):
if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
raise RuntimeError(
"The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
"Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
)
info = info.copy() if info is not None else DatasetInfo()
DatasetInfoMixin.__init__(self, info=info, split=split)
self._ex_iterable = copy.copy(ex_iterable)
self._formatting = formatting
self._shuffling = shuffling
self._distributed = distributed
self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
self._epoch: Union[int, "torch.Tensor"] = _maybe_share_with_torch_persistent_workers(0)
self._starting_state_dict: Optional[dict] = None
self._prepared_ex_iterable = self._prepare_ex_iterable_for_iteration()
self._state_dict = self._prepared_ex_iterable._init_state_dict()
_maybe_add_torch_iterable_dataset_parent_class(self.__class__)
def state_dict(self) -> dict:
"""Get the current state_dict of the dataset.
It corresponds to the state at the latest example it yielded.
Resuming returns exactly where the checkpoint was saved except in two cases:
1. examples from shuffle buffers are lost when resuming and the buffers are refilled with new data
2. combinations of `.with_format(arrow)` and batched `.map()` may skip one batch.
Returns:
`dict`
Example:
```py
>>> from datasets import Dataset, concatenate_datasets
>>> ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
>>> for idx, example in enumerate(ds):
... print(example)
... if idx == 2:
... state_dict = ds.state_dict()
... print("checkpoint")
... break
>>> ds.load_state_dict(state_dict)
>>> print(f"restart from checkpoint")
>>> for example in ds:
... print(example)
```
which returns:
```
{'a': 0}
{'a': 1}
{'a': 2}
checkpoint
restart from checkpoint
{'a': 3}
{'a': 4}
{'a': 5}
```
```py
>>> from torchdata.stateful_dataloader import StatefulDataLoader
>>> ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> dataloader = StatefulDataLoader(ds, batch_size=32, num_workers=4)
>>> # checkpoint
>>> state_dict = dataloader.state_dict() # uses ds.state_dict() under the hood
>>> # resume from checkpoint
>>> dataloader.load_state_dict(state_dict) # uses ds.load_state_dict() under the hood
```
"""
return copy.deepcopy(self._state_dict)
def load_state_dict(self, state_dict: dict) -> None:
"""Load the state_dict of the dataset.
The iteration will restart at the next example from when the state was saved.
Resuming returns exactly where the checkpoint was saved except in two cases:
1. examples from shuffle buffers are lost when resuming and the buffers are refilled with new data
2. combinations of `.with_format(arrow)` and batched `.map()` may skip one batch.
Example:
```py
>>> from datasets import Dataset, concatenate_datasets
>>> ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
>>> for idx, example in enumerate(ds):
... print(example)
... if idx == 2:
... state_dict = ds.state_dict()
... print("checkpoint")
... break
>>> ds.load_state_dict(state_dict)
>>> print(f"restart from checkpoint")
>>> for example in ds:
... print(example)
```
which returns:
```
{'a': 0}
{'a': 1}
{'a': 2}
checkpoint
restart from checkpoint
{'a': 3}
{'a': 4}
{'a': 5}
```
```py
>>> from torchdata.stateful_dataloader import StatefulDataLoader
>>> ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
>>> dataloader = StatefulDataLoader(ds, batch_size=32, num_workers=4)
>>> # checkpoint
>>> state_dict = dataloader.state_dict() # uses ds.state_dict() under the hood
>>> # resume from checkpoint
>>> dataloader.load_state_dict(state_dict) # uses ds.load_state_dict() under the hood
```
"""
self._prepared_ex_iterable.load_state_dict(state_dict)
self._starting_state_dict = state_dict
def __repr__(self):
return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n num_shards: {self.num_shards}\n}})"
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-add torch shared memory, since shared memory is not always kept when pickling
self._epoch = _maybe_share_with_torch_persistent_workers(self._epoch)
# Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
_maybe_add_torch_iterable_dataset_parent_class(self.__class__)
def _head(self, n=5):
return _examples_to_batch(list(self.take(n)))
@property
def epoch(self) -> int:
return int(self._epoch)
def _effective_generator(self):
if self._shuffling and self.epoch == 0:
return self._shuffling.generator
elif self._shuffling:
# Create effective seed using self.epoch (we subtract in order to avoid overflow in long_scalars)
effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self.epoch
effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
return np.random.default_rng(effective_seed)
else:
raise ValueError("This dataset is not shuffled")
@property
def num_shards(self) -> int:
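# In the distributed case, each node keeps `ex_iterable.num_shards // world_size` shards when the
# shards divide evenly; otherwise every node iterates over all shards and skips examples at
# iteration time (see `_prepare_ex_iterable_for_iteration`).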
if self._distributed and self._ex_iterable.num_shards % self._distributed.world_size == 0:
return self._ex_iterable.num_shards // self._distributed.world_size
return self._ex_iterable.num_shards
@property
def n_shards(self) -> int: # backward compatibility
return self.num_shards
def _iter_pytorch(self):
ex_iterable = self._prepare_ex_iterable_for_iteration()
# Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0)
# See https://github.com/fsspec/gcsfs/issues/379
fsspec.asyn.reset_lock()
# check if there aren't too many workers
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if self._is_main_process() and ex_iterable.num_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.num_shards={ex_iterable.num_shards}). "
f"Stopping {worker_info.num_workers - ex_iterable.num_shards} dataloader workers."
)
logger.info(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.num_shards={ex_iterable.num_shards}. "
f"To enable more parallelism, please split the dataset in more files than {ex_iterable.num_shards}."
)
# split workload
_log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
shards_indices = ex_iterable.split_shard_indices_by_worker(
num_shards=worker_info.num_workers, index=worker_info.id, contiguous=False
)
if shards_indices:
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.num_shards} shards."
)
ex_iterable = ex_iterable.shard_data_sources(
num_shards=worker_info.num_workers, index=worker_info.id, contiguous=False
)
self._state_dict = ex_iterable._init_state_dict()
if self._starting_state_dict:
ex_iterable.load_state_dict(self._starting_state_dict)
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=1)
for key, pa_table in iterator:
yield formatter.format_row(pa_table)
return
else:
for key, example in ex_iterable:
if self.features and not ex_iterable.is_typed:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_example`.
example = _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
yield format_dict(example) if format_dict else example
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.num_shards} shards."
)
else:
logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.num_shards}<{worker_info.num_workers})."
)
def _is_main_process(self):
if self._distributed and self._distributed.rank > 0:
return False
if "torch" in sys.modules:
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None and worker_info.id > 0:
return False
return True
def _prepare_ex_iterable_for_iteration(
self, batch_size: int = 1, drop_last_batch: bool = False
) -> _BaseExamplesIterable:
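# Build the iterable actually used for iteration: re-batch Arrow examples when an arrow-aware
# format is requested, apply the shuffling generator, assign shards (or example steps) to this
# node in the distributed case, then restore any checkpointed state_dict.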
ex_iterable = self._ex_iterable
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
ex_iterable = RebatchedArrowExamplesIterable(
ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch
)
if self._shuffling:
ex_iterable = ex_iterable.shuffle_data_sources(self._effective_generator())
else:
ex_iterable = ex_iterable
if self._distributed:
rank = self._distributed.rank
world_size = self._distributed.world_size
if ex_iterable.num_shards % world_size == 0:
if self._is_main_process():
num_shards_per_node = ex_iterable.num_shards // world_size
plural = "s" if num_shards_per_node > 1 else ""
logger.info(
f"Assigning {num_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
)
ex_iterable = ex_iterable.shard_data_sources(num_shards=world_size, index=rank, contiguous=False)
else:
if self._is_main_process():
logger.info(
f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
)
logger.info(
f"It is more optimized to distribute the dataset shards (or data sources) across nodes. "
f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. "
f"The current dataset has {ex_iterable.num_shards} which is not a factor of {world_size}"
)
ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
self._state_dict = ex_iterable._init_state_dict()
if self._starting_state_dict:
ex_iterable.load_state_dict(self._starting_state_dict)
return ex_iterable
def __iter__(self):
if "torch" in sys.modules:
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
# We're a torch.utils.data.IterableDataset in a PyTorch worker process
yield from self._iter_pytorch()
return
ex_iterable = self._prepare_ex_iterable_for_iteration()
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=1)
for key, pa_table in iterator:
yield formatter.format_row(pa_table)
return
for key, example in ex_iterable:
if self.features and not ex_iterable.is_typed:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_example`.
example = _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
yield format_dict(example) if format_dict else example
def iter(self, batch_size: int, drop_last_batch: bool = False):
"""Iterate through the batches of size `batch_size`.
Args:
batch_size (`int`): size of each batch to yield.
drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be
dropped
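Example (an illustrative sketch with a toy in-memory dataset):
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"a": range(5)}).to_iterable_dataset()
>>> list(ds.iter(batch_size=2))
[{'a': [0, 1]}, {'a': [2, 3]}, {'a': [4]}]
>>> list(ds.iter(batch_size=2, drop_last_batch=True))
[{'a': [0, 1]}, {'a': [2, 3]}]
```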
"""
if self._formatting:
formatter = get_formatter(self._formatting.format_type, features=self.features)
format_dict = (
formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
)
else:
format_dict = None
ex_iterable = self._prepare_ex_iterable_for_iteration(batch_size=batch_size, drop_last_batch=drop_last_batch)
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
if ex_iterable.iter_arrow:
iterator = ex_iterable.iter_arrow()
else:
iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
for key, pa_table in iterator:
yield formatter.format_batch(pa_table)
return
iterator = iter(ex_iterable)
for key, example in iterator:
# If batched, first build the batch
examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
if drop_last_batch and len(examples) < batch_size: # ignore last batch
return
batch = _examples_to_batch(examples)
if self.features and not ex_iterable.is_typed:
# `IterableDataset` automatically fills missing columns with None.
# This is done with `_apply_feature_types_on_batch`.
batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
yield format_dict(batch) if format_dict else batch
@staticmethod
def from_generator(
generator: Callable,
features: Optional[Features] = None,
gen_kwargs: Optional[dict] = None,
split: NamedSplit = Split.TRAIN,
) -> "IterableDataset":
"""Create an Iterable Dataset from a generator.
Args:
generator (`Callable`):
A generator function that `yields` examples.
features (`Features`, *optional*):
Dataset features.
gen_kwargs(`dict`, *optional*):
Keyword arguments to be passed to the `generator` callable.
You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
This can be used to improve shuffling and when iterating over the dataset with multiple workers.
split ([`NamedSplit`], defaults to `Split.TRAIN`):
Split name to be assigned to the dataset.
<Added version="2.21.0"/>
Returns:
`IterableDataset`
Example:
```py
>>> def gen():
... yield {"text": "Good", "label": 0}
... yield {"text": "Bad", "label": 1}
...
>>> ds = IterableDataset.from_generator(gen)
```
```py
>>> def gen(shards):
... for shard in shards:
... with open(shard) as f:
... for line in f:
... yield {"line": line}
...
>>> shards = [f"data{i}.txt" for i in range(32)]
>>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
>>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
>>> from torch.utils.data import DataLoader
>>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
```
"""
from .io.generator import GeneratorDatasetInputStream
return GeneratorDatasetInputStream(
generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, split=split
).read()
@staticmethod
def from_spark(
df: "pyspark.sql.DataFrame",
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
**kwargs,
) -> "IterableDataset":
"""Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
Args:
df (`pyspark.sql.DataFrame`):
The DataFrame containing the desired data.
split (`NamedSplit`, *optional*):
Split name to be assigned to the dataset.
features (`Features`, *optional*):
Dataset features.
Returns:
[`IterableDataset`]
Example:
```py
>>> df = spark.createDataFrame(
>>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
>>> columns=["id", "name"],
>>> )
>>> ds = IterableDataset.from_spark(df)
```
"""
from .io.spark import SparkDatasetReader
if sys.platform == "win32":
raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
return SparkDatasetReader(
df,
split=split,
features=features,
streaming=True,
**kwargs,
).read()
@staticmethod
def from_file(filename: str) -> "IterableDataset":
"""Instantiate a IterableDataset from Arrow table at filename.
Args:
filename (`str`):
File name of the dataset.
Returns:
[`IterableDataset`]
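Example (illustrative; `path/to/dataset.arrow` stands in for any Arrow cache file written by `datasets`):
```py
>>> ds = IterableDataset.from_file("path/to/dataset.arrow")
>>> ds.features is not None  # features are inferred from the Arrow schema
True
```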
"""
pa_table_schema = read_schema_from_file(filename)
inferred_features = Features.from_arrow_schema(pa_table_schema)
ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDataset":
"""
Return a dataset with the specified format.
The 'pandas' format is currently not implemented.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'arrow', 'jax']`.
`None` means it returns python objects (default).
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds = ds.with_format("torch")
>>> next(iter(ds))
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
type = get_format_type_from_alias(type)
# TODO(QL): add format_kwargs
# TODO(QL): add format_columns and return_all_columns
# TODO(QL): add pandas format
return IterableDataset(
ex_iterable=self._ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=FormattingConfig(format_type=type),
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
features: Optional[Features] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDataset":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
Note that the last batch may have less than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, default to identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
features (`[Features]`, *optional*, defaults to `None`):
Feature types of the resulting dataset.
fn_kwargs (`Dict`, *optional*, default `None`):
Keyword arguments to be passed to `function`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> list(ds.take(3))
[{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
if function is None:
function = identity_func
if fn_kwargs is None:
fn_kwargs = {}
ex_iterable = self._ex_iterable
# no need to apply features if ex_iterable is typed and if there was no cast_column()
input_features = (
None
if (ex_iterable.is_typed and (self._info.features is None or self._info.features == ex_iterable.features))
else self._info.features
)
if self._formatting and self._formatting.format_type == "arrow":
# apply formatting before iter_arrow to keep map examples iterable happy
ex_iterable = FormattedExamplesIterable(
ex_iterable,
formatting=copy.deepcopy(self._formatting),
features=input_features,
token_per_repo_id=self._token_per_repo_id,
)
ex_iterable = RebatchedArrowExamplesIterable(
ex_iterable, batch_size=batch_size if batched else 1, drop_last_batch=drop_last_batch
)
else:
if self._formatting and self._ex_iterable.iter_arrow:
ex_iterable = RebatchedArrowExamplesIterable(
self._ex_iterable, batch_size=batch_size if batched else 1, drop_last_batch=drop_last_batch
)
if self._formatting or input_features:
# apply formatting after iter_arrow to avoid re-encoding the examples
ex_iterable = FormattedExamplesIterable(
ex_iterable,
formatting=copy.deepcopy(self._formatting),
features=input_features,
token_per_repo_id=self._token_per_repo_id,
)
ex_iterable = MappedExamplesIterable(
ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
formatting=self._formatting,
features=features,
)
info = self.info.copy()
info.features = features
return IterableDataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDataset":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, default `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, default `None`):
Keyword arguments to be passed to `function`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds.take(3))
[{'label': 0, 'text': 'simplistic , silly and tedious .'},
{'label': 0,
'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
if isinstance(input_columns, str):
input_columns = [input_columns]
# We need the examples to be decoded for certain feature types like Image or Audio,
# format and type before filtering
ex_iterable = self._ex_iterable
if self._info.features or self._formatting:
ex_iterable = FormattedExamplesIterable(
ex_iterable,
formatting=self._formatting,
features=None if ex_iterable.is_typed else self._info.features,
token_per_repo_id=self._token_per_repo_id,
)
ex_iterable = FilteredExamplesIterable(
ex_iterable,
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
formatting=self._formatting,
)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDataset":
"""
Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> shuffled_ds = ds.shuffle(seed=42)
>>> list(shuffled_ds.take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
if generator is None:
generator = np.random.default_rng(seed)
else:
generator = deepcopy(generator)
shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
return IterableDataset(
ex_iterable=BufferShuffledExamplesIterable(
self._ex_iterable, buffer_size=buffer_size, generator=generator
),
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=shuffling,
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def set_epoch(self, epoch: int):
"""Set the current epoch; combined with a seeded `shuffle()`, it changes the effective shuffling seed at each epoch."""
self._epoch += epoch - self._epoch  # update torch value in shared memory in-place
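# Illustrative usage (not from the original source): call `set_epoch` before each epoch so that a
# seeded shuffle produces a different shard order and buffer sampling every epoch:
#
#   ds = ds.shuffle(seed=42, buffer_size=1000)
#   for epoch in range(num_epochs):
#       ds.set_epoch(epoch)
#       for example in ds:
#           ...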
def skip(self, n: int) -> "IterableDataset":
"""
Create a new [`IterableDataset`] that skips the first `n` elements.
Args:
n (`int`):
Number of elements to skip.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.skip(1)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'},
{'label': 1,
'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
```
"""
ex_iterable = SkipExamplesIterable(
self._ex_iterable,
n,
block_sources_order_when_shuffling=self._shuffling is None,
split_when_sharding=self._distributed is None,
)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def take(self, n: int) -> "IterableDataset":
"""
Create a new [`IterableDataset`] with only the first `n` elements.
Args:
n (`int`):
Number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
"""
ex_iterable = TakeExamplesIterable(
self._ex_iterable,
n,
block_sources_order_when_shuffling=self._shuffling is None,
split_when_sharding=self._distributed is None,
)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def shard(
self,
num_shards: int,
index: int,
contiguous: bool = True,
) -> "IterableDataset":
"""Return the `index`-nth shard from dataset split into `num_shards` pieces.
This shards deterministically. `dataset.shard(n, i)` splits the dataset into contiguous chunks,
so it can be easily concatenated back together after processing. If `dataset.num_shards % n == l`, then the
first `l` datasets each have `(dataset.num_shards // n) + 1` shards, and the remaining datasets have `(dataset.num_shards // n)` shards.
`datasets.concatenate_datasets([dset.shard(n, i) for i in range(n)])` returns a dataset with the same order as the original.
In particular, `dataset.shard(dataset.num_shards, i)` returns a dataset with 1 shard.
Note: `n` should be less than or equal to the number of shards in the dataset `dataset.num_shards`.
On the other hand, `dataset.shard(n, i, contiguous=False)` contains all the shards of the dataset whose index mod `n = i`.
Be sure to shard before using any randomizing operator (such as `shuffle`).
It is best if the shard operator is used early in the dataset pipeline.
Args:
num_shards (`int`):
How many shards to split the dataset into.
index (`int`):
Which shard to select and return.
contiguous (`bool`, defaults to `True`):
Whether to select contiguous blocks of indices for shards.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("amazon_polarity", split="train", streaming=True)
>>> ds
IterableDataset({
features: ['label', 'title', 'content'],
num_shards: 4
})
>>> ds.shard(num_shards=2, index=0)
IterableDataset({
features: ['label', 'title', 'content'],
num_shards: 2
})
```
"""
ex_iterable = self._ex_iterable.shard_data_sources(num_shards=num_shards, index=index, contiguous=contiguous)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
@property
def column_names(self) -> Optional[List[str]]:
"""Names of the columns in the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
>>> ds.column_names
['text', 'label']
```
"""
return list(self._info.features.keys()) if self._info.features is not None else None
def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
"""Add column to Dataset.
Args:
name (str): Column name.
column (list or np.array): Column data to be added.
Returns:
`IterableDataset`
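Example (an illustrative sketch with a toy in-memory dataset):
```py
>>> from datasets import Dataset
>>> ds = Dataset.from_dict({"text": ["good", "bad"]}).to_iterable_dataset()
>>> ds = ds.add_column("label", [0, 1])
>>> list(ds)
[{'text': 'good', 'label': 0}, {'text': 'bad', 'label': 1}]
```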
"""
return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
`IterableDataset`: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return self.rename_columns({original_column_name: new_column_name})
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
Args:
column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
Returns:
`IterableDataset`: A copy of the dataset with renamed columns
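Example (illustrative):
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> sorted(next(iter(ds)))
['movie_review', 'rating']
```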
"""
original_features = self._info.features.copy() if self._info.features else None
ds_iterable = self.map(
partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
)
if original_features is not None:
ds_iterable._info.features = Features(
{
column_mapping[col] if col in column_mapping.keys() else col: feature
for col, feature in original_features.items()
}
)
return ds_iterable
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
`IterableDataset`: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
>>> ds = ds.remove_columns("label")
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
original_features = self._info.features.copy() if self._info.features else None
ds_iterable = self.map(remove_columns=column_names)
if original_features is not None:
ds_iterable._info.features = original_features.copy()
for col, _ in original_features.items():
if col in column_names:
del ds_iterable._info.features[col]
return ds_iterable
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to select.
Returns:
`IterableDataset`: A copy of the dataset object with selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
>>> ds = ds.select_columns("text")
>>> next(iter(ds))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
if isinstance(column_names, str):
column_names = [column_names]
if self._info:
info = copy.deepcopy(self._info)
if self._info.features is not None:
missing_columns = set(column_names) - set(self._info.features.keys())
if missing_columns:
raise ValueError(
f"Column name {list(missing_columns)} not in the "
"dataset. Columns in the dataset: "
f"{list(self._info.features.keys())}."
)
info.features = Features({c: info.features[c] for c in column_names})
ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
return IterableDataset(
ex_iterable=ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=self._shuffling,
distributed=self._distributed,
token_per_repo_id=self._token_per_repo_id,
)
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature (`Feature`):
Target feature.
Returns:
`IterableDataset`
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
>>> ds.features
{'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
'english_transcription': Value(dtype='string', id=None),
'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
'path': Value(dtype='string', id=None),
'transcription': Value(dtype='string', id=None)}
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds.features
{'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
'english_transcription': Value(dtype='string', id=None),
'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
'path': Value(dtype='string', id=None),
'transcription': Value(dtype='string', id=None)}
```
"""
info = self._info.copy()
info.features[column] = feature
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def cast(
self,
features: Features,
) -> "IterableDataset":
"""
Cast the dataset to a new set of features.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
Returns:
`IterableDataset`: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> ds.features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds.features.copy()
>>> new_features["label"] = ClassLabel(names=["bad", "good"])
>>> new_features["text"] = Value("large_string")
>>> ds = ds.cast(new_features)
>>> ds.features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
info = self._info.copy()
info.features = features
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def _step(self, step: int, offset: int) -> "IterableDataset":
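# Keep one example out of every `step`, starting at `offset` (i.e. examples whose index % step == offset).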
ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
return IterableDataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def _resolve_features(self):
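# Return a dataset with known features: keep them if already set, reuse the typed
# ex_iterable's features, or infer them from the first few examples otherwise.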
if self.features is not None:
return self
elif self._ex_iterable.is_typed:
features = self._ex_iterable.features
else:
features = _infer_features_from_batch(self.with_format(None)._head())
info = self.info.copy()
info.features = features
return IterableDataset(
ex_iterable=self._ex_iterable,
info=info,
split=self._split,
formatting=self._formatting,
shuffling=copy.deepcopy(self._shuffling),
distributed=copy.deepcopy(self._distributed),
token_per_repo_id=self._token_per_repo_id,
)
def batch(self, batch_size: int, drop_last_batch: bool = False) -> "IterableDataset":
"""
Group samples from the dataset into batches.
Args:
batch_size (`int`): The number of samples in each batch.
drop_last_batch (`bool`, defaults to `False`): Whether to drop the last incomplete batch.
Example:
```py
>>> ds = load_dataset("some_dataset", streaming=True)
>>> batched_ds = ds.batch(batch_size=32)
```
"""
def batch_fn(unbatched):
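# Wrap each column of the incoming batch in a list so that the whole batch becomes a single example.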
return {k: [v] for k, v in unbatched.items()}
return self.map(batch_fn, batched=True, batch_size=batch_size, drop_last_batch=drop_last_batch)
class DatasetDict(dict):
"""A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
def _check_values_type(self):
for dataset in self.values():
if not isinstance(dataset, Dataset):
raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
def _check_values_features(self):
items = list(self.items())
for item_a, item_b in zip(items[:-1], items[1:]):
if item_a[1].features != item_b[1].features:
raise ValueError(
f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
for dataset in self.values():
if hasattr(dataset, "_data"):
del dataset._data
if hasattr(dataset, "_indices"):
del dataset._indices
def __getitem__(self, k) -> Dataset:
if isinstance(k, (str, NamedSplit)) or len(self) == 0:
return super().__getitem__(k)
else:
available_suggested_splits = [
split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
]
suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
raise KeyError(
f"Invalid key: {k}. Please first select a split. For example: "
f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
f"Available splits: {sorted(self)}"
)
@property
def data(self) -> Dict[str, Table]:
"""The Apache Arrow tables backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.data
```
"""
self._check_values_type()
return {k: dataset.data for k, dataset in self.items()}
@property
def cache_files(self) -> Dict[str, Dict]:
"""The cache files containing the Apache Arrow table backing each split.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cache_files
{'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
```
"""
self._check_values_type()
return {k: dataset.cache_files for k, dataset in self.items()}
@property
def num_columns(self) -> Dict[str, int]:
"""Number of columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_columns
{'test': 2, 'train': 2, 'validation': 2}
```
"""
self._check_values_type()
return {k: dataset.num_columns for k, dataset in self.items()}
@property
def num_rows(self) -> Dict[str, int]:
"""Number of rows in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.num_rows
{'test': 1066, 'train': 8530, 'validation': 1066}
```
"""
self._check_values_type()
return {k: dataset.num_rows for k, dataset in self.items()}
@property
def column_names(self) -> Dict[str, List[str]]:
"""Names of the columns in each split of the dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.column_names
{'test': ['text', 'label'],
'train': ['text', 'label'],
'validation': ['text', 'label']}
```
"""
self._check_values_type()
return {k: dataset.column_names for k, dataset in self.items()}
@property
def shape(self) -> Dict[str, Tuple[int]]:
"""Shape of each split of the dataset (number of rows, number of columns).
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.shape
{'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
```
"""
self._check_values_type()
return {k: dataset.shape for k, dataset in self.items()}
def flatten(self, max_depth=16) -> "DatasetDict":
"""Flatten the Apache Arrow Table of each split (nested features are flatten).
Each column with a struct type is flattened into one column per struct field.
Other columns are left unchanged.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad")
>>> ds["train"].features
{'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
>>> ds.flatten()
DatasetDict({
train: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 87599
})
validation: Dataset({
features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
num_rows: 10570
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
def unique(self, column: str) -> Dict[str, List]:
"""Return a list of the unique elements in a column for each split.
This is implemented in the low-level backend and is, as such, very fast.
Args:
column (`str`):
column name (list all the column names with [`~datasets.DatasetDict.column_names`])
Returns:
Dict[`str`, `list`]: Dictionary of unique elements in the given column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.unique("label")
{'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
```
"""
self._check_values_type()
return {k: dataset.unique(column) for k, dataset in self.items()}
def cleanup_cache_files(self) -> Dict[str, int]:
"""Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
Be careful when running this command that no other process is currently using other cache files.
Return:
`Dict` with the number of removed files for each split
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.cleanup_cache_files()
{'test': 0, 'train': 0, 'validation': 0}
```
"""
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"DatasetDict({{\n{repr}\n}})"
def cast(self, features: Features) -> "DatasetDict":
"""
Cast the dataset to a new set of features.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
features ([`Features`]):
New features to cast the dataset to.
The name and order of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~DatasetDict.map`] to update the dataset.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
def cast_column(self, column: str, feature) -> "DatasetDict":
"""Cast column to feature for decoding.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""
Remove one or several column(s) from each split in the dataset
and the features associated to the column(s).
The transformation is applied to all the splits of the dataset dictionary.
You can also remove a column using [`~DatasetDict.map`] with `remove_columns` but the present method
doesn't copy the data of the remaining columns and is thus faster.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`DatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.remove_columns("label")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
"""
Rename a column in the dataset and move the features associated to the original column under the new column name.
The transformation is applied to all the datasets of the dataset dictionary.
You can also rename a column using [`~DatasetDict.map`] with `remove_columns` but the present method:
- takes care of moving the original features under the new column name.
- doesn't copy the data to a new dataset and is thus much faster.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds = ds.rename_column("label", "label_new")
DatasetDict({
train: Dataset({
features: ['text', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`DatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
DatasetDict({
train: Dataset({
features: ['text_new', 'label_new'],
num_rows: 8530
})
validation: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
test: Dataset({
features: ['text_new', 'label_new'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
"""Select one or several column(s) from each split in the dataset and
        the features associated with the column(s).
The transformation is applied to all the splits of the dataset
dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.select_columns("text")
DatasetDict({
train: Dataset({
features: ['text'],
num_rows: 8530
})
validation: Dataset({
features: ['text'],
num_rows: 1066
})
test: Dataset({
features: ['text'],
num_rows: 1066
})
})
```
"""
self._check_values_type()
return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
"""Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
Args:
column (`str`):
The name of the column to cast.
include_nulls (`bool`, defaults to `False`):
Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
<Added version="1.14.2"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value(dtype='bool', id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
'passage': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict(
{k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
@contextlib.contextmanager
def formatted_as(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""To be used in a `with` statement. Set `__getitem__` return format (type and columns).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
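        Example (an illustrative sketch using the "rotten_tomatoes" dataset; the printed types are indicative):
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> with ds.formatted_as(type="numpy", columns=["label"]):
        ...     print(type(ds["train"][:10]["label"]))
        <class 'numpy.ndarray'>
        >>> # once the `with` block exits, the previous format is restored
        >>> type(ds["train"][:10]["label"])
        <class 'list'>
        ```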
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
self.set_format(type, columns, output_all_columns, **format_kwargs)
yield
finally:
for k, dataset in self.items():
dataset.set_format(
old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
)
def set_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
):
"""Set `__getitem__` return format (type and columns).
The format is set for every dataset in the dataset dictionary.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to False):
                Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
        It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns
        gets updated. In this case, if you apply `map` on a dataset to add a new column, this column will be formatted:
`new formatted columns = (all columns - previously unformatted columns)`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
def reset_format(self):
"""Reset `__getitem__` return format to python objects and all columns.
The transformation is applied to all the datasets of the dataset dictionary.
Same as `self.set_format()`
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format()
def set_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
):
"""Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
The transform is set for every dataset in the dataset dictionary
As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`
Args:
transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in ``__getitem__``.
columns (`List[str]`, optional): columns to format in the output
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
If set to True, then the other un-formatted columns are kept with the output of the transform.
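        Example (an illustrative sketch; the exact keys and tensors returned depend on the tokenizer and the installed `transformers` version):
        ```py
        >>> from datasets import load_dataset
        >>> from transformers import AutoTokenizer
        >>> ds = load_dataset("rotten_tomatoes")
        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
        >>> def encode(batch):
        ...     return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
        >>> ds.set_transform(encode)
        >>> list(ds["train"][0].keys())
        ['input_ids', 'token_type_ids', 'attention_mask']
        ```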
"""
self._check_values_type()
for dataset in self.values():
dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format(
self,
type: Optional[str] = None,
columns: Optional[List] = None,
output_all_columns: bool = False,
**format_kwargs,
) -> "DatasetDict":
"""Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
The format is set for every dataset in the dataset dictionary.
It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
type (`str`, *optional*):
Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
`None` means `__getitem__` returns python objects (default).
columns (`List[str]`, *optional*):
Columns to format in the output.
`None` means `__getitem__` returns all columns (default).
output_all_columns (`bool`, defaults to `False`):
Keep un-formatted columns as well in the output (as python objects).
**format_kwargs (additional keyword arguments):
Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': None}
>>> ds = ds.with_format("torch")
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
'format_kwargs': {},
'output_all_columns': False,
'type': 'torch'}
>>> ds["train"][0]
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
return dataset
def with_transform(
self,
transform: Optional[Callable],
columns: Optional[List] = None,
output_all_columns: bool = False,
) -> "DatasetDict":
"""Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
The transform is set for every dataset in the dataset dictionary
As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
Args:
transform (`Callable`, *optional*):
User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
This function is applied right before returning the objects in `__getitem__`.
columns (`List[str]`, *optional*):
Columns to format in the output.
If specified, then the input batch of the transform only contains those columns.
output_all_columns (`bool`, defaults to False):
Keep un-formatted columns as well in the output (as python objects).
If set to `True`, then the other un-formatted columns are kept with the output of the transform.
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> def encode(example):
... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
>>> ds = ds.with_transform(encode)
>>> ds["train"][0]
{'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1]),
'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
188, 1566, 7912, 14516, 6997, 119, 102]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
dataset = copy.deepcopy(self)
dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
return dataset
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a function to all the elements in the table (individually or in batches)
        and update the table (if the function returns updated examples).
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`callable`): with one of the following signature:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
                If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the batch_size should be
dropped instead of being processed by the function.
remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, default `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`[datasets.Features]`, *optional*, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> ds["train"][0:3]["text"]
['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'Review: effective but too-tepid biopic']
# process a batch of examples
>>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
# set number of processors
>>> ds = ds.map(add_prefix, num_proc=4)
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
desc: Optional[str] = None,
) -> "DatasetDict":
"""Apply a filter function to all the elements in the table in batches
and update the table so that the dataset only includes examples according to the filter function.
The transformation is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
                Number of examples per batch provided to `function` if `batched=True`.
                If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
num_proc (`int`, *optional*, defaults to `None`):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds.filter(lambda x: x["label"] == 1)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 4265
})
validation: Dataset({
features: ['text', 'label'],
num_rows: 533
})
test: Dataset({
features: ['text', 'label'],
num_rows: 533
})
})
```
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
with_rank=with_rank,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
desc=desc,
)
for k, dataset in self.items()
}
)
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "DatasetDict":
"""Create and cache a new Dataset by flattening the indices mapping.
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_names (`Dict[str, str]`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
                Disallow null values in the table.
            num_proc (`int`, *optional*, defaults to `None`):
                Max number of processes when generating the cache. Already cached shards are loaded sequentially.
            new_fingerprint (`str`, *optional*, defaults to `None`):
                The new fingerprint of the dataset after transform.
                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
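        Example (an illustrative sketch; `flatten_indices` is mainly useful after operations like `shuffle`, `sort` or `select` that only add an indices mapping on top of the original data):
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes")
        >>> shuffled_ds = ds.shuffle(seed=42)  # each split now carries an indices mapping
        >>> flat_ds = shuffled_ds.flatten_indices()  # rewrite each split as a contiguous cache file
        ```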
"""
self._check_values_type()
if cache_file_names is None:
cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.flatten_indices(
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_names[k],
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
)
for k, dataset in self.items()
}
)
def sort(
self,
column_names: Union[str, Sequence[str]],
reverse: Union[bool, Sequence[bool]] = False,
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new dataset sorted according to a single or multiple columns.
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
null_placement (`str`, defaults to `at_end`):
                Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
keep_in_memory (`bool`, defaults to `False`):
                Keep the sorted indices in memory instead of writing them to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
                A higher value gives smaller cache files, a lower value consumes less temporary memory.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes')
>>> ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['train']['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['train']['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
self._check_values_type()
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.sort(
column_names=column_names,
reverse=reverse,
null_placement=null_placement,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def shuffle(
self,
seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
seed: Optional[int] = None,
generators: Optional[Dict[str, np.random.Generator]] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
writer_batch_size: Optional[int] = 1000,
) -> "DatasetDict":
"""Create a new Dataset where the rows are shuffled.
The transformation is applied to all the datasets of the dataset dictionary.
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Args:
seeds (`Dict[str, int]` or `int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
You can provide one `seed` per dataset in the dataset dictionary.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
            generators (`Dict[str, np.random.Generator]`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
You have to provide one `generator` per dataset in the dataset dictionary.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
indices_cache_file_names (`Dict[str, str]`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices mappings instead of the automatically generated cache file name.
You have to provide one `cache_file_name` per dataset in the dataset dictionary.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes")
>>> ds["train"]["label"][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds["train"]["label"][:10]
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
```
"""
self._check_values_type()
if seed is not None and seeds is not None:
raise ValueError("Please specify seed or seeds, but not both")
seeds = seed if seed is not None else seeds
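        # Normalize the per-split arguments: one seed, one generator and one indices cache file name per dataset in the dictionary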
if seeds is None:
seeds = {k: None for k in self}
elif not isinstance(seeds, dict):
seeds = {k: seeds for k in self}
if generators is None:
generators = {k: None for k in self}
if indices_cache_file_names is None:
indices_cache_file_names = {k: None for k in self}
return DatasetDict(
{
k: dataset.shuffle(
seed=seeds[k],
generator=generators[k],
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
indices_cache_file_name=indices_cache_file_names[k],
writer_batch_size=writer_batch_size,
)
for k, dataset in self.items()
}
)
def save_to_disk(
self,
dataset_dict_path: PathLike,
max_shard_size: Optional[Union[str, int]] = None,
num_shards: Optional[Dict[str, int]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
):
"""
Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
For [`Image`], [`Audio`] and [`Video`] data:
        All the Image(), Audio() and Video() data are stored in the Arrow files.
        If you want to store paths or URLs, please use the Value("string") type.
Args:
dataset_dict_path (`path-like`):
Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
of the dataset dict directory where the dataset dict will be saved to.
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
                The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
(like `"50MB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
You need to provide the number of shards for each dataset in the dataset dictionary.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
num_proc (`int`, *optional*, default `None`):
                Number of processes to use when saving the dataset shards locally.
Multiprocessing is disabled by default.
<Added version="2.8.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Example:
```python
>>> dataset_dict.save_to_disk("path/to/dataset/directory")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
>>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
```
"""
fs: fsspec.AbstractFileSystem
fs, _ = url_to_fs(dataset_dict_path, **(storage_options or {}))
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
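        # Write the top-level metadata (the list of splits), then save each split in its own subdirectory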
fs.makedirs(dataset_dict_path, exist_ok=True)
with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
json.dump({"splits": list(self)}, f)
for k, dataset in self.items():
dataset.save_to_disk(
posixpath.join(dataset_dict_path, k),
num_shards=num_shards.get(k),
max_shard_size=max_shard_size,
num_proc=num_proc,
storage_options=storage_options,
)
@staticmethod
def load_from_disk(
dataset_dict_path: PathLike,
keep_in_memory: Optional[bool] = None,
storage_options: Optional[dict] = None,
) -> "DatasetDict":
"""
Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
Args:
dataset_dict_path (`path-like`):
                Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
of the dataset dict directory where the dataset dict will be loaded from.
keep_in_memory (`bool`, defaults to `None`):
Whether to copy the dataset in-memory. If `None`, the
dataset will not be copied in-memory unless explicitly enabled by setting
`datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
[improve performance](../cache#improve-performance) section.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.8.0"/>
Returns:
[`DatasetDict`]
Example:
```py
>>> ds = load_from_disk('path/to/dataset/directory')
```
"""
fs: fsspec.AbstractFileSystem
fs, dataset_dict_path = url_to_fs(dataset_dict_path, **(storage_options or {}))
dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
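        # Distinguish a saved `Dataset` (state + info files, no DatasetDict json) from a saved `DatasetDict` to raise a precise error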
if not fs.isfile(dataset_dict_json_path):
if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
)
raise FileNotFoundError(
f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
)
with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
splits = json.load(f)["splits"]
dataset_dict = DatasetDict()
for k in splits:
dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
dataset_dict[k] = Dataset.load_from_disk(
dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
)
return dataset_dict
@staticmethod
def from_csv(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from CSV file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the CSV file(s).
features ([`Features`], *optional*):
Dataset features.
            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`pandas.read_csv`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetReader
return CsvDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_json(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from JSON Lines file(s).
Args:
path_or_paths (`path-like` or list of `path-like`):
Path(s) of the JSON Lines file(s).
features ([`Features`], *optional*):
Dataset features.
            cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`JsonConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetReader
return JsonDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@staticmethod
def from_parquet(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
columns: Optional[List[str]] = None,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from Parquet file(s).
Args:
path_or_paths (`dict` of path-like):
                Path(s) of the Parquet file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
columns (`List[str]`, *optional*):
If not `None`, only these columns will be read from the file.
A column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`ParquetConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset/parquet'})
```
"""
# Dynamic import to avoid circular dependency
from .io.parquet import ParquetDatasetReader
return ParquetDatasetReader(
path_or_paths,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
columns=columns,
**kwargs,
).read()
@staticmethod
def from_text(
path_or_paths: Dict[str, PathLike],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
) -> "DatasetDict":
"""Create [`DatasetDict`] from text file(s).
Args:
path_or_paths (`dict` of path-like):
Path(s) of the text file(s).
features ([`Features`], *optional*):
Dataset features.
cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
Directory to cache data.
keep_in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
**kwargs (additional keyword arguments):
Keyword arguments to be passed to [`TextConfig`].
Returns:
[`DatasetDict`]
Example:
```py
>>> from datasets import DatasetDict
>>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
```
"""
# Dynamic import to avoid circular dependency
from .io.text import TextDatasetReader
return TextDatasetReader(
path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
).read()
@is_documented_by(Dataset.align_labels_with_mapping)
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
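        # The docstring is shared with `Dataset.align_labels_with_mapping` via the `is_documented_by` decorator;
        # the same label alignment is applied to every split of the dictionary.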
self._check_values_type()
return DatasetDict(
{
k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
for k, dataset in self.items()
}
)
def push_to_hub(
self,
repo_id,
config_name: str = "default",
set_default: Optional[bool] = None,
data_dir: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = False,
max_shard_size: Optional[Union[int, str]] = None,
num_shards: Optional[Dict[str, int]] = None,
embed_external_files: bool = True,
) -> CommitInfo:
"""Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
        The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
data, the Parquet files will store the bytes of your images or audio files.
You can disable this by setting `embed_external_files` to False.
Args:
repo_id (`str`):
The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
`<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
of the logged-in user.
config_name (`str`):
Configuration name of a dataset. Defaults to "default".
set_default (`bool`, *optional*):
Whether to set this configuration as the default one. Otherwise, the default configuration is the one
named "default".
data_dir (`str`, *optional*):
Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
from "default", else "data".
<Added version="2.17.0"/>
commit_message (`str`, *optional*):
Message to commit while pushing. Will default to `"Upload dataset"`.
commit_description (`str`, *optional*):
Description of the commit that will be created.
Additionally, description of the PR if a PR is created (`create_pr` is True).
<Added version="2.16.0"/>
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the
organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
An optional authentication token for the Hugging Face Hub. If no token is passed, will default
to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
if no token is passed and the user is not logged-in.
revision (`str`, *optional*):
Branch to push the uploaded files to. Defaults to the `"main"` branch.
<Added version="2.15.0"/>
create_pr (`bool`, *optional*, defaults to `False`):
Whether to create a PR with the uploaded files or directly commit.
<Added version="2.15.0"/>
max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
(like `"500MB"` or `"1GB"`).
num_shards (`Dict[str, int]`, *optional*):
Number of shards to write. By default, the number of shards depends on `max_shard_size`.
Use a dictionary to define a different num_shards for each split.
<Added version="2.8.0"/>
embed_external_files (`bool`, defaults to `True`):
Whether to embed file bytes in the shards.
In particular, this will do the following before the push for the fields of type:
                - [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
Return:
huggingface_hub.CommitInfo
Example:
```python
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
>>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
```
If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
```python
>>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
>>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
>>> # later
>>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
>>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
```
"""
if num_shards is None:
num_shards = {k: None for k in self}
elif not isinstance(num_shards, dict):
raise ValueError(
"Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
)
self._check_values_type()
self._check_values_features()
total_uploaded_size = 0
total_dataset_nbytes = 0
info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
info_to_dump.config_name = config_name
info_to_dump.splits = SplitDict()
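        # Validate all the split names before creating the repo or uploading any data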
for split in self.keys():
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
repo_url = api.create_repo(
repo_id,
token=token,
repo_type="dataset",
private=private,
exist_ok=True,
)
repo_id = repo_url.repo_id
if revision is not None and not revision.startswith("refs/pr/"):
# We do not call create_branch for a PR reference: 400 Bad Request
api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
if not data_dir:
data_dir = config_name if config_name != "default" else "data" # for backward compatibility
additions = []
for split in self.keys():
logger.info(f"Pushing split {split} to the Hub.")
# The split=key needs to be removed before merging
split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
repo_id,
data_dir=data_dir,
split=split,
token=token,
revision=revision,
create_pr=create_pr,
max_shard_size=max_shard_size,
num_shards=num_shards.get(split),
embed_external_files=embed_external_files,
)
additions += split_additions
total_uploaded_size += uploaded_size
total_dataset_nbytes += dataset_nbytes
info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
info_to_dump.download_checksums = None
info_to_dump.download_size = total_uploaded_size
info_to_dump.dataset_size = total_dataset_nbytes
info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
# and delete old split shards (if they exist)
repo_with_dataset_card, repo_with_dataset_infos = False, False
repo_splits = [] # use a list to keep the order of the splits
deletions = []
repo_files_to_add = [addition.path_in_repo for addition in additions]
for repo_file in api.list_repo_tree(
repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
):
if not isinstance(repo_file, RepoFile):
continue
if repo_file.rfilename == config.REPOCARD_FILENAME:
repo_with_dataset_card = True
elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
repo_with_dataset_infos = True
elif (
repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
and repo_file.rfilename not in repo_files_to_add
):
deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
elif fnmatch.fnmatch(
repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
):
repo_split = string_to_dict(
repo_file.rfilename,
glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
)["split"]
if repo_split not in repo_splits:
                    repo_splits.append(repo_split)
# get the info from the README to update them
if repo_with_dataset_card:
dataset_card_path = api.hf_hub_download(
repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
)
dataset_card = DatasetCard.load(Path(dataset_card_path))
dataset_card_data = dataset_card.data
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
# get the deprecated dataset_infos.json to update them
elif repo_with_dataset_infos:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
else:
dataset_card = None
dataset_card_data = DatasetCardData()
metadata_configs = MetadataConfigs()
# create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
if not metadata_configs and repo_splits:
default_metadata_configs_to_dump = {
"data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
}
MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
metadata_config_to_dump = {
"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
}
if set_default and config_name != "default":
if metadata_configs:
default_config_name = metadata_configs.get_default_config_name()
if default_config_name == "default":
raise ValueError(
"There exists a configuration named 'default'. To set a different configuration as default, "
"rename the 'default' one first."
)
                elif default_config_name:
                    _ = metadata_configs[default_config_name].pop("default")
metadata_config_to_dump["default"] = True
# push to the deprecated dataset_infos.json
if repo_with_dataset_infos:
dataset_infos_path = api.hf_hub_download(
repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
)
with open(dataset_infos_path, encoding="utf-8") as f:
dataset_infos: dict = json.load(f)
dataset_infos[config_name] = asdict(info_to_dump)
buffer = BytesIO()
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
additions.append(
CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
)
# push to README
DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
additions.append(
CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
)
commit_message = commit_message if commit_message is not None else "Upload dataset"
if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
commit_info = api.create_commit(
repo_id,
operations=additions + deletions,
commit_message=commit_message,
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
else:
logger.info(
f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
)
num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
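            # Deletions are attached to the first commit only, so outdated shards are removed exactly once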
for i in range(0, num_commits):
operations = additions[
i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
] + (deletions if i == 0 else [])
commit_info = api.create_commit(
repo_id,
operations=operations,
commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
commit_description=commit_description,
token=token,
repo_type="dataset",
revision=revision,
create_pr=create_pr,
)
logger.info(
f"Commit #{i + 1} completed"
+ (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ "."
)
return commit_info | class_definition | 1,100 | 83,252 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py | null | 34 |
class IterableDatasetDict(dict):
def __repr__(self):
repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
repr = re.sub(r"^", " " * 4, repr, 0, re.M)
return f"IterableDatasetDict({{\n{repr}\n}})"
def with_format(
self,
type: Optional[str] = None,
) -> "IterableDatasetDict":
"""
Return a dataset with the specified format.
The 'pandas' format is currently not implemented.
Args:
type (`str`, *optional*):
                Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'arrow', 'jax']`.
`None` means it returns python objects (default).
Example:
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
>>> ds = ds.with_format("torch")
>>> next(iter(ds))
{'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]),
'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])}
```
"""
return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: int = 1000,
drop_last_batch: bool = False,
remove_columns: Optional[Union[str, List[str]]] = None,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
The function is applied on-the-fly on the examples when iterating over the dataset.
The transformation is applied to all the datasets of the dataset dictionary.
You can specify whether the function should be batched or not with the `batched` parameter:
- If batched is `False`, then the function takes 1 example in and should return 1 example.
An example is a dictionary, e.g. `{"text": "Hello there !"}`.
- If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
- If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
          Note that the last batch may have fewer than `n` examples.
A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
Args:
function (`Callable`, *optional*, defaults to `None`):
Function applied on-the-fly on the examples when you iterate on the dataset.
It must have one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
- `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
- `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
- `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
For advanced usage, the function can also return a `pyarrow.Table`.
Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
If no function is provided, defaults to the identity function: `lambda x: x`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
drop_last_batch (`bool`, defaults to `False`):
Whether a last batch smaller than the `batch_size` should be
dropped instead of being processed by the function.
remove_columns (`[List[str]]`, *optional*, defaults to `None`):
Remove a selection of columns while doing the mapping.
Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> def add_prefix(example):
... example["text"] = "Review: " + example["text"]
... return example
>>> ds = ds.map(add_prefix)
>>> next(iter(ds["train"]))
{'label': 1,
'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
remove_columns=remove_columns,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
) -> "IterableDatasetDict":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
The filtering is applied to all the datasets of the dataset dictionary.
Args:
function (`Callable`):
Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
- `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
- `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
- `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
If no function is provided, defaults to an always True function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if `batched=True`.
fn_kwargs (`Dict`, *optional*, defaults to `None`):
Keyword arguments to be passed to `function`
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.filter(lambda x: x["label"] == 0)
>>> list(ds["train"].take(3))
[{'label': 0, 'text': 'simplistic , silly and tedious .'},
{'label': 0,
'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
{'label': 0,
'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
```
"""
return IterableDatasetDict(
{
k: dataset.filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
batched=batched,
batch_size=batch_size,
fn_kwargs=fn_kwargs,
)
for k, dataset in self.items()
}
)
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDatasetDict":
"""
Randomly shuffles the elements of this dataset.
The shuffling is applied to all the datasets of the dataset dictionary.
This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
equal to the full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
initially select a random element from only the first 1000 elements in the buffer. Once an element is
selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1000 element buffer.
If the dataset is made of several shards, it also shuffles the order of the shards.
However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
then the order of the shards is kept unchanged.
Args:
seed (`int`, *optional*, defaults to `None`):
Random seed that will be used to shuffle the dataset.
It is used to sample from the shuffle buffer and also to shuffle the data shards.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
buffer_size (`int`, defaults to `1000`):
Size of the buffer.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.shuffle(seed=42)
>>> list(ds["train"].take(3))
[{'label': 1,
'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
{'label': 1,
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
{'label': 1,
'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
```
"""
return IterableDatasetDict(
{
k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
for k, dataset in self.items()
}
)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated to the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated to them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.select_columns("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with casted features.
Example:
```py
>>> from datasets import load_dataset, ClassLabel, Value
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) | class_definition | 83,255 | 105,395 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py | null | 35 |
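# Usage sketch (illustrative): chaining the lazy transforms defined above on a streaming
# dataset dict. Assumes network access to the "rotten_tomatoes" dataset on the Hub;
# nothing is downloaded or processed until iteration starts.
from datasets import load_dataset

streaming_ds = load_dataset("rotten_tomatoes", streaming=True)          # IterableDatasetDict
streaming_ds = streaming_ds.map(lambda x: {"text": x["text"].lower()})  # applied to every split, on the fly
streaming_ds = streaming_ds.filter(lambda x: x["label"] == 1)
streaming_ds = streaming_ds.rename_column("text", "review")
print(next(iter(streaming_ds["train"])))                                # the transforms only run here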
class _TempCacheDir:
"""
A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
before deleting the directory itself to avoid permission errors on Windows.
"""
def __init__(self):
self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
self._finalizer = weakref.finalize(self, self._cleanup)
def _cleanup(self):
for dset in get_datasets_with_cache_file_in_temp_dir():
dset.__del__()
if os.path.exists(self.name):
try:
shutil.rmtree(self.name)
except Exception as e:
raise OSError(
f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually."
) from e
def cleanup(self):
if self._finalizer.detach():
self._cleanup() | class_definition | 1,190 | 2,103 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/fingerprint.py | null | 36 |
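# Usage sketch (illustrative, private API): the finalizer pattern above guarantees `_cleanup`
# runs at most once, either explicitly via `cleanup()` or when the object is garbage collected.
tmp_cache = _TempCacheDir()
cache_path = tmp_cache.name  # a fresh directory under the system temp dir
# ... temporary Arrow cache files would be written under `cache_path` here ...
tmp_cache.cleanup()          # drops references to cached datasets, then removes the directory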
class Hasher:
"""Hasher that accepts python objects as inputs."""
dispatch: Dict = {}
def __init__(self):
self.m = xxhash.xxh64()
@classmethod
def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
value = [value] if isinstance(value, bytes) else value
m = xxhash.xxh64()
for x in value:
m.update(x)
return m.hexdigest()
@classmethod
def hash(cls, value: Any) -> str:
return cls.hash_bytes(dumps(value))
def update(self, value: Any) -> None:
header_for_update = f"=={type(value)}=="
value_for_update = self.hash(value)
self.m.update(header_for_update.encode("utf8"))
self.m.update(value_for_update.encode("utf-8"))
def hexdigest(self) -> str:
return self.m.hexdigest() | class_definition | 6,698 | 7,515 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/fingerprint.py | null | 37 |
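# Usage sketch (illustrative): `Hasher.hash` fingerprints any object the library's `dumps` can
# serialize, and an instance accumulates incremental updates (e.g. a previous fingerprint plus
# the transform applied), which is how new dataset fingerprints are typically derived.
assert Hasher.hash({"batch_size": 1000}) == Hasher.hash({"batch_size": 1000})  # deterministic
hasher = Hasher()
hasher.update("previous_fingerprint")
hasher.update(lambda x: {"text": x["text"].lower()})  # callables are hashed via their pickled bytes
new_fingerprint = hasher.hexdigest()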
class SchemaInferenceError(ValueError):
pass | class_definition | 3,235 | 3,283 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py | null | 38 |
class TypedSequence:
"""
This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
More specifically it adds several features:
- Support extension types like ``datasets.features.Array2DExtensionType``:
By default pyarrow arrays don't return extension arrays. One has to call
``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
in order to get an extension array.
- Support for ``try_type`` parameter that can be used instead of ``type``:
When an array is transformed, we like to keep the same type as before if possible.
For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
of each column by default.
- Better error message when a pyarrow array overflows.
Example::
from datasets.features import Array2D, Array2DExtensionType, Value
from datasets.arrow_writer import TypedSequence
import pyarrow as pa
arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
assert arr.type == pa.string()
arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
assert arr.type == Array2DExtensionType((1, 3), "int64")
table = pa.Table.from_pydict({
"image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
})
assert table["image"].type == Array2DExtensionType((1, 3), "int64")
"""
def __init__(
self,
data: Iterable,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
optimized_int_type: Optional[FeatureType] = None,
):
# type and try_type are mutually exclusive
if type is not None and try_type is not None:
raise ValueError("You cannot specify both type and try_type")
# set attributes
self.data = data
self.type = type
self.try_type = try_type # is ignored if it doesn't match the data
self.optimized_int_type = optimized_int_type
# when trying a type (is ignored if data is not compatible)
self.trying_type = self.try_type is not None
self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
# used to get back the inferred type after __arrow_array__() is called once
self._inferred_type = None
def get_inferred_type(self) -> FeatureType:
"""Return the inferred feature type.
This is done by converting the sequence to an Arrow array, and getting the corresponding
feature type.
Since building the Arrow array can be expensive, the value of the inferred type is cached
as soon as pa.array is called on the typed sequence.
Returns:
FeatureType: inferred feature type of the sequence.
"""
if self._inferred_type is None:
self._inferred_type = generate_from_arrow_type(pa.array(self).type)
return self._inferred_type
@staticmethod
def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
"""Implement type inference for custom objects like PIL.Image.Image -> Image type.
This function is only used for custom python objects that can't be directly passed to build
an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
that they can be passed to an Arrow array.
Args:
data (Iterable): array of data to infer the type, e.g. a list of PIL images.
Returns:
Tuple[Iterable, Optional[FeatureType]]: a tuple with:
- the (possibly encoded) array, if the inferred feature type requires encoding
- the inferred feature type if the array is made of supported custom objects like
PIL images, else None.
"""
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
non_null_idx, non_null_value = first_non_null_value(data)
if isinstance(non_null_value, PIL.Image.Image):
return [Image().encode_example(value) if value is not None else None for value in data], Image()
return data, None
def __arrow_array__(self, type: Optional[pa.DataType] = None):
"""This function is called when calling pa.array(typed_sequence)"""
if type is not None:
raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
del type # make sure we don't use it
data = self.data
# automatic type inference for custom objects
if self.type is None and self.try_type is None:
data, self._inferred_type = self._infer_custom_type_and_encode(data)
if self._inferred_type is None:
type = self.try_type if self.trying_type else self.type
else:
type = self._inferred_type
pa_type = get_nested_type(type) if type is not None else None
optimized_int_pa_type = (
get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
)
trying_cast_to_python_objects = False
try:
# custom pyarrow types
if isinstance(pa_type, _ArrayXDExtensionType):
storage = to_pyarrow_listarray(data, pa_type)
return pa.ExtensionArray.from_storage(pa_type, storage)
# efficient np array to pyarrow array
if isinstance(data, np.ndarray):
out = numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
out = list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
# use smaller integer precisions if possible
if self.trying_int_optimization:
if pa.types.is_int64(out.type):
out = out.cast(optimized_int_pa_type)
elif pa.types.is_list(out.type):
if pa.types.is_int64(out.type.value_type):
out = array_cast(out, pa.list_(optimized_int_pa_type))
elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
# otherwise we can finally use the user's type
elif type is not None:
# We use cast_array_to_feature to support casting to custom types like Audio and Image
# Also, when trying type "string", we don't want to convert integers or floats to "string".
# We only do it if trying_type is False - since this is what the user asks for.
out = cast_array_to_feature(
out, type, allow_primitive_to_str=not self.trying_type, allow_decimal_to_str=not self.trying_type
)
return out
except (
TypeError,
pa.lib.ArrowInvalid,
pa.lib.ArrowNotImplementedError,
) as e: # handle type errors and overflows
# Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
raise
if self.trying_type:
try: # second chance
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
return list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
except pa.lib.ArrowInvalid as e:
if "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(
f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
)
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(
cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
)
if type is not None:
out = cast_array_to_feature(
out, type, allow_primitive_to_str=True, allow_decimal_to_str=True
)
return out
else:
raise
elif "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
if type is not None:
out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True)
return out
else:
raise | class_definition | 3,286 | 13,936 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py | null | 39 |
class OptimizedTypedSequence(TypedSequence):
def __init__(
self,
data,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
col: Optional[str] = None,
optimized_int_type: Optional[FeatureType] = None,
):
optimized_int_type_by_col = {
"attention_mask": Value("int8"), # binary tensor
"special_tokens_mask": Value("int8"),
"input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
"token_type_ids": Value(
"int8"
), # binary mask; some (XLNetModel) use an additional token represented by a 2
}
if type is None and try_type is None:
optimized_int_type = optimized_int_type_by_col.get(col, None)
super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type) | class_definition | 13,939 | 14,847 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py | null | 40 |
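# Usage sketch (illustrative): with no explicit `type`/`try_type`, the column name alone decides
# whether a smaller integer type is attempted, per `optimized_int_type_by_col` above.
import pyarrow as pa

assert pa.array(OptimizedTypedSequence([1, 1, 0, 0], col="attention_mask")).type == pa.int8()
assert pa.array(OptimizedTypedSequence([101, 18027, 102], col="input_ids")).type == pa.int32()
assert pa.array(OptimizedTypedSequence([1, 2, 3], col="some_other_column")).type == pa.int64()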
class ArrowWriter:
"""Shuffles and writes Examples to Arrow files."""
_WRITER_CLASS = pa.RecordBatchStreamWriter
def __init__(
self,
schema: Optional[pa.Schema] = None,
features: Optional[Features] = None,
path: Optional[str] = None,
stream: Optional[pa.NativeFile] = None,
fingerprint: Optional[str] = None,
writer_batch_size: Optional[int] = None,
hash_salt: Optional[str] = None,
check_duplicates: Optional[bool] = False,
disable_nullable: bool = False,
update_features: bool = False,
with_metadata: bool = True,
unit: str = "examples",
embed_local_files: bool = False,
storage_options: Optional[dict] = None,
):
if path is None and stream is None:
raise ValueError("At least one of path and stream must be provided.")
if features is not None:
self._features = features
self._schema = None
elif schema is not None:
self._schema: pa.Schema = schema
self._features = Features.from_arrow_schema(self._schema)
else:
self._features = None
self._schema = None
if hash_salt is not None:
# Create KeyHasher instance using split name as hash salt
self._hasher = KeyHasher(hash_salt)
else:
self._hasher = KeyHasher("")
self._check_duplicates = check_duplicates
self._disable_nullable = disable_nullable
if stream is None:
fs, path = url_to_fs(path, **(storage_options or {}))
self._fs: fsspec.AbstractFileSystem = fs
self._path = path if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(path)
self.stream = self._fs.open(path, "wb")
self._closable_stream = True
else:
self._fs = None
self._path = None
self.stream = stream
self._closable_stream = False
self.fingerprint = fingerprint
self.disable_nullable = disable_nullable
self.writer_batch_size = (
writer_batch_size or get_writer_batch_size(self._features) or config.DEFAULT_MAX_BATCH_SIZE
)
self.update_features = update_features
self.with_metadata = with_metadata
self.unit = unit
self.embed_local_files = embed_local_files
self._num_examples = 0
self._num_bytes = 0
self.current_examples: List[Tuple[Dict[str, Any], str]] = []
self.current_rows: List[pa.Table] = []
self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
self.hkey_record = []
def __len__(self):
"""Return the number of writed and staged examples"""
return self._num_examples + len(self.current_examples) + len(self.current_rows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
# Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
if self.pa_writer: # it might be None
try:
self.pa_writer.close()
except Exception: # pyarrow.lib.ArrowInvalid, OSError
pass
if self._closable_stream and not self.stream.closed:
self.stream.close() # This also closes self.pa_writer if it is opened
def _build_writer(self, inferred_schema: pa.Schema):
schema = self.schema
inferred_features = Features.from_arrow_schema(inferred_schema)
if self._features is not None:
if self.update_features:  # keep original features if they match, or update them
fields = {field.name: field for field in self._features.type}
for inferred_field in inferred_features.type:
name = inferred_field.name
if name in fields:
if inferred_field == fields[name]:
inferred_features[name] = self._features[name]
self._features = inferred_features
schema: pa.Schema = inferred_schema
else:
self._features = inferred_features
schema: pa.Schema = inferred_features.arrow_schema
if self.disable_nullable:
schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
if self.with_metadata:
schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
else:
schema = schema.with_metadata({})
self._schema = schema
self.pa_writer = self._WRITER_CLASS(self.stream, schema)
@property
def schema(self):
_schema = (
self._schema
if self._schema is not None
else (pa.schema(self._features.type) if self._features is not None else None)
)
if self._disable_nullable and _schema is not None:
_schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
return _schema if _schema is not None else []
@staticmethod
def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
info_as_dict = asdict(info)
metadata = {}
metadata["info"] = {key: info_as_dict[key] for key in info_keys}
if fingerprint is not None:
metadata["fingerprint"] = fingerprint
return {"huggingface": json.dumps(metadata)}
def write_examples_on_file(self):
"""Write stored examples from the write-pool of examples. It makes a table out of the examples and write it."""
if not self.current_examples:
return
# preserve the order of the columns
if self.schema:
schema_cols = set(self.schema.names)
examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
common_cols = [col for col in self.schema.names if col in examples_cols]
extra_cols = [col for col in examples_cols if col not in schema_cols]
cols = common_cols + extra_cols
else:
cols = list(self.current_examples[0][0])
batch_examples = {}
for col in cols:
# We use row[0][col] since current_examples contains (example, key) tuples.
# Moreover, examples could be Arrow arrays of 1 element.
# This can happen in `.map()` when we want to re-write the same Arrow data
if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
arrays = [row[0][col] for row in self.current_examples]
arrays = [
chunk
for array in arrays
for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
]
batch_examples[col] = pa.concat_arrays(arrays)
else:
batch_examples[col] = [
row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
for row in self.current_examples
]
self.write_batch(batch_examples=batch_examples)
self.current_examples = []
def write_rows_on_file(self):
"""Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
if not self.current_rows:
return
table = pa.concat_tables(self.current_rows)
self.write_table(table)
self.current_rows = []
def write(
self,
example: Dict[str, Any],
key: Optional[Union[str, int, bytes]] = None,
writer_batch_size: Optional[int] = None,
):
"""Add a given (Example,Key) pair to the write-pool of examples which is written to file.
Args:
example: the Example to add.
key: Optional, a unique identifier (str, int or bytes) associated with each example
"""
# Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
if self._check_duplicates:
# Create unique hash from key and store as (key, example) pairs
hash = self._hasher.hash(key)
self.current_examples.append((example, hash))
# Maintain record of keys and their respective hashes for checking duplicates
self.hkey_record.append((hash, key))
else:
# Store example as a tuple so as to keep the structure of `self.current_examples` uniform
self.current_examples.append((example, ""))
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
def check_duplicate_keys(self):
"""Raises error if duplicates found in a batch"""
tmp_record = set()
for hash, key in self.hkey_record:
if hash in tmp_record:
duplicate_key_indices = [
str(self._num_examples + index)
for index, (duplicate_hash, _) in enumerate(self.hkey_record)
if duplicate_hash == hash
]
raise DuplicatedKeysError(key, duplicate_key_indices)
else:
tmp_record.add(hash)
def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
"""Add a given single-row Table to the write-pool of rows which is written to file.
Args:
row: the row to add.
"""
if len(row) != 1:
raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
self.current_rows.append(row)
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
self.write_rows_on_file()
def write_batch(
self,
batch_examples: Dict[str, List],
writer_batch_size: Optional[int] = None,
):
"""Write a batch of Example to file.
Ignores the batch if it appears to be empty,
preventing a potential schema update of unknown types.
Args:
batch_examples: the batch of examples to add.
"""
if batch_examples and len(next(iter(batch_examples.values()))) == 0:
return
features = None if self.pa_writer is None and self.update_features else self._features
try_features = self._features if self.pa_writer is None and self.update_features else None
arrays = []
inferred_features = Features()
# preserve the order of the columns
if self.schema:
schema_cols = set(self.schema.names)
batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
common_cols = [col for col in self.schema.names if col in batch_cols]
extra_cols = [col for col in batch_cols if col not in schema_cols]
cols = common_cols + extra_cols
else:
cols = list(batch_examples)
for col in cols:
col_values = batch_examples[col]
col_type = features[col] if features else None
if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
arrays.append(array)
inferred_features[col] = generate_from_arrow_type(col_values.type)
else:
col_try_type = try_features[col] if try_features is not None and col in try_features else None
typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
arrays.append(pa.array(typed_sequence))
inferred_features[col] = typed_sequence.get_inferred_type()
schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
self.write_table(pa_table, writer_batch_size)
def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
"""Write a Table to file.
Args:
pa_table: the Table to add.
"""
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if self.pa_writer is None:
self._build_writer(inferred_schema=pa_table.schema)
pa_table = pa_table.combine_chunks()
pa_table = table_cast(pa_table, self._schema)
if self.embed_local_files:
pa_table = embed_table_storage(pa_table)
self._num_bytes += pa_table.nbytes
self._num_examples += pa_table.num_rows
self.pa_writer.write_table(pa_table, writer_batch_size)
def finalize(self, close_stream=True):
self.write_rows_on_file()
# In case current_examples < writer_batch_size, but user uses finalize()
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
# If schema is known, infer features even if no examples were written
if self.pa_writer is None and self.schema:
self._build_writer(self.schema)
if self.pa_writer is not None:
self.pa_writer.close()
self.pa_writer = None
if close_stream:
self.stream.close()
else:
if close_stream:
self.stream.close()
raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
logger.debug(
f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
)
return self._num_examples, self._num_bytes | class_definition | 14,850 | 29,359 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py | null | 41 |
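# Usage sketch (illustrative, assumes "data.arrow" is a writable local path): examples are
# buffered and flushed every `writer_batch_size` examples; `finalize()` flushes the remainder
# and returns the number of examples and bytes written.
from datasets import Features, Value

features = Features({"text": Value("string"), "label": Value("int64")})
with ArrowWriter(features=features, path="data.arrow") as writer:
    writer.write({"text": "hello", "label": 0})
    writer.write({"text": "world", "label": 1})
    num_examples, num_bytes = writer.finalize()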
class ParquetWriter(ArrowWriter):
_WRITER_CLASS = pq.ParquetWriter | class_definition | 29,362 | 29,432 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py | null | 42 |
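# Usage sketch (illustrative): same interface as ArrowWriter, but `_WRITER_CLASS` is
# `pyarrow.parquet.ParquetWriter`, so the output file is Parquet instead of an Arrow stream.
from datasets import Features, Value

with ParquetWriter(features=Features({"text": Value("string")}), path="data.parquet") as writer:
    writer.write({"text": "hello"})
    writer.finalize()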
class DatasetNotOnHfGcsError(ConnectionError):
"""When you can't get the dataset from the Hf google cloud storage"""
pass | class_definition | 1,733 | 1,863 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 43 |
class MissingFilesOnHfGcsError(ConnectionError):
"""When some files are missing on the Hf oogle cloud storage"""
pass | class_definition | 1,866 | 1,992 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 44 |
class FileInstructions:
"""The file instructions associated with a split ReadInstruction.
Attributes:
num_examples: `int`, The total number of examples
file_instructions: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not absolute.
skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
"""
num_examples: int
file_instructions: List[dict] | class_definition | 2,019 | 2,491 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 45 |
class BaseReader:
"""
Build a Dataset object out of Instruction instance(s).
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where the dataset files are stored.
info (DatasetInfo): info about the dataset.
"""
self._path: str = path
self._info: Optional["DatasetInfo"] = info
self._filetype_suffix: Optional[str] = None
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
raise NotImplementedError
def _read_files(self, files, in_memory=False) -> Table:
"""Returns Dataset for given file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the absolute path, not relative.
skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
in_memory (bool, default False): Whether to copy the data in-memory.
"""
if len(files) == 0 or not all(isinstance(f, dict) for f in files):
raise ValueError("please provide valid file informations")
files = copy.deepcopy(files)
for f in files:
f["filename"] = os.path.join(self._path, f["filename"])
pa_tables = thread_map(
partial(self._get_table_from_filename, in_memory=in_memory),
files,
tqdm_class=hf_tqdm,
desc="Loading dataset shards",
# set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
disable=len(files) <= 16 or None,
)
pa_tables = [t for t in pa_tables if len(t) > 0]
if not pa_tables and (self._info is None or self._info.features is None):
raise ValueError(
"Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
)
pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
return pa_table
def get_file_instructions(self, name, instruction, split_infos):
"""Return list of dict {'filename': str, 'skip': int, 'take': int}"""
file_instructions = make_file_instructions(
name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
)
files = file_instructions.file_instructions
return files
def read(
self,
name,
instructions,
split_infos,
in_memory=False,
):
"""Returns Dataset instance(s).
Args:
name (str): name of the dataset.
instructions (ReadInstruction): instructions to read.
The instruction can be a string, in which case it is passed to the ReadInstruction
constructor as is.
split_infos (list of SplitInfo proto): the available splits for dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a single Dataset instance.
"""
files = self.get_file_instructions(name, instructions, split_infos)
if not files:
msg = f'Instruction "{instructions}" corresponds to no data!'
raise ValueError(msg)
return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
def read_files(
self,
files: List[dict],
original_instructions: Union[None, "ReadInstruction", "Split"] = None,
in_memory=False,
):
"""Returns single Dataset instance for the set of file instructions.
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not absolute.
skip/take indicates which examples to read in the file: `ds.skip().take()`
original_instructions: store the original instructions used to build the dataset split in the dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a Dataset instance.
"""
# Prepend path to filename
pa_table = self._read_files(files, in_memory=in_memory)
# If original_instructions is not None, convert it to a human-readable NamedSplit
if original_instructions is not None:
from .splits import Split # noqa
split = Split(str(original_instructions))
else:
split = None
dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
return dataset_kwargs | class_definition | 5,799 | 10,729 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 46 |
class ArrowReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where Arrow files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "arrow"
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
table = ArrowReader.read_table(filename, in_memory=in_memory)
if take == -1:
take = len(table) - skip
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(table)):
table = table.slice(skip, take)
return table
@staticmethod
def read_table(filename, in_memory=False) -> Table:
"""
Read table from file.
Args:
filename (str): File name of the table.
in_memory (bool, default=False): Whether to copy the data in-memory.
Returns:
pyarrow.Table
"""
table_cls = InMemoryTable if in_memory else MemoryMappedTable
return table_cls.from_file(filename) | class_definition | 10,732 | 12,461 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 47 |
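# Usage sketch (illustrative, assumes "data.arrow" exists, e.g. produced by ArrowWriter above):
# `read_table` memory-maps the file by default and only copies it into RAM with `in_memory=True`.
mmapped_table = ArrowReader.read_table("data.arrow")                    # MemoryMappedTable
in_memory_table = ArrowReader.read_table("data.arrow", in_memory=True)  # InMemoryTable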
class ParquetReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses memory mapping on parquet files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ParquetReader.
Args:
path (str): path where parquet files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "parquet"
def _get_table_from_filename(self, filename_skip_take, **kwargs):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
# Parquet read_table always loads data in memory, independently of memory_map
pa_table = pq.read_table(filename, memory_map=True)
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
pa_table = pa_table.slice(skip, take)
return pa_table | class_definition | 12,464 | 13,736 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 48 |
class _AbsoluteInstruction:
"""A machine friendly slice: defined absolute positive boundaries."""
splitname: str
from_: int # uint (starting index).
to: int # uint (ending index). | class_definition | 13,763 | 13,961 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 49 |
class _RelativeInstruction:
"""Represents a single parsed slicing instruction, can use % and negatives."""
splitname: str
from_: Optional[int] = None # int (starting index) or None if no lower boundary.
to: Optional[int] = None # int (ending index) or None if no upper boundary.
unit: Optional[str] = None
rounding: Optional[str] = None
def __post_init__(self):
if self.unit is not None and self.unit not in ["%", "abs"]:
raise ValueError("unit must be either % or abs")
if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
raise ValueError("rounding must be either closest or pct1_dropremainder")
if self.unit != "%" and self.rounding is not None:
raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
if self.unit == "%" and self.to is not None and abs(self.to) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
# Update via __dict__ due to instance being "frozen"
self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding | class_definition | 13,988 | 15,346 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 50 |
class ReadInstruction:
"""Reading instruction for a dataset.
Examples::
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%'))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%]+train[1:-1]'))
ds = datasets.load_dataset('mnist', split=(
datasets.ReadInstruction('test', to=33, unit='%') +
datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%](pct1_dropremainder)'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
# 10-fold validation:
tests = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
for k in range(0, 100, 10)])
trains = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
for k in range(0, 100, 10)])
"""
def _init(self, relative_instructions):
# Private initializer.
self._relative_instructions = relative_instructions
@classmethod
def _read_instruction_from_relative_instructions(cls, relative_instructions):
"""Returns ReadInstruction obj initialized with relative_instructions."""
# Use __new__ to bypass __init__, which is used by the public API and is not convenient here.
result = cls.__new__(cls)
result._init(relative_instructions) # pylint: disable=protected-access
return result
def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
"""Initialize ReadInstruction.
Args:
split_name (str): name of the split to read. Eg: 'train'.
rounding (str, optional): The rounding behaviour to use when percent slicing is
used. Ignored when slicing with absolute indices.
Possible values:
- 'closest' (default): The specified percentages are rounded to the
closest value. Use this if you want the specified percents to be as
exact as possible.
- 'pct1_dropremainder': the specified percentages are treated as
multiple of 1%. Use this option if you want consistency. Eg:
len(5%) == 5 * len(1%).
Using this option, one might not be able to use the full set of
examples, if the number of those is not a multiple of 100.
from_ (int):
to (int): alternative way of specifying slicing boundaries. If any of
the {from_, to, unit} arguments is used, slicing cannot be specified as
a string.
unit (str): optional, one of:
'%': to set the slicing unit as percents of the split size.
'abs': to set the slicing unit as absolute numbers.
"""
# This constructor is not always called. See factory method
# `_read_instruction_from_relative_instructions`. Common init instructions
# MUST be placed in the _init method.
self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
@classmethod
def from_spec(cls, spec):
"""Creates a `ReadInstruction` instance out of a string spec.
Args:
spec (`str`):
Split(s) + optional slice(s) to read + optional rounding
if percents are used as the slicing unit. A slice can be specified,
using absolute numbers (`int`) or percentages (`int`).
Examples:
```
test: test split.
test + validation: test split + validation split.
test[10:]: test split, minus its first 10 records.
test[:10%]: first 10% of the records of the test split.
test[:20%](pct1_dropremainder): first 20% of the records, rounded with the pct1_dropremainder rounding.
test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
```
Returns:
ReadInstruction instance.
"""
spec = str(spec) # Need to convert to str in case of NamedSplit instance.
subs = _ADDITION_SEP_RE.split(spec)
if not subs:
raise ValueError(f"No instructions could be built out of {spec}")
instruction = _str_to_read_instruction(subs[0])
return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
def to_spec(self):
rel_instr_specs = []
for rel_instr in self._relative_instructions:
rel_instr_spec = rel_instr.splitname
if rel_instr.from_ is not None or rel_instr.to is not None:
from_ = rel_instr.from_
to = rel_instr.to
unit = rel_instr.unit
rounding = rel_instr.rounding
unit = unit if unit == "%" else ""
from_ = str(from_) + unit if from_ is not None else ""
to = str(to) + unit if to is not None else ""
slice_str = f"[{from_}:{to}]"
rounding_str = (
f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
)
rel_instr_spec += slice_str + rounding_str
rel_instr_specs.append(rel_instr_spec)
return "+".join(rel_instr_specs)
def __add__(self, other):
"""Returns a new ReadInstruction obj, result of appending other to self."""
if not isinstance(other, ReadInstruction):
msg = "ReadInstruction can only be added to another ReadInstruction obj."
raise TypeError(msg)
self_ris = self._relative_instructions
other_ris = other._relative_instructions # pylint: disable=protected-access
if (
self_ris[0].unit != "abs"
and other_ris[0].unit != "abs"
and self._relative_instructions[0].rounding != other_ris[0].rounding
):
raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
return self._read_instruction_from_relative_instructions(self_ris + other_ris)
def __str__(self):
return self.to_spec()
def __repr__(self):
return f"ReadInstruction({self._relative_instructions})"
def to_absolute(self, name2len):
"""Translate instruction into a list of absolute instructions.
Those absolute instructions are then to be added together.
Args:
name2len (`dict`):
Associating split names to number of examples.
Returns:
list of _AbsoluteInstruction instances (corresponds to the + in spec).
"""
return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions] | class_definition | 17,502 | 25,130 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py | null | 51 |
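# Usage sketch (illustrative): `from_spec` parses a split spec into relative instructions,
# `to_spec` serializes them back, and `+` concatenates two instructions.
ri = ReadInstruction.from_spec("test[:33%]+train[1:-1]")
assert ri.to_spec() == "test[:33%]+train[1:-1]"
ri2 = ReadInstruction("test", to=33, unit="%") + ReadInstruction("train", from_=1, to=-1, unit="abs")
assert ri2.to_spec() == "test[:33%]+train[1:-1]"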
class InvalidConfigName(ValueError):
pass | class_definition | 3,042 | 3,087 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py | null | 52 |
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
`DatasetBuilder` subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
Attributes:
name (`str`, defaults to `default`):
The name of the configuration.
version (`Version` or `str`, defaults to `0.0.0`):
The version of the configuration.
data_dir (`str`, *optional*):
Path to the directory containing the source data.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
description (`str`, *optional*):
A human description of the configuration.
"""
name: str = "default"
version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
data_dir: Optional[str] = None
data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
description: Optional[str] = None
def __post_init__(self):
# The config name is used to name the cache directory.
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in self.name:
raise InvalidConfigName(
f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
f"They could create issues when creating a directory for this config on Windows filesystem."
)
if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
def __eq__(self, o):
# we need to override the default dataclass __eq__ since it doesn't check for
# other attributes than the ones of the signature.
if set(self.__dict__.keys()) != set(o.__dict__.keys()):
return False
return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[Features] = None,
) -> str:
"""
The config id is used to build the cache directory.
By default it is equal to the config name.
However the name of a config is not sufficient to have a unique identifier for the dataset being generated
since it doesn't take into account:
- the config kwargs that can be used to overwrite attributes
- the custom features used to write the dataset
- the data_files for json/text/csv/pandas datasets
Therefore the config id is just the config name with an optional suffix based on these.
"""
# Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
suffix: Optional[str] = None
config_kwargs_to_add_to_suffix = config_kwargs.copy()
# name and version are already used to build the cache directory
config_kwargs_to_add_to_suffix.pop("name", None)
config_kwargs_to_add_to_suffix.pop("version", None)
# data dir handling (when specified it points to the manually downloaded data):
# it was previously ignored before the introduction of config id because we didn't want
# to change the config name. Now it's fine to take it into account for the config id.
# config_kwargs_to_add_to_suffix.pop("data_dir", None)
if "data_dir" in config_kwargs_to_add_to_suffix:
if config_kwargs_to_add_to_suffix["data_dir"] is None:
config_kwargs_to_add_to_suffix.pop("data_dir", None)
else:
# canonicalize the data dir to avoid two paths to the same location having different
# hashes
data_dir = config_kwargs_to_add_to_suffix["data_dir"]
data_dir = os.path.normpath(data_dir)
config_kwargs_to_add_to_suffix["data_dir"] = data_dir
if config_kwargs_to_add_to_suffix:
# we don't care about the order of the kwargs
config_kwargs_to_add_to_suffix = {
k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
}
if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
suffix = ",".join(
str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
)
if len(suffix) > 32: # hash if too long
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
else:
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
if custom_features is not None:
m = Hasher()
if suffix:
m.update(suffix)
m.update(custom_features)
suffix = m.hexdigest()
if suffix:
config_id = self.name + "-" + suffix
if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
config_id = self.name + "-" + Hasher.hash(suffix)
return config_id
else:
return self.name
def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
if isinstance(self.data_files, DataFilesPatternsDict):
base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
            self.data_files = self.data_files.resolve(base_path, download_config)
class DatasetBuilder:
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
- [`DatasetBuilder.info`]: Documents the dataset, including feature
names, types, shapes, version, splits, citation, etc.
- [`DatasetBuilder.download_and_prepare`]: Downloads the source data
and writes it to disk.
- [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a [`BuilderConfig`] subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
Args:
cache_dir (`str`, *optional*):
Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
dataset_name (`str`, *optional*):
Name of the dataset, if different from the builder name. Useful for packaged builders
like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
that use the same packaged builder.
config_name (`str`, *optional*):
Name of the dataset configuration.
It affects the data generated on disk. Different configurations will have their own subdirectories and
versions.
If not provided, the default configuration is used (if it exists).
<Added version="2.3.0">
Parameter `name` was renamed to `config_name`.
</Added>
hash (`str`, *optional*):
Hash specific to the dataset code. Used to update the caching directory when the
dataset loading script code is updated (to avoid reusing old data).
The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files.
This can be a remote URL.
features ([`Features`], *optional*):
Features types to use with this dataset.
It can be used to change the [`Features`] types of a dataset, for example.
token (`str` or `bool`, *optional*):
String or boolean to use as Bearer token for remote files on the
Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
repo_id (`str`, *optional*):
ID of the dataset repository.
Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
For builders like "csv" or "json" that need the user to specify data files. They can be either
local or remote files. For convenience, you can use a `DataFilesDict`.
data_dir (`str`, *optional*):
Path to directory containing source data file(s).
Use only if `data_files` is not passed, in which case it is equivalent to passing
`os.path.join(data_dir, "**")` as `data_files`.
For builders that require manual download, it must be the path to the local directory containing the
manually downloaded data.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
writer_batch_size (`int`, *optional*):
Batch size used by the ArrowWriter.
It defines the number of samples that are kept in memory before writing them
and also the length of the arrow chunks.
None means that the ArrowWriter will use its default value.
**config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
configuration class is [`BuilderConfig`] or a subclass of it.
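    Example:
    A minimal sketch of the typical flow using the three key methods above; the dataset and config
    names are only examples, and `builder.info` then documents the features, splits, etc.:
    ```py
    >>> from datasets import load_dataset_builder
    >>> builder = load_dataset_builder("nyu-mll/glue", "sst2")  # pick a configuration by name
    >>> builder.download_and_prepare()  # download the source data and write it to disk
    >>> ds = builder.as_dataset(split="train")  # load the prepared split as a `Dataset`
    ```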
"""
# Default version
VERSION = None # Default version set in BuilderConfig
# Class for the builder config.
BUILDER_CONFIG_CLASS = BuilderConfig
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Optional default config name to be used when name is None
DEFAULT_CONFIG_NAME = None
# Default batch size used by the ArrowWriter
# It defines the number of samples that are kept in memory before writing them
# and also the length of the arrow chunks
# None means that the ArrowWriter will use its default value
DEFAULT_WRITER_BATCH_SIZE = None
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[DatasetInfo] = None,
features: Optional[Features] = None,
token: Optional[Union[bool, str]] = None,
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
**config_kwargs,
):
# DatasetBuilder name
self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
self.hash: Optional[str] = hash
self.base_path = base_path
self.token = token
self.repo_id = repo_id
self.storage_options = storage_options or {}
self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
if data_files is not None and not isinstance(data_files, DataFilesDict):
data_files = DataFilesDict.from_patterns(
sanitize_patterns(data_files),
base_path=base_path,
download_config=DownloadConfig(token=token, storage_options=self.storage_options),
)
        # Prepare config: BuilderConfig contains name, version and description but can be extended by each dataset
if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
config_kwargs["features"] = features
if data_files is not None:
config_kwargs["data_files"] = data_files
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
self.config_kwargs = config_kwargs
self.config, self.config_id = self._create_builder_config(
config_name=config_name,
custom_features=features,
**config_kwargs,
)
        # prepare info: DatasetInfo is a standardized dataclass across all datasets
        # Prefill DatasetInfo
if info is None:
# TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
info = self.get_exported_dataset_info()
info.update(self._info())
info.builder_name = self.name
info.dataset_name = self.dataset_name
info.config_name = self.config.name
info.version = self.config.version
self.info = info
# update info with user specified infos
if features is not None:
self.info.features = features
# Prepare data dirs:
# cache_dir can be a remote bucket on GCS or S3
self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
self._cache_dir_root = (
self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
)
self._cache_downloaded_dir = (
posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
if cache_dir
else str(config.DOWNLOADED_DATASETS_PATH)
)
self._cache_downloaded_dir = (
self._cache_downloaded_dir
if is_remote_url(self._cache_downloaded_dir)
else os.path.expanduser(self._cache_downloaded_dir)
)
# In case there exists a legacy cache directory
self._legacy_relative_data_dir = None
self._cache_dir = self._build_cache_dir()
if not is_remote_url(self._cache_dir_root):
os.makedirs(self._cache_dir_root, exist_ok=True)
lock_path = os.path.join(
self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
)
with FileLock(lock_path):
if os.path.exists(self._cache_dir): # check if data exist
if len(os.listdir(self._cache_dir)) > 0:
if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
logger.info("Overwrite dataset info from restored data version if exists.")
self.info = DatasetInfo.from_directory(self._cache_dir)
else: # dir exists but no data, remove the empty dir as data aren't available anymore
logger.warning(
f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
)
os.rmdir(self._cache_dir)
# Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
self._output_dir = self._cache_dir
self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
# Set download manager
self.dl_manager = None
# Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
self._record_infos = False
# Set in `.download_and_prepare` once the format of the generated dataset is known
self._file_format = None
# Enable streaming (e.g. it patches "open" to work with remote files)
extend_dataset_builder_for_streaming(self)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-enable streaming, since patched functions are not kept when pickling
extend_dataset_builder_for_streaming(self)
# Must be set for datasets that use 'data_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
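    # A minimal sketch (hypothetical subclass and URL) of how such a builder could override it:
    #
    #     class MyManualDataBuilder(GeneratorBasedBuilder):
    #         @property
    #         def manual_download_instructions(self) -> Optional[str]:
    #             return (
    #                 "Please download the archive manually from https://example.com and pass "
    #                 "the folder with `load_dataset(..., data_dir='<path/to/folder>')`."
    #             )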
@property
def manual_download_instructions(self) -> Optional[str]:
return None
def _check_legacy_cache(self) -> Optional[str]:
"""Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and self.config.name == "default"
):
from .packaged_modules import _PACKAGED_DATASETS_MODULES
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
config_id = config_name + self.config_id[len(self.config.name) :]
hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
legacy_relative_data_dir = posixpath.join(
self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
config_id,
"0.0.0",
hash,
)
legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
if os.path.isdir(legacy_cache_dir):
return legacy_relative_data_dir
def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
"""Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and not (set(self.config_kwargs) - {"data_files", "data_dir"})
):
from .packaged_modules import _PACKAGED_DATASETS_MODULES_2_15_HASHES
from .utils._dill import Pickler
def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
"""
Used to update hash of packaged modules which is used for creating unique cache directories to reflect
different config parameters which are passed in metadata from readme.
"""
params_to_exclude = {"config_name", "version", "description"}
params_to_add_to_hash = {
param: value
for param, value in sorted(config_parameters.items())
if param not in params_to_exclude
}
m = Hasher()
m.update(hash)
m.update(params_to_add_to_hash)
return m.hexdigest()
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
hash = _PACKAGED_DATASETS_MODULES_2_15_HASHES.get(self.name, "missing")
if (
dataset_module.builder_configs_parameters.metadata_configs
and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
):
hash = update_hash_with_config_parameters(
hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
)
legacy_relative_data_dir = posixpath.join(
self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
config_id,
"0.0.0",
hash,
)
legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
if os.path.isdir(legacy_cache_dir):
return legacy_relative_data_dir
@classmethod
def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
"""Empty dict if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder('vivos')
>>> ds_builder.get_all_exported_dataset_infos()
{'default': DatasetInfo(description='', citation='', homepage='', license='', features={'speaker_id': Value(dtype='string', id=None), 'path': Value(dtype='string', id=None), 'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'sentence': Value(dtype='string', id=None)}, post_processed=None, supervised_keys=None, builder_name=None, dataset_name=None, config_name='default', version=None, splits={'train': SplitInfo(name='train', num_bytes=1722002133, num_examples=11660, shard_lengths=None, dataset_name=None), 'test': SplitInfo(name='test', num_bytes=86120227, num_examples=760, shard_lengths=None, dataset_name=None)}, download_checksums=None, download_size=1475540500, post_processing_size=None, dataset_size=1808122360, size_in_bytes=None)}
```
"""
return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
def get_exported_dataset_info(self) -> DatasetInfo:
"""Empty `DatasetInfo` if doesn't exist
Example:
```py
>>> from datasets import load_dataset_builder
        >>> ds_builder = load_dataset_builder('vivos')
>>> ds_builder.get_exported_dataset_info()
DatasetInfo(description='', citation='', homepage='', license='', features={'speaker_id': Value(dtype='string', id=None), 'path': Value(dtype='string', id=None), 'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'sentence': Value(dtype='string', id=None)}, post_processed=None, supervised_keys=None, builder_name=None, dataset_name=None, config_name='default', version=None, splits={'train': SplitInfo(name='train', num_bytes=1722002133, num_examples=11660, shard_lengths=None, dataset_name=None), 'test': SplitInfo(name='test', num_bytes=86120227, num_examples=760, shard_lengths=None, dataset_name=None)}, download_checksums=None, download_size=1475540500, post_processing_size=None, dataset_size=1808122360, size_in_bytes=None)
```
"""
return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
def _create_builder_config(
self, config_name=None, custom_features=None, **config_kwargs
) -> Tuple[BuilderConfig, str]:
"""Create and validate BuilderConfig object as well as a unique config id for this config.
Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
        config_kwargs override the default kwargs in config
"""
builder_config = None
# try default config
if config_name is None and self.BUILDER_CONFIGS:
if self.DEFAULT_CONFIG_NAME is not None:
builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
else:
if len(self.BUILDER_CONFIGS) > 1:
if not config_kwargs:
example_of_usage = (
f"load_dataset('{self.repo_id or self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
)
raise ValueError(
"Config name is missing."
f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ f"\nExample of usage:\n\t`{example_of_usage}`"
)
else:
builder_config = self.BUILDER_CONFIGS[0]
logger.info(
f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
)
# try to get config by name
if isinstance(config_name, str):
builder_config = self.builder_configs.get(config_name)
if builder_config is None and self.BUILDER_CONFIGS:
raise ValueError(
f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
)
# if not using an existing config, then create a new config on the fly
if not builder_config:
if config_name is not None:
config_kwargs["name"] = config_name
elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
# Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
config_kwargs["version"] = self.VERSION
builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
# otherwise use the config_kwargs to overwrite the attributes
else:
builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
for key, value in config_kwargs.items():
if value is not None:
if not hasattr(builder_config, key):
raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
setattr(builder_config, key, value)
if not builder_config.name:
raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
# resolve data files if needed
builder_config._resolve_data_files(
base_path=self.base_path,
download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
)
# compute the config id that is going to be used for caching
config_id = builder_config.create_config_id(
config_kwargs,
custom_features=custom_features,
)
is_custom = (config_id not in self.builder_configs) and config_id != "default"
if is_custom:
logger.info(f"Using custom data configuration {config_id}")
else:
if (
builder_config.name in self.builder_configs
and builder_config != self.builder_configs[builder_config.name]
):
raise ValueError(
"Cannot name a custom BuilderConfig the same as an available "
f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
)
if not builder_config.version:
raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
return builder_config, config_id
@classproperty
@classmethod
@memoize()
def builder_configs(cls) -> Dict[str, BuilderConfig]:
"""Dictionary of pre-defined configurations for this builder class."""
configs = {config.name: config for config in cls.BUILDER_CONFIGS}
if len(configs) != len(cls.BUILDER_CONFIGS):
names = [config.name for config in cls.BUILDER_CONFIGS]
raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
return configs
@property
def cache_dir(self):
return self._cache_dir
def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
# Check for the legacy cache directory template (datasets<3.0.0)
self._legacy_relative_data_dir = (
self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
)
self._cache_dir = self._build_cache_dir()
self._output_dir = self._cache_dir
def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
"""Relative path of this dataset in cache_dir:
Will be:
self.dataset_name/self.config.version/self.hash/
or if a repo_id with a namespace has been specified:
self.namespace___self.dataset_name/self.config.version/self.hash/
If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
"""
if self._legacy_relative_data_dir is not None and with_version and with_hash:
return self._legacy_relative_data_dir
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
if with_version:
builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
if with_hash and self.hash and isinstance(self.hash, str):
builder_data_dir = posixpath.join(builder_data_dir, self.hash)
return builder_data_dir
def _build_cache_dir(self):
"""Return the data directory for the current version."""
builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
def _other_versions_on_disk():
"""Returns previous versions on disk."""
if not os.path.exists(builder_data_dir):
return []
version_dirnames = []
for dir_name in os.listdir(builder_data_dir):
try:
version_dirnames.append((utils.Version(dir_name), dir_name))
except ValueError: # Invalid version (ex: incomplete data dir)
pass
version_dirnames.sort(reverse=True)
return version_dirnames
# Check and warn if other versions exist
if not is_remote_url(builder_data_dir):
version_dirs = _other_versions_on_disk()
if version_dirs:
other_version = version_dirs[0][0]
if other_version != self.config.version:
warn_msg = (
f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
f"cache_dir {self._cache_dir_root}. Using currently defined version "
f"{str(self.config.version)}."
)
logger.warning(warn_msg)
return version_data_dir
@abc.abstractmethod
def _info(self) -> DatasetInfo:
"""Construct the DatasetInfo object. See `DatasetInfo` for details.
Warning: This function is only called once and the result is cached for all
following .info() calls.
Returns:
info: (DatasetInfo) The dataset information
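        Example:
        A minimal sketch of an implementation in a subclass (description and feature names are illustrative):
        ```py
        >>> import datasets
        >>> def _info(self):
        ...     return datasets.DatasetInfo(
        ...         description="My dataset of movie reviews.",
        ...         features=datasets.Features(
        ...             {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=["neg", "pos"])}
        ...         ),
        ...     )
        ```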
"""
raise NotImplementedError
@classmethod
def get_imported_module_dir(cls):
"""Return the path of the module of this class or subclass."""
return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
def _rename(self, src: str, dst: str):
rename(self._fs, src, dst)
def download_and_prepare(
self,
output_dir: Optional[str] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
verification_mode: Optional[Union[VerificationMode, str]] = None,
dl_manager: Optional[DownloadManager] = None,
base_path: Optional[str] = None,
file_format: str = "arrow",
max_shard_size: Optional[Union[int, str]] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**download_and_prepare_kwargs,
):
"""Downloads and prepares dataset for reading.
Args:
output_dir (`str`, *optional*):
Output directory for the dataset.
Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
<Added version="2.5.0"/>
download_config (`DownloadConfig`, *optional*):
Specific download configuration parameters.
download_mode ([`DownloadMode`] or `str`, *optional*):
Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
dl_manager (`DownloadManager`, *optional*):
Specific `DownloadManger` to use.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files. This can be a remote url.
If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
file_format (`str`, *optional*):
Format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
<Added version="2.5.0"/>
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
<Added version="2.5.0"/>
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the caching file-system backend, if any.
<Added version="2.5.0"/>
**download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
Example:
Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare()
```
Download and prepare the dataset as sharded Parquet files locally:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("./output_dir", file_format="parquet")
```
Download and prepare the dataset as sharded Parquet files in a cloud storage:
```py
>>> from datasets import load_dataset_builder
>>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
>>> builder = load_dataset_builder("rotten_tomatoes")
>>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
```
"""
output_dir = output_dir if output_dir is not None else self._cache_dir
# output_dir can be a remote bucket on GCS or S3
fs, output_dir = url_to_fs(output_dir, **(storage_options or {}))
self._fs = fs
self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
base_path = base_path if base_path is not None else self.base_path
if file_format is not None and file_format not in ["arrow", "parquet"]:
raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
self._file_format = file_format
if self._fs._strip_protocol(self._output_dir) == "":
# We don't support the root directory, because it has no dirname,
# and we need a dirname to use a <dirname>.incomplete directory
# when the dataset is being written
raise RuntimeError(
f"Unable to download and prepare the dataset at the root {self._output_dir}. "
f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
)
if dl_manager is None:
if download_config is None:
download_config = DownloadConfig(
cache_dir=self._cache_downloaded_dir,
force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
use_etag=False,
num_proc=num_proc,
token=self.token,
storage_options=self.storage_options,
) # We don't use etag for data files to speed up the process
dl_manager = DownloadManager(
dataset_name=self.dataset_name,
download_config=download_config,
data_dir=self.config.data_dir,
base_path=base_path,
record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
)
is_local = not is_remote_filesystem(self._fs)
self.dl_manager = dl_manager
# Prevent parallel local disk operations
if is_local:
# Create parent directory of the output_dir to put the lock file in there
Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
lock_path = self._output_dir + "_builder.lock"
# File locking only with local paths; no file locking on GCS or S3
with FileLock(lock_path) if is_local else contextlib.nullcontext():
# Check if the data already exists
data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
# We need to update the info in case some splits were added in the meantime
# for example when calling load_dataset from multiple workers.
self.info = self._load_info()
self.download_post_processing_resources(dl_manager)
return
logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
if is_local: # if cache dir is local, check for available space
if not has_sufficient_disk_space(
self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
):
raise OSError(
f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
)
@contextlib.contextmanager
def incomplete_dir(dirname):
"""Create temporary dir for dirname and rename on exit."""
if not is_local:
self._fs.makedirs(dirname, exist_ok=True)
yield dirname
else:
tmp_dir = dirname + ".incomplete"
os.makedirs(tmp_dir, exist_ok=True)
try:
yield tmp_dir
if os.path.isdir(dirname):
shutil.rmtree(dirname)
# LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
shutil.move(tmp_dir, dirname)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
            # Logged right before the progress bar so the user has the information
            # needed to cancel the download/preparation if needed.
if self.info.size_in_bytes:
logger.info(
f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
f"post-processed: {size_str(self.info.post_processing_size)}, "
f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
)
else:
_dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
self._check_manual_download(dl_manager)
# Create a tmp dir and rename to self._output_dir on successful exit.
with incomplete_dir(self._output_dir) as tmp_output_dir:
            # Temporarily assign _output_dir to tmp_output_dir to avoid having to forward
            # it to every sub function.
with temporary_assignment(self, "_output_dir", tmp_output_dir):
prepare_split_kwargs = {"file_format": file_format}
if max_shard_size is not None:
prepare_split_kwargs["max_shard_size"] = max_shard_size
if num_proc is not None:
prepare_split_kwargs["num_proc"] = num_proc
self._download_and_prepare(
dl_manager=dl_manager,
verification_mode=verification_mode,
**prepare_split_kwargs,
**download_and_prepare_kwargs,
)
# Sync info
self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
if self.info.download_size is not None:
self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
# Save info
self._save_info()
# Download post processing resources
self.download_post_processing_resources(dl_manager)
logger.info(
f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
f"Subsequent calls will reuse this data."
)
def _check_manual_download(self, dl_manager):
if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
raise ManualDownloadError(
textwrap.dedent(
f"""\
The dataset {self.dataset_name} with config {self.config.name} requires manual data.
Please follow the manual download instructions:
{self.manual_download_instructions}
Manual data can be loaded with:
datasets.load_dataset("{self.repo_id or self.dataset_name}", data_dir="<path/to/manual/data>")"""
)
)
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
"""Downloads and prepares dataset for reading.
        This is the internal implementation to override, called when the user calls
        `download_and_prepare`. It should download all required data and generate
        the pre-processed dataset files.
Args:
dl_manager ([`DownloadManager`]):
`DownloadManager` used to download and cache data.
verification_mode ([`VerificationMode`]):
if `ALL_CHECKS`, perform all the verifications including checksums.
if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
if `NO_CHECKS`, do not perform any verification.
prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
"""
# Generating data for all splits
split_dict = SplitDict(dataset_name=self.dataset_name)
split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
# Checksums verification
if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
verify_checksums(
self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
)
# Build splits
for split_generator in split_generators:
if str(split_generator.split_info.name).lower() == "all":
raise ValueError(
"`all` is a special split keyword corresponding to the "
"union of all splits, so cannot be used as key in "
"._split_generator()."
)
logger.info(f"Generating {split_generator.split_info.name} split")
split_dict.add(split_generator.split_info)
try:
# Prepare split will record examples associated to the split
self._prepare_split(split_generator, **prepare_split_kwargs)
except OSError as e:
raise OSError(
"Cannot find data file. "
+ (self.manual_download_instructions or "")
+ "\nOriginal error:\n"
+ str(e)
) from None
            # If check_duplicates is set to True, catch DuplicatedKeysError and re-raise it with a fix message
except DuplicatedKeysError as e:
raise DuplicatedKeysError(
e.key,
e.duplicate_key_indices,
fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
) from None
dl_manager.manage_extracted_files()
if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
verify_splits(self.info.splits, split_dict)
# Update the info object with the splits.
self.info.splits = split_dict
self.info.download_size = dl_manager.downloaded_size
def download_post_processing_resources(self, dl_manager):
for split in self.info.splits or []:
for resource_name, resource_file_name in self._post_processing_resources(split).items():
                if is_remote_filesystem(self._fs):
raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resource_path = os.path.join(self._output_dir, resource_file_name)
if not os.path.exists(resource_path):
downloaded_resource_path = self._download_post_processing_resources(
split, resource_name, dl_manager
)
if downloaded_resource_path:
logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
shutil.move(downloaded_resource_path, resource_path)
def _load_info(self) -> DatasetInfo:
return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_info(self):
file_lock = (
FileLock(self._output_dir + "_info.lock")
if not is_remote_filesystem(self._fs)
else contextlib.nullcontext()
)
with file_lock:
self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
def _save_infos(self):
file_lock = (
FileLock(self._output_dir + "_infos.lock")
if not is_remote_filesystem(self._fs)
else contextlib.nullcontext()
)
with file_lock:
DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
def _make_split_generators_kwargs(self, prepare_split_kwargs):
"""Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
del prepare_split_kwargs
return {}
def as_dataset(
self,
split: Optional[Split] = None,
run_post_process=True,
verification_mode: Optional[Union[VerificationMode, str]] = None,
in_memory=False,
) -> Union[Dataset, DatasetDict]:
"""Return a Dataset for the specified split.
Args:
split (`datasets.Split`):
Which subset of the data to return.
run_post_process (`bool`, defaults to `True`):
Whether to run post-processing dataset transforms and/or add
indexes.
verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
Verification mode determining the checks to run on the
downloaded/processed dataset information (checksums/size/splits/...).
<Added version="2.9.1"/>
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
datasets.Dataset
Example:
```py
>>> from datasets import load_dataset_builder
>>> builder = load_dataset_builder('rotten_tomatoes')
>>> builder.download_and_prepare()
>>> ds = builder.as_dataset(split='train')
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 8530
})
```
"""
if self._file_format is not None and self._file_format != "arrow":
raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
if is_remote_filesystem(self._fs):
raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
if not os.path.exists(self._output_dir):
raise FileNotFoundError(
f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
"builder.download_and_prepare(), or use "
"datasets.load_dataset() before trying to access the Dataset object."
)
logger.debug(f"Constructing Dataset for split {split or ', '.join(self.info.splits)}, from {self._output_dir}")
# By default, return all splits
if split is None:
split = {s: s for s in self.info.splits}
verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
# Create a dataset for each of the given splits
datasets = map_nested(
partial(
self._build_single_dataset,
run_post_process=run_post_process,
verification_mode=verification_mode,
in_memory=in_memory,
),
split,
map_tuple=True,
disable_tqdm=True,
)
if isinstance(datasets, dict):
datasets = DatasetDict(datasets)
return datasets
def _build_single_dataset(
self,
split: Union[str, ReadInstruction, Split],
run_post_process: bool,
verification_mode: VerificationMode,
in_memory: bool = False,
):
"""as_dataset for a single split."""
if not isinstance(split, ReadInstruction):
split = str(split)
if split == "all":
split = "+".join(self.info.splits.keys())
split = Split(split)
# Build base dataset
ds = self._as_dataset(
split=split,
in_memory=in_memory,
)
if run_post_process:
for resource_file_name in self._post_processing_resources(split).values():
if os.sep in resource_file_name:
raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
resources_paths = {
resource_name: os.path.join(self._output_dir, resource_file_name)
for resource_name, resource_file_name in self._post_processing_resources(split).items()
}
post_processed = self._post_process(ds, resources_paths)
if post_processed is not None:
ds = post_processed
recorded_checksums = {}
record_checksums = False
for resource_name, resource_path in resources_paths.items():
size_checksum = get_size_checksum_dict(resource_path)
recorded_checksums[resource_name] = size_checksum
if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
expected_checksums = None
else:
expected_checksums = self.info.post_processed.resources_checksums.get(split)
verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
if self.info.post_processed is None:
self.info.post_processed = PostProcessedInfo()
if self.info.post_processed.resources_checksums is None:
self.info.post_processed.resources_checksums = {}
self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
self.info.post_processing_size = sum(
checksums_dict["num_bytes"]
for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
for checksums_dict in split_checksums_dicts.values()
)
if self.info.dataset_size is not None and self.info.download_size is not None:
self.info.size_in_bytes = (
self.info.dataset_size + self.info.download_size + self.info.post_processing_size
)
self._save_info()
ds._info.post_processed = self.info.post_processed
ds._info.post_processing_size = self.info.post_processing_size
ds._info.size_in_bytes = self.info.size_in_bytes
if self.info.post_processed.features is not None:
if self.info.post_processed.features.type != ds.features.type:
raise ValueError(
f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
)
else:
ds.info.features = self.info.post_processed.features
return ds
def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
"""Constructs a `Dataset`.
        This is the internal implementation to override, called when the user calls
        `as_dataset`. It should read the pre-processed dataset files and generate
        the `Dataset` object.
Args:
split (`datasets.Split`):
which subset of the data to read.
in_memory (`bool`, defaults to `False`):
Whether to copy the data in-memory.
Returns:
`Dataset`
"""
cache_dir = self._fs._strip_protocol(self._output_dir)
dataset_name = self.dataset_name
if self._check_legacy_cache():
dataset_name = self.name
dataset_kwargs = ArrowReader(cache_dir, self.info).read(
name=dataset_name,
instructions=split,
split_infos=self.info.splits.values(),
in_memory=in_memory,
)
fingerprint = self._get_dataset_fingerprint(split)
return Dataset(fingerprint=fingerprint, **dataset_kwargs)
def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
"""The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
hasher = Hasher()
hasher.update(Path(self._relative_data_dir()).as_posix())
hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
fingerprint = hasher.hexdigest()
return fingerprint
def as_streaming_dataset(
self,
split: Optional[str] = None,
base_path: Optional[str] = None,
) -> Union[Dict[str, IterableDataset], IterableDataset]:
if is_remote_filesystem(self._fs):
raise NotImplementedError(
f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
)
dl_manager = StreamingDownloadManager(
base_path=base_path or self.base_path,
download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
dataset_name=self.dataset_name,
data_dir=self.config.data_dir,
)
self._check_manual_download(dl_manager)
splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
# By default, return all splits
if split is None:
splits_generator = splits_generators
elif split in splits_generators:
splits_generator = splits_generators[split]
else:
raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
# Create a dataset for each of the given splits
datasets = map_nested(
self._as_streaming_dataset_single,
splits_generator,
map_tuple=True,
)
if isinstance(datasets, dict):
datasets = IterableDatasetDict(datasets)
return datasets
def _as_streaming_dataset_single(
self,
splits_generator,
) -> IterableDataset:
ex_iterable = self._get_examples_iterable_for_split(splits_generator)
# add auth to be able to access and decode audio/image files from private repositories.
token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
return IterableDataset(
ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
)
def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
"""Run dataset transforms or add indexes"""
return None
def _post_processing_resources(self, split: str) -> Dict[str, str]:
"""Mapping resource_name -> resource_file_name"""
return {}
def _download_post_processing_resources(
self, split: str, resource_name: str, dl_manager: DownloadManager
) -> Optional[str]:
"""Download the resource using the download manager and return the downloaded path."""
return None
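    # A hypothetical sketch of a subclass using the three post-processing hooks above to attach a
    # FAISS index built as a post-processing resource (resource and column names are illustrative):
    #
    #     def _post_processing_resources(self, split):
    #         return {"embeddings_index": f"{split}_index.faiss"}
    #
    #     def _post_process(self, dataset, resources_paths):
    #         dataset.load_faiss_index("embeddings", resources_paths["embeddings_index"])
    #         return dataset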
@abc.abstractmethod
def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
"""Specify feature dictionary generators and dataset splits.
This function returns a list of `SplitGenerator`s defining how to generate
data and what splits to use.
Example:
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={'file': 'train_data.zip'},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={'file': 'test_data.zip'},
),
]
The above code will first call `_generate_examples(file='train_data.zip')`
to write the train data, then `_generate_examples(file='test_data.zip')` to
write the test data.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
Note that for datasets without a `VALIDATION` split, you can use a
fraction of the `TRAIN` data for evaluation as you iterate on your model
so as not to overfit to the `TEST` data.
For downloads and extractions, use the given `download_manager`.
Note that the `DownloadManager` caches downloads, so it is fine to have each
generator attempt to download the source data.
        A good practice is to download all data in this function, and then
        distribute the relevant parts to each split with the `gen_kwargs` argument (see the example below).
Args:
dl_manager (`Union[DownloadManager, StreamingDownloadManager]`):
Download manager to download the data
Returns:
`list<SplitGenerator>`.
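        Example:
        A minimal sketch (with a hypothetical URL and file names) of downloading with the download
        manager and distributing the extracted files to the splits via `gen_kwargs`:
        ```py
        >>> import os
        >>> import datasets
        >>> def _split_generators(self, dl_manager):
        ...     archive_path = dl_manager.download_and_extract("https://example.com/data.zip")
        ...     return [
        ...         datasets.SplitGenerator(
        ...             name=datasets.Split.TRAIN,
        ...             gen_kwargs={"filepath": os.path.join(archive_path, "train.jsonl")},
        ...         ),
        ...         datasets.SplitGenerator(
        ...             name=datasets.Split.TEST,
        ...             gen_kwargs={"filepath": os.path.join(archive_path, "test.jsonl")},
        ...         ),
        ...     ]
        ```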
"""
raise NotImplementedError()
@abc.abstractmethod
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
"""Generate the examples and record them on disk.
Args:
split_generator (`SplitGenerator`):
Split generator to process
file_format (`str`, *optional*):
format of the data files in which the dataset will be written.
Supported formats: "arrow", "parquet". Default to "arrow" format.
max_shard_size (`Union[str, int]`, *optional*):
Maximum number of bytes written per shard, default is "500MB".
The size is based on uncompressed data size, so in practice your shard files may be smaller than
`max_shard_size` thanks to Parquet compression for example.
num_proc (`int`, *optional*, defaults to `None`):
Number of processes when downloading and generating the dataset locally.
Multiprocessing is disabled by default.
<Added version="2.7.0"/>
**kwargs: Additional kwargs forwarded from _download_and_prepare
"""
raise NotImplementedError()
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
"""Generate the examples on the fly.
Args:
split_generator (`SplitGenerator`):
Split generator to process
"""
        raise NotImplementedError()
class GeneratorBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on dict generators.
`GeneratorBasedBuilder` is a convenience class that abstracts away much
of the data writing and reading of `DatasetBuilder`. It expects subclasses to
implement generators of feature dictionaries across the dataset splits
(`_split_generators`). See the method docstrings for details.
"""
@abc.abstractmethod
def _generate_examples(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
        This function preprocesses the examples from the raw data into the preprocessed
        dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
                * Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
                The key will be hashed and sorted to shuffle examples deterministically,
                    so that generating the dataset multiple times keeps examples in the
                    same order.
example: `dict<str feature_name, feature_value>`, a feature dictionary
ready to be encoded and written to disk. The example will be
encoded with `self.info.features.encode_example({...})`.
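        Example:
        A minimal sketch of an implementation reading a text file line by line (the argument and
        column names are illustrative):
        ```py
        >>> def _generate_examples(self, filepath):
        ...     with open(filepath, encoding="utf-8") as f:
        ...         for line_id, line in enumerate(f):
        ...             # the line number is a unique and deterministic key
        ...             yield line_id, {"text": line.strip()}
        ```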
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
check_duplicate_keys: bool,
file_format="arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[int, str]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
if self.info.splits is not None:
split_info = self.info.splits[split_generator.name]
else:
split_info = split_generator.split_info
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = posixpath.join(self._output_dir, fname)
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
"split_info": split_info,
"check_duplicate_keys": check_duplicate_keys,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert None not in examples_per_job, (
f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
)
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
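            # e.g. (illustrative) a "train" split written as 3 shards in total ends up as:
            #   my_dataset-train-00000-of-00003.arrow ... my_dataset-train-00002-of-00003.arrow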
            def _rename_shard(shard_and_job: Tuple[int, int]):
shard_id, job_id = shard_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shards_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self,
gen_kwargs: dict,
fpath: str,
file_format: str,
max_shard_size: int,
split_info: SplitInfo,
check_duplicate_keys: bool,
job_id: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
generator = self._generate_examples(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for key, record in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
hash_salt=split_info.name,
check_duplicates=check_duplicate_keys,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
example = self.info.features.encode_example(record) if self.info.features is not None else record
writer.write(example, key)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
super()._download_and_prepare(
dl_manager,
verification_mode,
check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
or verification_mode == VerificationMode.ALL_CHECKS,
**prepare_splits_kwargs,
)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
        return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
class ArrowBasedBuilder(DatasetBuilder):
"""Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
@abc.abstractmethod
def _generate_tables(self, **kwargs):
"""Default function generating examples for each `SplitGenerator`.
        This function preprocesses the examples from the raw data into the preprocessed
        dataset files.
This function is called once for each `SplitGenerator` defined in
`_split_generators`. The examples yielded here will be written on
disk.
Args:
**kwargs (additional keyword arguments):
Arguments forwarded from the SplitGenerator.gen_kwargs
Yields:
key: `str` or `int`, a unique deterministic example identification key.
                * Unique: An error will be raised if two examples are yielded with the
same key.
* Deterministic: When generating the dataset twice, the same example
should have the same key.
Good keys can be the image id, or line number if examples are extracted
from a text file.
                The key will be hashed and sorted to shuffle examples deterministically,
                    so that generating the dataset multiple times keeps examples in the
                    same order.
example: `pyarrow.Table`, a feature table
ready to be encoded and written to disk.
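        Example:
        A minimal sketch of an implementation yielding one `pyarrow.Table` per CSV file
        (the argument name and file handling are illustrative):
        ```py
        >>> import pyarrow.csv as pacsv
        >>> def _generate_tables(self, files):
        ...     for file_idx, file in enumerate(files):
        ...         # the file index is a unique and deterministic key
        ...         yield file_idx, pacsv.read_csv(file)
        ```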
"""
raise NotImplementedError()
def _prepare_split(
self,
split_generator: SplitGenerator,
file_format: str = "arrow",
num_proc: Optional[int] = None,
max_shard_size: Optional[Union[str, int]] = None,
):
max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
try:
split_info = self.info.splits[split_generator.name]
except Exception:
split_info = split_generator.split_info
SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = posixpath.join(self._output_dir, fname)
if num_proc and num_proc > 1:
num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
if num_input_shards <= 1:
logger.warning(
f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
)
num_proc = 1
elif num_input_shards < num_proc:
logger.warning(
f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
)
num_proc = num_input_shards
pbar = hf_tqdm(
unit=" examples",
total=split_info.num_examples,
desc=f"Generating {split_info.name} split",
)
_prepare_split_args = {
"fpath": fpath,
"file_format": file_format,
"max_shard_size": max_shard_size,
}
if num_proc is None or num_proc == 1:
result = None
gen_kwargs = split_generator.gen_kwargs
job_id = 0
with pbar:
for job_id, done, content in self._prepare_split_single(
gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
):
if done:
result = content
else:
pbar.update(content)
# wrapping everything into lists for consistency with the multiprocessed code path
assert result is not None, "Failed to retrieve results from prepare_split"
examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
[item] for item in result
]
else:
kwargs_per_job = [
{"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
for job_id, gen_kwargs in enumerate(
_split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
)
]
num_jobs = len(kwargs_per_job)
examples_per_job = [None] * num_jobs
bytes_per_job = [None] * num_jobs
features_per_job = [None] * num_jobs
shards_per_job = [None] * num_jobs
shard_lengths_per_job = [None] * num_jobs
with Pool(num_proc) as pool:
with pbar:
for job_id, done, content in iflatmap_unordered(
pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
):
if done:
# the content is the result of the job
(
examples_per_job[job_id],
bytes_per_job[job_id],
features_per_job[job_id],
shards_per_job[job_id],
shard_lengths_per_job[job_id],
) = content
else:
# the content is the number of examples progress update
pbar.update(content)
assert None not in examples_per_job, (
f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
)
total_shards = sum(shards_per_job)
total_num_examples = sum(examples_per_job)
total_num_bytes = sum(bytes_per_job)
features = features_per_job[0]
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(shard_id_and_job: Tuple[int, int]):
shard_id, job_id = shard_id_and_job
global_shard_id = sum(shards_per_job[:job_id]) + shard_id
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
shard_ids_and_jobs = [
(shard_id, job_id)
for job_id, num_shards in enumerate(shards_per_job)
for shard_id in range(num_shards)
]
thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
split_generator.split_info.shard_lengths = [
shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
]
else:
# don't use any pattern
shard_id, job_id = 0, 0
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
fpath.replace(SUFFIX, ""),
)
if self.info.features is None:
self.info.features = features
def _prepare_split_single(
self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
generator = self._generate_tables(**gen_kwargs)
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
embed_local_files = file_format == "parquet"
shard_lengths = []
total_num_examples, total_num_bytes = 0, 0
shard_id = 0
num_examples_progress_update = 0
try:
writer = writer_class(
features=self.info.features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
_time = time.time()
for _, table in generator:
if max_shard_size is not None and writer._num_bytes > max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
shard_id += 1
writer = writer_class(
features=writer._features,
path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
writer_batch_size=self._writer_batch_size,
storage_options=self._fs.storage_options,
embed_local_files=embed_local_files,
)
try:
writer.write_table(table)
except CastError as cast_error:
raise DatasetGenerationCastError.from_cast_error(
cast_error=cast_error,
builder_name=self.info.builder_name,
gen_kwargs=gen_kwargs,
token=self.token,
)
num_examples_progress_update += len(table)
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield job_id, False, num_examples_progress_update
num_examples_progress_update = 0
finally:
yield job_id, False, num_examples_progress_update
num_shards = shard_id + 1
num_examples, num_bytes = writer.finalize()
writer.close()
shard_lengths.append(num_examples)
total_num_examples += num_examples
total_num_bytes += num_bytes
except Exception as e:
# Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
e = e.__context__
if isinstance(e, DatasetGenerationError):
raise
raise DatasetGenerationError("An error occurred while generating the dataset") from e
yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs) | class_definition | 79,076 | 90,420 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py | null | 56 |
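# Minimal usage sketch (illustrative): the smallest shape an ArrowBasedBuilder subclass
# usually takes, to show how _generate_tables is meant to be used - it yields
# (key, pyarrow.Table) pairs instead of per-example dicts. The class name, feature
# schema and file names are assumptions.
import pyarrow as pa
import datasets

class MyTextTablesBuilder(datasets.ArrowBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")}))

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": ["data0.txt", "data1.txt"]},  # hypothetical local files
            )
        ]

    def _generate_tables(self, files):
        # one pyarrow.Table per input file, keyed by the file index
        for file_idx, file in enumerate(files):
            with open(file, encoding="utf-8") as f:
                lines = [line.rstrip("\n") for line in f]
            yield file_idx, pa.table({"text": lines})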
class SplitsNotFoundError(ValueError):
pass | class_definition | 1,151 | 1,198 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/inspect.py | null | 57 |
class MissingIndex(Exception):
pass | class_definition | 721 | 760 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 58 |
class SearchResults(NamedTuple):
scores: List[float]
indices: List[int] | class_definition | 763 | 842 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 59 |
class BatchedSearchResults(NamedTuple):
total_scores: List[List[float]]
total_indices: List[List[int]] | class_definition | 845 | 955 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 60 |
class NearestExamplesResults(NamedTuple):
scores: List[float]
examples: dict | class_definition | 958 | 1,042 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 61 |
class BatchedNearestExamplesResults(NamedTuple):
total_scores: List[List[float]]
total_examples: List[dict] | class_definition | 1,045 | 1,160 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 62 |
class BaseIndex:
"""Base class for indexing"""
def search(self, query, k: int = 10, **kwargs) -> SearchResults:
"""
To implement.
This method has to return the scores and the indices of the retrieved examples given a certain query.
"""
raise NotImplementedError
def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the query.
Args:
queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
k (`int`): The number of examples to retrieve per query.
        Output:
            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
            total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
total_scores, total_indices = [], []
for query in queries:
scores, indices = self.search(query, k)
total_scores.append(scores)
total_indices.append(indices)
return BatchedSearchResults(total_scores, total_indices)
def save(self, file: Union[str, PurePath]):
"""Serialize the index on disk"""
raise NotImplementedError
@classmethod
def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
"""Deserialize the index from disk"""
raise NotImplementedError | class_definition | 1,163 | 2,655 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 63 |
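# Minimal usage sketch (illustrative): BaseIndex only requires `search` to be implemented;
# `search_batch` then falls back to the query-by-query loop above. A toy exact-substring
# index over a list of strings, purely for illustration.
from datasets.search import BaseIndex, SearchResults

class ExactMatchIndex(BaseIndex):
    def __init__(self, documents):
        self.documents = list(documents)

    def search(self, query, k: int = 10, **kwargs) -> SearchResults:
        hits = [i for i, doc in enumerate(self.documents) if query in doc][:k]
        return SearchResults(scores=[1.0] * len(hits), indices=hits)

index = ExactMatchIndex(["red apple", "green pear", "red cherry"])
scores, ids = index.search("red", k=2)              # -> indices [0, 2]
batched = index.search_batch(["red", "pear"], k=2)  # uses the default per-query loop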
class ElasticSearchIndex(BaseIndex):
"""
Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
An Elasticsearch server needs to be accessible, and a python client is declared with
```
es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
```
for example.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
if not _has_elasticsearch:
raise ImportError(
"You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
)
if es_client is not None and (host is not None or port is not None):
raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
host = host or "localhost"
port = port or 9200
import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
from elasticsearch import Elasticsearch # noqa: F811
self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
self.es_index_name = (
es_index_name
if es_index_name is not None
else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
)
self.es_index_config = (
es_index_config
if es_index_config is not None
else {
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
}
)
def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
"""
Add documents to the index.
If the documents are inside a certain column, you can specify it using the `column` argument.
"""
index_name = self.es_index_name
index_config = self.es_index_config
self.es_client.indices.create(index=index_name, body=index_config)
number_of_docs = len(documents)
progress = hf_tqdm(unit="docs", total=number_of_docs)
successes = 0
def passage_generator():
if column is not None:
for i, example in enumerate(documents):
yield {"text": example[column], "_id": i}
else:
for i, example in enumerate(documents):
yield {"text": example, "_id": i}
        # add the documents to the ES index in bulk
import elasticsearch as es
for ok, action in es.helpers.streaming_bulk(
client=self.es_client,
index=index_name,
actions=passage_generator(),
):
progress.update(1)
successes += ok
if successes != len(documents):
logger.warning(
f"Some documents failed to be added to ElasticSearch. Failures: {len(documents) - successes}/{len(documents)}"
)
logger.info(f"Indexed {successes:d} documents")
def search(self, query: str, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`str`): The query as a string.
k (`int`): The number of examples to retrieve.
        Output:
            scores (`List[float]`): The retrieval scores of the retrieved examples.
            indices (`List[int]`): The indices of the retrieved examples.
"""
response = self.es_client.search(
index=self.es_index_name,
body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
**kwargs,
)
hits = response["hits"]["hits"]
return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
import concurrent.futures
total_scores, total_indices = [None] * len(queries), [None] * len(queries)
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
for future in concurrent.futures.as_completed(future_to_index):
index = future_to_index[future]
results: SearchResults = future.result()
total_scores[index] = results.scores
total_indices[index] = results.indices
return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores) | class_definition | 2,658 | 7,728 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 64 |
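# Minimal usage sketch (illustrative): indexing a few raw strings with ElasticSearchIndex
# and running a BM25 query. Assumes an Elasticsearch server is reachable on localhost:9200
# and that the `elasticsearch` client is installed; the documents and query are made up.
from datasets.search import ElasticSearchIndex

docs = ["the cat sat on the mat", "dogs love long walks", "cats and dogs"]
es_index = ElasticSearchIndex(host="localhost", port=9200)
es_index.add_documents(docs)               # column=None: index the raw strings
scores, ids = es_index.search("cat", k=2)  # BM25-ranked scores and document ids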
class FaissIndex(BaseIndex):
"""
Dense index using Faiss. It is used to index vectors.
Faiss is a library for efficient similarity search and clustering of dense vectors.
It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
You can find more information about Faiss here:
- For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
- For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
"""
def __init__(
self,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
):
"""
Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
"""
if string_factory is not None and custom_index is not None:
raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
if device is not None and custom_index is not None:
raise ValueError(
"Cannot pass both 'custom_index' and 'device'. "
"Pass 'custom_index' already transferred to the target device instead."
)
self.device = device
self.string_factory = string_factory
self.metric_type = metric_type
self.faiss_index = custom_index
if not _has_faiss:
raise ImportError(
"You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
"A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
"Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
)
def add_vectors(
self,
vectors: Union[np.array, "Dataset"],
column: Optional[str] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: Optional[bool] = None,
):
"""
Add vectors to the index.
If the arrays are inside a certain column, you can specify it using the `column` argument.
"""
import faiss # noqa: F811
if column and not isinstance(vectors.features[column], Sequence):
raise ValueError(
f"Wrong feature type for column '{column}'. Expected 1d array, got {vectors.features[column]}"
)
# Create index
if self.faiss_index is None:
size = len(vectors[0]) if column is None else len(vectors[0][column])
if self.string_factory is not None:
if self.metric_type is None:
index = faiss.index_factory(size, self.string_factory)
else:
index = faiss.index_factory(size, self.string_factory, self.metric_type)
else:
if self.metric_type is None:
index = faiss.IndexFlat(size)
else:
index = faiss.IndexFlat(size, self.metric_type)
self.faiss_index = self._faiss_index_to_device(index, self.device)
logger.info(f"Created faiss index of type {type(self.faiss_index)}")
# Set verbosity level
if faiss_verbose is not None:
self.faiss_index.verbose = faiss_verbose
if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
self.faiss_index.index.verbose = faiss_verbose
if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
self.faiss_index.quantizer.verbose = faiss_verbose
if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
self.faiss_index.clustering_index.verbose = faiss_verbose
# Train
if train_size is not None:
train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
logger.info(f"Training the index with the first {len(train_vecs)} vectors")
self.faiss_index.train(train_vecs)
else:
logger.info("Ignored the training step of the faiss index as `train_size` is None.")
# Add vectors
logger.info(f"Adding {len(vectors)} vectors to the faiss index")
for i in hf_tqdm(range(0, len(vectors), batch_size)):
vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
self.faiss_index.add(vecs)
@staticmethod
def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
"""
Sends a faiss index to a device.
A device can either be a positive integer (GPU id), a negative integer (all GPUs),
or a list of positive integers (select GPUs to use), or `None` for CPU.
"""
# If device is not specified, then it runs on CPU.
if device is None:
return index
import faiss # noqa: F811
# If the device id is given as an integer
if isinstance(device, int):
# Positive integers are directly mapped to GPU ids
if device > -1:
faiss_res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(faiss_res, device, index)
# And negative integers mean using all GPUs
else:
index = faiss.index_cpu_to_all_gpus(index)
# Device ids given as a list mean mapping to those devices specified.
elif isinstance(device, (list, tuple)):
index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
else:
raise TypeError(
f"The argument type: {type(device)} is not expected. "
+ "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
)
return index
def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`np.array`): The query as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            scores (`List[float]`): The retrieval scores of the retrieved examples.
            indices (`List[int]`): The indices of the retrieved examples.
"""
if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
queries = query.reshape(1, -1)
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return SearchResults(scores[0], indices[0].astype(int))
def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the queries.
Args:
queries (`np.array`): The queries as a numpy array.
k (`int`): The number of examples to retrieve.
        Output:
            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
            total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
if len(queries.shape) != 2:
raise ValueError("Shape of query must be 2D")
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return BatchedSearchResults(scores, indices.astype(int))
def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Serialize the FaissIndex on disk"""
import faiss # noqa: F811
if self.device is not None and isinstance(self.device, (int, list, tuple)):
index = faiss.index_gpu_to_cpu(self.faiss_index)
else:
index = self.faiss_index
with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
@classmethod
def load(
cls,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
) -> "FaissIndex":
"""Deserialize the FaissIndex from disk"""
import faiss # noqa: F811
        # Instances of FaissIndex are essentially just wrappers around faiss indices.
faiss_index = cls(device=device)
with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
return faiss_index | class_definition | 7,731 | 17,154 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 65 |
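# Minimal usage sketch (illustrative): standalone FaissIndex usage with random vectors,
# assuming `faiss` and `numpy` are installed. Shapes, k and the file name are made up;
# with the default flat index the returned scores are L2 distances.
import numpy as np
from datasets.search import FaissIndex

vectors = np.random.rand(100, 32).astype(np.float32)  # 100 vectors of dimension 32
index = FaissIndex()                                  # defaults to a flat CPU index
index.add_vectors(vectors, batch_size=50)             # added in two batches
scores, indices = index.search(vectors[0], k=5)       # nearest neighbours of the first vector
index.save("my_index.faiss")                          # serialize to disk (local path or fsspec URI)
index = FaissIndex.load("my_index.faiss")             # and load it back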
class IndexableMixin:
"""Add indexing features to `datasets.Dataset`"""
def __init__(self):
self._indexes: Dict[str, BaseIndex] = {}
def __len__(self):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def is_index_initialized(self, index_name: str) -> bool:
return index_name in self._indexes
def _check_index_is_initialized(self, index_name: str):
if not self.is_index_initialized(index_name):
raise MissingIndex(
f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
)
def list_indexes(self) -> List[str]:
"""List the `colindex_nameumns`/identifiers of all the attached indexes."""
return list(self._indexes)
def get_index(self, index_name: str) -> BaseIndex:
"""List the `index_name`/identifiers of all the attached indexes.
Args:
index_name (`str`): Index name.
Returns:
[`BaseIndex`]
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name]
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
column (`str`): The column of the vectors to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
            string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
index_name = index_name if index_name is not None else column
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
            string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat`.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Save a FaissIndex on disk.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = self.get_index(index_name)
if not isinstance(index, FaissIndex):
raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
index.save(file, storage_options=storage_options)
logger.info(f"Saved FaissIndex {index_name} at {file}")
def load_faiss_index(
self,
index_name: str,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
):
"""Load a FaissIndex from disk.
If you want to do additional configurations, you can have access to the faiss index object by doing
`.get_index(index_name).faiss_index` to make it fit your needs.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = FaissIndex.load(file, device=device, storage_options=storage_options)
if index.faiss_index.ntotal != len(self):
raise ValueError(
f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
)
self._indexes[index_name] = index
logger.info(f"Loaded FaissIndex {index_name} from {file}")
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval.
Args:
column (`str`): The column of the documents to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
host (Optional `str`, defaults to localhost):
host of where ElasticSearch is running
port (Optional `str`, defaults to 9200):
port of where ElasticSearch is running
es_client (Optional `elasticsearch.Elasticsearch`):
The elasticsearch client used to create the index if host and port are None.
es_index_name (Optional `str`): The elasticsearch index name used to create the index.
es_index_config (Optional `dict`):
The configuration of the elasticsearch index.
Default config is:
Config::
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
"""
index_name = index_name if index_name is not None else column
es_index = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
es_index.add_documents(self, column=column)
self._indexes[index_name] = es_index
def load_elasticsearch_index(
self,
index_name: str,
es_index_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_config: Optional[dict] = None,
):
"""Load an existing text index using ElasticSearch for fast retrieval.
Args:
index_name (`str`):
The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
es_index_name (`str`):
The name of elasticsearch index to load.
host (`str`, *optional*, defaults to `localhost`):
Host of where ElasticSearch is running.
port (`str`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
"""
self._indexes[index_name] = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
def drop_index(self, index_name: str):
"""Drop the index with the specified column.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
"""
del self._indexes[index_name]
def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, indices)`:
A tuple of `(scores, indices)` where:
            - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
            - **indices** (`List[int]`): the indices of the retrieved examples
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search(query, k, **kwargs)
def search_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedSearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_indices)`:
A tuple of `(total_scores, total_indices)` where:
            - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search_batch(queries, k, **kwargs)
def get_nearest_examples(
self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
) -> NearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The index_name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, examples)`:
A tuple of `(scores, examples)` where:
- **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
- **examples** (`dict`): the retrieved examples
"""
self._check_index_is_initialized(index_name)
scores, indices = self.search(index_name, query, k, **kwargs)
top_indices = [i for i in indices if i >= 0]
return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
def get_nearest_examples_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedNearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_examples)`:
A tuple of `(total_scores, total_examples)` where:
            - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_examples** (`List[dict]`): the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
total_scores = [
scores_i[: len([i for i in indices_i if i >= 0])]
for scores_i, indices_i in zip(total_scores, total_indices)
]
total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
return BatchedNearestExamplesResults(total_scores, total_samples) | class_definition | 17,157 | 35,607 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py | null | 66 |
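# Minimal usage sketch (illustrative): IndexableMixin is normally exercised through
# `datasets.Dataset`, which inherits from it. The "embeddings" column and the toy data
# are assumptions; `faiss` must be installed for add_faiss_index to work.
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({
    "text": ["first", "second", "third"],
    "embeddings": np.random.rand(3, 8).astype(np.float32).tolist(),
})
ds.add_faiss_index(column="embeddings")        # index_name defaults to the column name
query = np.random.rand(8).astype(np.float32)
scores, examples = ds.get_nearest_examples("embeddings", query, k=2)
ds.drop_index("embeddings")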
class DatasetsError(Exception):
"""Base class for exceptions in this library.""" | class_definition | 308 | 392 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 67 |
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct.""" | class_definition | 395 | 476 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 68 |
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library.""" | class_definition | 479 | 597 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 69 |
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found.""" | class_definition | 600 | 699 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 70 |
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
""" | class_definition | 702 | 918 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 71 |
class DatasetBuildError(DatasetsError):
pass | class_definition | 921 | 969 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 72 |
class ManualDownloadError(DatasetBuildError):
pass | class_definition | 972 | 1,026 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 73 |
class FileFormatError(DatasetBuildError):
pass | class_definition | 1,029 | 1,079 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 74 |
class DatasetGenerationError(DatasetBuildError):
pass | class_definition | 1,082 | 1,139 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 75 |
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: Dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: List[str] = []
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
continue
while (
isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator)) and gen_kwarg.last_item is not None
):
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
return cls("An error occurred while generating the dataset" + explanation_message + help_message) | class_definition | 1,142 | 3,233 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 76 |
class ChecksumVerificationError(DatasetsError):
"""Error raised during checksums verifications of downloaded files.""" | class_definition | 3,236 | 3,358 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 77 |
class UnexpectedDownloadedFileError(ChecksumVerificationError):
"""Some downloaded files were not expected.""" | class_definition | 3,361 | 3,475 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 78 |
class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
"""Some files were supposed to be downloaded but were not.""" | class_definition | 3,478 | 3,610 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 79 |
class NonMatchingChecksumError(ChecksumVerificationError):
"""The downloaded file checksum don't match the expected checksum.""" | class_definition | 3,613 | 3,745 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 80 |
class SplitsVerificationError(DatasetsError):
"""Error raised during splits verifications.""" | class_definition | 3,748 | 3,845 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 81 |
class UnexpectedSplitsError(SplitsVerificationError):
"""The expected splits of the downloaded file is missing.""" | class_definition | 3,848 | 3,966 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 82 |
class ExpectedMoreSplitsError(SplitsVerificationError):
"""Some recorded splits are missing.""" | class_definition | 3,969 | 4,068 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 83 |
class NonMatchingSplitsSizesError(SplitsVerificationError):
"""The splits sizes don't match the expected splits sizes.""" | class_definition | 4,071 | 4,196 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py | null | 84 |
class SplitInfo:
name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
shard_lengths: Optional[List[int]] = None
# Deprecated
# For backward compatibility, this field needs to always be included in files like
# dataset_infos.json and dataset_info.json files
# To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
dataset_name: Optional[str] = dataclasses.field(
default=None, metadata={"include_in_asdict_even_if_is_default": True}
)
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
# `self.dataset_name` is assigned in `SplitDict.add()`.
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=[self],
instruction=str(self.name),
)
return instructions.file_instructions | class_definition | 994 | 2,153 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 85 |
class SubSplitInfo:
"""Wrapper around a sub split info.
    This class exposes info on the subsplit:
```
ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
info.splits['train[75%:]'].num_examples
```
"""
instructions: FileInstructions
@property
def num_examples(self):
"""Returns the number of example in the subsplit."""
return self.instructions.num_examples
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
return self.instructions.file_instructions | class_definition | 2,167 | 2,764 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 86 |
class SplitBase(metaclass=abc.ABCMeta):
# pylint: disable=line-too-long
"""Abstract base class for Split compositionality.
See the
[guide on splits](../loading#slice-splits)
for more information.
There are three parts to the composition:
1) The splits are composed (defined, merged, split,...) together before
calling the `.as_dataset()` function. This is done with the `__add__`,
`__getitem__`, which return a tree of `SplitBase` (whose leaf
are the `NamedSplit` objects)
```
split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
```
2) The `SplitBase` is forwarded to the `.as_dataset()` function
to be resolved into actual read instruction. This is done by the
`.get_read_instruction()` method which takes the real dataset splits
(name, number of shards,...) and parse the tree to return a
`SplitReadInstruction()` object
```
read_instruction = split.get_read_instruction(self.info.splits)
```
3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
to define which files to read and how to skip examples within file.
"""
# pylint: enable=line-too-long
@abc.abstractmethod
def get_read_instruction(self, split_dict):
"""Parse the descriptor tree and compile all read instructions together.
Args:
split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
Returns:
split_read_instruction: `SplitReadInstruction`
"""
raise NotImplementedError("Abstract method")
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, (NamedSplit, str)):
return False
raise NotImplementedError("Equality is not implemented between merged/sub splits.")
def __ne__(self, other):
"""InEquality: datasets.Split.TRAIN != 'test'."""
return not self.__eq__(other)
def __add__(self, other):
"""Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
return _SplitMerged(self, other)
def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
"""Divides this split into subsplits.
There are 3 ways to define subsplits, which correspond to the 3
arguments `k` (get `k` even subsplits), `percent` (get a slice of the
dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
specified by `weighted`).
Example::
```
# 50% train, 50% test
train, test = split.subsplit(k=2)
# 50% train, 25% test, 25% validation
train, test, validation = split.subsplit(weighted=[2, 1, 1])
# Extract last 20%
subsplit = split.subsplit(datasets.percent[-20:])
```
        Warning: k and weighted will be converted into percent, which means that
values below the percent will be rounded up or down. The final split may be
bigger to deal with remainders. For instance:
```
train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
```
Args:
arg: If no kwargs are given, `arg` will be interpreted as one of
`k`, `percent`, or `weighted` depending on the type.
For example:
```
split.subsplit(10) # Equivalent to split.subsplit(k=10)
split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
```
k: `int` If set, subdivide the split into `k` equal parts.
percent: `datasets.percent slice`, return a single subsplit corresponding to
a slice of the original split. For example:
`split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
weighted: `list[int]`, return a list of subsplits whose proportions match
the normalized sum of the list. For example:
`split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
Returns:
A subsplit or list of subsplits extracted from this split object.
"""
# Note that the percent kwargs redefine the outer name datasets.percent. This
# is done for consistency (.subsplit(percent=datasets.percent[:40]))
if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
raise ValueError("Only one argument of subsplit should be set.")
# Auto deduce k
if isinstance(arg, int):
k = arg
elif isinstance(arg, slice):
percent = arg
elif isinstance(arg, list):
weighted = arg
if not (k or percent or weighted):
raise ValueError(
f"Invalid split argument {arg}. Only list, slice and int supported. "
"One of k, weighted or percent should be set to a non empty value."
)
def assert_slices_coverage(slices):
            # Ensure that the expanded slices cover all percents.
assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
if k:
if not 0 < k <= 100:
raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
shift = 100 // k
slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
elif percent:
return _SubSplit(self, percent)
elif weighted:
# Normalize the weighted sum
total = sum(weighted)
weighted = [100 * x // total for x in weighted]
# Create the slice for each of the elements
start = 0
stop = 0
slices = []
for v in weighted:
stop += v
slices.append(slice(start, stop))
start = stop
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
else:
# Should not be possible
raise ValueError("Could not determine the split") | class_definition | 2,767 | 9,595 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 87 |
class PercentSliceMeta(type):
def __getitem__(cls, slice_value):
if not isinstance(slice_value, slice):
raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
return slice_value | class_definition | 9,819 | 10,063 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 88 |
class PercentSlice(metaclass=PercentSliceMeta):
# pylint: disable=line-too-long
"""Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
See the
[guide on splits](../loading#slice-splits)
for more information.
"""
# pylint: enable=line-too-long
pass | class_definition | 10,066 | 10,368 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 89 |
class _SplitMerged(SplitBase):
"""Represent two split descriptors merged together."""
def __init__(self, split1, split2):
self._split1 = split1
self._split2 = split2
def get_read_instruction(self, split_dict):
read_instruction1 = self._split1.get_read_instruction(split_dict)
read_instruction2 = self._split2.get_read_instruction(split_dict)
return read_instruction1 + read_instruction2
def __repr__(self):
return f"({repr(self._split1)} + {repr(self._split2)})" | class_definition | 10,428 | 10,957 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 90 |
class _SubSplit(SplitBase):
"""Represent a sub split of a split descriptor."""
def __init__(self, split, slice_value):
self._split = split
self._slice_value = slice_value
def get_read_instruction(self, split_dict):
return self._split.get_read_instruction(split_dict)[self._slice_value]
def __repr__(self):
slice_str = "{start}:{stop}"
if self._slice_value.step is not None:
slice_str += ":{step}"
slice_str = slice_str.format(
start="" if self._slice_value.start is None else self._slice_value.start,
stop="" if self._slice_value.stop is None else self._slice_value.stop,
step=self._slice_value.step,
)
return f"{repr(self._split)}(datasets.percent[{slice_str}])" | class_definition | 10,960 | 11,754 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 91 |
class NamedSplit(SplitBase):
"""Descriptor corresponding to a named split (train, test, ...).
Example:
Each descriptor can be composed with other using addition or slice:
```py
split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
```
The resulting split will correspond to 25% of the train split merged with
100% of the test split.
A split cannot be added twice, so the following will fail:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TRAIN.subsplit(datasets.percent[75:])
) # Error
split = datasets.Split.TEST + datasets.Split.ALL # Error
```
The slices can be applied only one time. So the following are valid:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TEST.subsplit(datasets.percent[:50])
)
split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
```
But this is not valid:
```py
train = datasets.Split.TRAIN
test = datasets.Split.TEST
split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
```
"""
def __init__(self, name):
self._name = name
split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
for split_name in split_names_from_instruction:
if not re.match(_split_re, split_name):
raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
def __str__(self):
return self._name
def __repr__(self):
return f"NamedSplit({self._name!r})"
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, NamedSplit):
return self._name == other._name # pylint: disable=protected-access
elif isinstance(other, SplitBase):
return False
elif isinstance(other, str): # Other should be string
return self._name == other
else:
return False
def __lt__(self, other):
return self._name < other._name # pylint: disable=protected-access
def __hash__(self):
return hash(self._name)
def get_read_instruction(self, split_dict):
return SplitReadInstruction(split_dict[self._name]) | class_definition | 11,757 | 14,482 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 92 |
class NamedSplitAll(NamedSplit):
"""Split corresponding to the union of all defined dataset splits."""
def __init__(self):
super().__init__("all")
def __repr__(self):
return "NamedSplitAll()"
def get_read_instruction(self, split_dict):
# Merge all dataset split together
read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
return sum(read_instructions, SplitReadInstruction()) | class_definition | 14,485 | 14,943 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 93 |
class Split:
# pylint: disable=line-too-long
"""`Enum` for dataset splits.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
- `TRAIN`: the training data.
- `VALIDATION`: the validation data. If present, this is typically used as
evaluation data while iterating on a model (e.g. changing hyperparameters,
model architecture, etc.).
- `TEST`: the testing data. This is the data to report metrics on. Typically
you do not want to use this during model iteration as you may overfit to it.
- `ALL`: the union of all defined dataset splits.
All splits, including compositions inherit from `datasets.SplitBase`.
See the [guide](../load_hub#splits) on splits for more information.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
    ...     gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.VALIDATION,
    ...     gen_kwargs={"split_key": "validation", "files": dl_manager.download_and_extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.TEST,
    ...     gen_kwargs={"split_key": "test", "files": dl_manager.download_and_extract(url)},
... )
```
"""
# pylint: enable=line-too-long
TRAIN = NamedSplit("train")
TEST = NamedSplit("test")
VALIDATION = NamedSplit("validation")
ALL = NamedSplitAll()
def __new__(cls, name):
"""Create a custom split with datasets.Split('custom_name')."""
return NamedSplitAll() if name == "all" else NamedSplit(name) | class_definition | 14,946 | 16,656 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 94 |
class SplitReadInstruction:
"""Object containing the reading instruction for the dataset.
Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
but the resolution happens instantaneously, instead of keeping track of the
    tree, so that all instructions are compiled and flattened into a single
SplitReadInstruction object containing the list of files and slice to use.
Once resolved, the instructions can be accessed with:
```
read_instructions.get_list_sliced_split_info() # List of splits to use
```
"""
def __init__(self, split_info=None):
self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with itself.")
if split_info:
self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
def add(self, sliced_split):
"""Add a SlicedSplitInfo the read instructions."""
# TODO(epot): Check that the number of examples per shard % 100 == 0
# Otherwise the slices value may be unbalanced and not exactly reflect the
# requested slice.
self._splits[sliced_split.split_info.name] = sliced_split
def __add__(self, other):
"""Merging split together."""
        # Will raise an error if a split has already been added (NonMutableDict)
# TODO(epot): If a split is already added but there is no overlap between
# the slices, should merge the slices (ex: [:10] + [80:])
split_instruction = SplitReadInstruction()
split_instruction._splits.update(self._splits) # pylint: disable=protected-access
split_instruction._splits.update(other._splits) # pylint: disable=protected-access
return split_instruction
def __getitem__(self, slice_value):
"""Sub-splits."""
# Will raise an error if a split has already been sliced
split_instruction = SplitReadInstruction()
for v in self._splits.values():
if v.slice_value is not None:
raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
v = v._asdict()
v["slice_value"] = slice_value
split_instruction.add(SlicedSplitInfo(**v))
return split_instruction
def get_list_sliced_split_info(self):
return list(self._splits.values()) | class_definition | 16,861 | 19,218 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 95 |
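# Hedged usage sketch: composing read instructions with `+` and attaching a
# slice with `[...]`. Assumes `SplitInfo` and `SplitReadInstruction` are
# importable from `datasets.splits`; the slice value shown is arbitrary.
from datasets.splits import SplitInfo, SplitReadInstruction

train = SplitReadInstruction(SplitInfo(name="train", num_examples=100))
test = SplitReadInstruction(SplitInfo(name="test", num_examples=20))

merged = train + test     # flattened immediately; adding the same split twice raises
sliced = merged[10:50]    # the slice value is attached to every underlying split

for sliced_split in sliced.get_list_sliced_split_info():
    print(sliced_split.split_info.name, sliced_split.slice_value)
# expected:
#   train slice(10, 50, None)
#   test slice(10, 50, None)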
class SplitDict(dict):
"""Split info object."""
def __init__(self, *args, dataset_name=None, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_name = dataset_name
def __getitem__(self, key: Union[SplitBase, str]):
# 1st case: The key exists: `info.splits['train']`
if str(key) in self:
return super().__getitem__(str(key))
# 2nd case: Uses instructions: `info.splits['train[50%]']`
else:
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=self.values(),
instruction=key,
)
return SubSplitInfo(instructions)
def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
if key != value.name:
raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
super().__setitem__(key, value)
def add(self, split_info: SplitInfo):
"""Add the split info."""
if split_info.name in self:
raise ValueError(f"Split {split_info.name} already present")
split_info.dataset_name = self.dataset_name
super().__setitem__(split_info.name, split_info)
@property
def total_num_examples(self):
"""Return the total number of examples."""
return sum(s.num_examples for s in self.values())
@classmethod
def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
"""Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
if isinstance(split_infos, dict):
split_infos = list(split_infos.values())
if dataset_name is None:
dataset_name = split_infos[0].get("dataset_name") if split_infos else None
split_dict = cls(dataset_name=dataset_name)
for split_info in split_infos:
if isinstance(split_info, dict):
split_info = SplitInfo(**split_info)
split_dict.add(split_info)
return split_dict
def to_split_dict(self):
"""Returns a list of SplitInfo protos that we have."""
out = []
for split_name, split_info in self.items():
split_info = copy.deepcopy(split_info)
split_info.name = split_name
out.append(split_info)
return out
def copy(self):
return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
def _to_yaml_list(self) -> list:
out = [asdict(s) for s in self.to_split_dict()]
# we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
for split_info_dict in out:
split_info_dict.pop("shard_lengths", None)
# we don't need the dataset_name attribute that is deprecated
for split_info_dict in out:
split_info_dict.pop("dataset_name", None)
return out
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
return cls.from_split_dict(yaml_data) | class_definition | 19,221 | 22,283 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 96 |
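# Hedged usage sketch: building a `SplitDict` from plain dicts and using the
# aggregate / round-trip helpers. Assumes `SplitDict` and `SplitInfo` are
# importable from `datasets.splits`. The `info.splits["train[:50%]"]` style of
# access is not shown because it needs real on-disk file instructions.
from datasets.splits import SplitDict, SplitInfo

splits = SplitDict.from_split_dict(
    [{"name": "train", "num_examples": 100}, {"name": "test", "num_examples": 20}],
    dataset_name="demo",
)
print(splits.total_num_examples)      # 120
print(splits["train"].num_examples)   # 100 - plain key lookup
print(splits._to_yaml_list())         # shard_lengths / dataset_name fields stripped

# __setitem__ refuses key/name mismatches:
try:
    splits["validation"] = SplitInfo(name="train")
except ValueError as err:
    print(err)  # Cannot add elem. (key mismatch: 'validation' != 'train')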
class SplitGenerator:
"""Defines the split information for the generator.
This should be used as returned value of
`GeneratorBasedBuilder._split_generators`.
See `GeneratorBasedBuilder._split_generators` for more info and example
of usage.
Args:
name (`str`):
Name of the `Split` for which the generator will
create the examples.
**gen_kwargs (additional keyword arguments):
Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
of the builder.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
... )
```
"""
name: str
gen_kwargs: Dict = dataclasses.field(default_factory=dict)
split_info: SplitInfo = dataclasses.field(init=False)
def __post_init__(self):
self.name = str(self.name) # Make sure we convert NamedSplits in strings
NamedSplit(self.name) # check that it's a valid split name
self.split_info = SplitInfo(name=self.name) | class_definition | 22,297 | 23,441 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py | null | 97 |
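# Hedged usage sketch: `SplitGenerator.__post_init__` normalizes the split name
# to a plain string, validates it and pre-builds an empty `SplitInfo`. Assumes
# `Split` and `SplitGenerator` are importable from `datasets.splits`; the
# "files" gen_kwargs key is only illustrative.
from datasets.splits import Split, SplitGenerator

gen = SplitGenerator(name=Split.TRAIN, gen_kwargs={"files": ["train-00000-of-00001.jsonl"]})
print(gen.name)        # 'train' - a str, no longer a NamedSplit
print(gen.split_info)  # SplitInfo(name='train', ...) to be filled in by the builder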
class IndexedTableMixin:
def __init__(self, table: pa.Table):
self._schema: pa.Schema = table.schema
self._batches: List[pa.RecordBatch] = [
recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
]
self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
"""
        Create a pa.Table by gathering the records at the specified indices. Should be faster
        than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
        the binary searches in parallel, using highly optimized C code.
"""
if not len(indices):
raise ValueError("Indices must be non-empty")
batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
return pa.Table.from_batches(
[
self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
for batch_idx, i in zip(batch_indices, indices)
],
schema=self._schema,
)
def fast_slice(self, offset=0, length=None) -> pa.Table:
"""
Slice the Table using interpolation search.
The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
Interpolation search is used to find the start and end indexes of the batches we want to keep.
The batches to keep are then concatenated to form the sliced Table.
"""
if offset < 0:
raise IndexError("Offset must be non-negative")
elif offset >= self._offsets[-1] or (length is not None and length <= 0):
return pa.Table.from_batches([], schema=self._schema)
i = _interpolation_search(self._offsets, offset)
if length is None or length + offset >= self._offsets[-1]:
batches = self._batches[i:]
batches[0] = batches[0].slice(offset - self._offsets[i])
else:
j = _interpolation_search(self._offsets, offset + length - 1)
batches = self._batches[i : j + 1]
batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
batches[0] = batches[0].slice(offset - self._offsets[i])
return pa.Table.from_batches(batches, schema=self._schema) | class_definition | 3,075 | 5,469 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/table.py | null | 98 |
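# Hedged usage sketch: exercising the index helpers directly on
# `IndexedTableMixin` (in practice they are reached through subclasses such as
# `InMemoryTable`). Assumes `IndexedTableMixin` is importable from
# `datasets.table` and that pyarrow is installed, as in this module.
import pyarrow as pa

from datasets.table import IndexedTableMixin

pa_table = pa.table({"idx": list(range(10)), "text": [f"row {i}" for i in range(10)]})
indexed = IndexedTableMixin(pa_table)

# fast_slice: same result as pa.Table.slice, resolved with interpolation search
print(indexed.fast_slice(offset=3, length=4).to_pydict()["idx"])  # [3, 4, 5, 6]

# fast_gather: one-row slices gathered with a single vectorized binary search
print(indexed.fast_gather([0, 7, 2]).to_pydict()["idx"])          # [0, 7, 2]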
class Table(IndexedTableMixin):
"""
Wraps a pyarrow Table by using composition.
This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
It implements all the basic attributes/methods of the pyarrow Table class except
the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
append_column, remove_column, set_column, rename_columns` and `drop`.
The implementation of these methods differs for the subclasses.
"""
def __init__(self, table: pa.Table):
super().__init__(table)
self.table = table
def __deepcopy__(self, memo: dict):
# arrow tables are immutable, so there's no need to copy self.table
# moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
# by adding it to the memo, self.table won't be copied
memo[id(self.table)] = self.table
# same for the recordbatches used by the index
memo[id(self._batches)] = list(self._batches)
return _deepcopy(self, memo)
def validate(self, *args, **kwargs):
"""
Perform validation checks. An exception is raised if validation fails.
By default only cheap validation checks are run. Pass `full=True`
for thorough validation checks (potentially `O(n)`).
Args:
full (`bool`, defaults to `False`):
If `True`, run expensive checks, otherwise cheap checks only.
Raises:
`pa.lib.ArrowInvalid`: if validation fails
"""
return self.table.validate(*args, **kwargs)
def equals(self, *args, **kwargs):
"""
Check if contents of two tables are equal.
Args:
other ([`~datasets.table.Table`]):
Table to compare against.
            check_metadata (`bool`, defaults to `False`):
Whether schema metadata equality should be checked as well.
Returns:
`bool`
"""
args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
        kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
return self.table.equals(*args, **kwargs)
def to_batches(self, *args, **kwargs):
"""
Convert Table to list of (contiguous) `RecordBatch` objects.
Args:
max_chunksize (`int`, defaults to `None`):
Maximum size for `RecordBatch` chunks. Individual chunks may be
smaller depending on the chunk layout of individual columns.
Returns:
`List[pyarrow.RecordBatch]`
"""
return self.table.to_batches(*args, **kwargs)
def to_pydict(self, *args, **kwargs):
"""
Convert the Table to a `dict` or `OrderedDict`.
Returns:
`dict`
"""
return self.table.to_pydict(*args, **kwargs)
def to_pylist(self, *args, **kwargs):
"""
Convert the Table to a list
Returns:
`list`
"""
return self.table.to_pylist(*args, **kwargs)
def to_pandas(self, *args, **kwargs):
"""
Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
Arrow MemoryPool to use for allocations. Uses the default memory
                pool if not passed.
strings_to_categorical (`bool`, defaults to `False`):
Encode string (UTF8) and binary types to `pandas.Categorical`.
categories (`list`, defaults to `empty`):
List of fields that should be returned as `pandas.Categorical`. Only
applies to table-like data structures.
zero_copy_only (`bool`, defaults to `False`):
Raise an `ArrowException` if this function call would require copying
the underlying data.
integer_object_nulls (`bool`, defaults to `False`):
Cast integers with nulls to objects.
date_as_object (`bool`, defaults to `True`):
Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
timestamp_as_object (`bool`, defaults to `False`):
Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
useful if you have timestamps that don't fit in the normal date
range of nanosecond timestamps (1678 CE-2262 CE).
If `False`, all timestamps are converted to `datetime64[ns]` dtype.
use_threads (`bool`, defaults to `True`):
Whether to parallelize the conversion using multiple threads.
deduplicate_objects (`bool`, defaults to `False`):
                Do not create multiple copies of Python objects when created, to save
on memory use. Conversion will be slower.
ignore_metadata (`bool`, defaults to `False`):
If `True`, do not use the 'pandas' metadata to reconstruct the
DataFrame index, if present.
safe (`bool`, defaults to `True`):
For certain data types, a cast is needed in order to store the
data in a pandas DataFrame or Series (e.g. timestamps are always
stored as nanoseconds in pandas). This option controls whether it
is a safe cast or not.
split_blocks (`bool`, defaults to `False`):
If `True`, generate one internal "block" for each column when
creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
                can temporarily reduce memory use, note that various pandas operations
can trigger "consolidation" which may balloon memory use.
self_destruct (`bool`, defaults to `False`):
EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
memory while converting the Arrow object to pandas. If you use the
object after calling `to_pandas` with this option it will crash your
program.
types_mapper (`function`, defaults to `None`):
A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
This can be used to override the default pandas type for conversion
of built-in pyarrow types or in absence of `pandas_metadata` in the
Table schema. The function receives a pyarrow DataType and is
expected to return a pandas `ExtensionDtype` or `None` if the
default conversion should be used for that type. If you have
a dictionary mapping, you can pass `dict.get` as function.
Returns:
`pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
"""
return self.table.to_pandas(*args, **kwargs)
def to_string(self, *args, **kwargs):
return self.table.to_string(*args, **kwargs)
def to_reader(self, max_chunksize: Optional[int] = None):
"""
Convert the Table to a RecordBatchReader.
Note that this method is zero-copy, it merely exposes the same data under a different API.
Args:
max_chunksize (`int`, defaults to `None`)
Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
on the chunk layout of individual columns.
Returns:
`pyarrow.RecordBatchReader`
"""
return self.table.to_reader(max_chunksize=max_chunksize)
def field(self, *args, **kwargs):
"""
Select a schema field by its column name or numeric index.
Args:
i (`Union[int, str]`):
The index or name of the field to retrieve.
Returns:
`pyarrow.Field`
"""
return self.table.field(*args, **kwargs)
def column(self, *args, **kwargs):
"""
Select a column by its column name, or numeric index.
Args:
i (`Union[int, str]`):
The index or name of the column to retrieve.
Returns:
`pyarrow.ChunkedArray`
"""
return self.table.column(*args, **kwargs)
def itercolumns(self, *args, **kwargs):
"""
Iterator over all columns in their numerical order.
Yields:
`pyarrow.ChunkedArray`
"""
return self.table.itercolumns(*args, **kwargs)
@property
def schema(self):
"""
Schema of the table and its columns.
Returns:
`pyarrow.Schema`
"""
return self.table.schema
@property
def columns(self):
"""
List of all columns in numerical order.
Returns:
`List[pa.ChunkedArray]`
"""
return self.table.columns
@property
def num_columns(self):
"""
Number of columns in this table.
Returns:
int
"""
return self.table.num_columns
@property
def num_rows(self):
"""
Number of rows in this table.
Due to the definition of a table, all columns have the same number of
rows.
Returns:
int
"""
return self.table.num_rows
@property
def shape(self):
"""
Dimensions of the table: (#rows, #columns).
Returns:
`(int, int)`: Number of rows and number of columns.
"""
return self.table.shape
@property
def nbytes(self):
"""
Total number of bytes consumed by the elements of the table.
"""
return self.table.nbytes
@property
def column_names(self):
"""
Names of the table's columns.
"""
return self.table.column_names
def __eq__(self, other):
return self.equals(other)
def __getitem__(self, i):
return self.table[i]
def __len__(self):
return len(self.table)
def __repr__(self):
return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)
def __str__(self):
return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)
def slice(self, *args, **kwargs):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
raise NotImplementedError()
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def cast(self, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
raise NotImplementedError()
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be None,
        which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
            `datasets.table.Table`: shallow copy of the table with the new metadata.
"""
raise NotImplementedError()
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`: New table without the column.
"""
raise NotImplementedError()
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column set.
"""
raise NotImplementedError()
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
raise NotImplementedError()
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
            `KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`: New table without the columns.
"""
raise NotImplementedError()
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
            columns (`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
`datasets.table.Table`: table with only a subset of the columns
"""
raise NotImplementedError() | class_definition | 5,472 | 21,458 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/table.py | null | 99 |
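# Hedged usage sketch: the `Table` wrapper delegates reads to the wrapped
# pyarrow table, while the transforms above are only implemented by concrete
# subclasses. Assumes `InMemoryTable` (named in the docstring as one such
# subclass) is importable from `datasets.table`.
import pyarrow as pa

from datasets.table import InMemoryTable, Table

table = InMemoryTable(pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]}))
print(isinstance(table, Table))            # True
print(table.num_rows, table.column_names)  # 3 ['a', 'b']
print(table.to_pydict())                   # {'a': [1, 2, 3], 'b': ['x', 'y', 'z']}

# Transforms return new wrapped tables on subclasses (they raise
# NotImplementedError on the base class shown above):
print(table.slice(1, 2).to_pydict())       # {'a': [2, 3], 'b': ['y', 'z']}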