num_examples_progress_update = 0
# If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
buf_writer, writer, tmp_file = None, None, None
# Check if Polars is available and import it if so
if config.POLARS_AVAILABLE and "polars" in sys.modules:
import polars as pl
# Optionally initialize the writer as a context manager
with contextlib.ExitStack() as stack:
try:
arrow_formatted_shard = shard.with_format("arrow")
# Loop over single examples or batches and write to buffer/file if examples are to be updated
if not batched:
shard_iterable = enumerate(arrow_formatted_shard)
else:
num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
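# Pair each batch with the index of its first example so that absolute indices can be passed to the function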
shard_iterable = zip(
range(0, num_rows, batch_size),
arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
)
if not batched:
_time = time.time()
for i, example in shard_iterable:
example = apply_function_on_filtered_inputs(example, i, offset=offset)
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
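# The processed example may come back as an Arrow table, a pandas or Polars dataframe, or a plain dict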
if isinstance(example, pa.Table):
writer.write_row(example)
elif isinstance(example, pd.DataFrame):
writer.write_row(pa.Table.from_pandas(example))
elif (
config.POLARS_AVAILABLE
and "polars" in sys.modules
and isinstance(example, pl.DataFrame)
):
writer.write_row(example.to_arrow())
else:
writer.write(example)
num_examples_progress_update += 1
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
else:
_time = time.time()
for i, batch in shard_iterable:
num_examples_in_batch = len(batch)
indices = list(
range(*(slice(i, i + batch_size).indices(shard.num_rows)))
)  # absolute indices of the examples in this batch, clamped to the table length
try:
batch = apply_function_on_filtered_inputs(
batch,
indices,
check_same_num_examples=len(shard.list_indexes()) > 0,
offset=offset,
)
except NumExamplesMismatchError:
raise DatasetTransformationNotAllowedError(
"Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index()` to remove your index and then re-add it."
) from None
if update_data:
if i == 0:
buf_writer, writer, tmp_file = init_buffer_and_writer()
stack.enter_context(writer)
if isinstance(batch, pa.Table):
writer.write_table(batch)
elif isinstance(batch, pd.DataFrame):
writer.write_table(pa.Table.from_pandas(batch))
elif (
config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame)
):
writer.write_table(batch.to_arrow())
else:
writer.write_batch(batch)
num_examples_progress_update += num_examples_in_batch
if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
_time = time.time()
yield rank, False, num_examples_progress_update
num_examples_progress_update = 0
if update_data and writer is not None:
writer.finalize()  # we only close the stream if we are writing to a file
except (Exception, KeyboardInterrupt):
yield rank, False, num_examples_progress_update
if update_data:
if writer is not None:
writer.finalize()
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
yield rank, False, num_examples_progress_update
if update_data and tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, cache_file_name)
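# Read the current umask (os.umask sets a new mask and returns the previous one, so we restore it immediately)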
umask = os.umask(0o666)
os.umask(umask)
os.chmod(cache_file_name, 0o666 & ~umask)
if update_data:
# Create new Dataset from buffer or file
info = shard.info.copy()
info.features = writer._features
if buf_writer is None:
yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
else:
yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
else:
yield rank, True, shard
@transmit_format
@fingerprint_transform(inplace=False)
def batch(
self,
batch_size: int,
drop_last_batch: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""
Group samples from the dataset into batches.
Args:
batch_size (`int`):
The number of samples in each batch.
drop_last_batch (`bool`, defaults to `False`):
Whether to drop the last incomplete batch.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Returns:
[`Dataset`]: A new Dataset where each item is a batch of multiple samples from the original dataset.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> batched_ds = ds.batch(batch_size=4)
>>> batched_ds[0]
{'text': ['compassionately explores the seemingly irreconcilable situation...', ...], # 4 items
'label': [1, 1, 1, 1]}
```
"""
def batch_fn(example):
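# With `batched=True`, `example` is a dict mapping each column to a list of values; wrapping each list in another list turns the whole batch into a single output row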
return {k: [v] for k, v in example.items()}
return self.map(
batch_fn,
batched=True,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_proc=num_proc,
new_fingerprint=new_fingerprint,
desc="Batching examples",
)
@transmit_format
@fingerprint_transform(
inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
)
def filter(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
with_rank: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
batch_size: Optional[int] = 1000,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
fn_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
new_fingerprint: Optional[str] = None,
desc: Optional[str] = None,
) -> "Dataset":
"""Apply a filter function to all the elements in the table in batches | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
and update the table so that the dataset only includes examples according to the filter function. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
function (`Callable`): Callable with one of the following signatures:
- `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
- `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
- `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
- `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
If no function is provided, defaults to an always `True` function: `lambda x: True`.
with_indices (`bool`, defaults to `False`):
Provide example indices to `function`. Note that in this case the
signature of `function` should be `def function(example, idx[, rank]): ...`.
with_rank (`bool`, defaults to `False`):
Provide process rank to `function`. Note that in this case the
signature of `function` should be `def function(example[, idx], rank): ...`.
input_columns (`str` or `List[str]`, *optional*):
The columns to be passed into `function` as
positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
batched (`bool`, defaults to `False`):
Provide batch of examples to `function`.
batch_size (`int`, *optional*, defaults to `1000`):
Number of examples per batch provided to `function` if
`batched = True`. If `batched = False`, one example per batch is passed to `function`.
If `batch_size <= 0` or `batch_size` is `None`, provide the full dataset as a single batch to `function`.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the current computation from `function`
can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
fn_kwargs (`dict`, *optional*):
Keyword arguments to be passed to `function`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing.
suffix_template (`str`):
If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each shard's cache file.
For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
`_{rank:05d}_of_{num_proc:05d}`).
new_fingerprint (`str`, *optional*):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
desc (`str`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside the progress bar while filtering examples.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.filter(lambda x: x["label"] == 1)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`"
)
if function is None:
function = lambda x: True # noqa: E731
if len(self) == 0:
return self
indices = self.map(
function=partial(
get_indices_from_mask_function,
function,
batched,
with_indices,
with_rank,
input_columns,
self._indices,
),
with_indices=True,
with_rank=True,
features=Features({"indices": Value("uint64")}),
batched=True,
batch_size=batch_size,
remove_columns=self.column_names,
keep_in_memory=keep_in_memory,
load_from_cache_file=load_from_cache_file,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
fn_kwargs=fn_kwargs,
num_proc=num_proc,
suffix_template=suffix_template,
new_fingerprint=new_fingerprint,
input_columns=input_columns,
desc=desc or "Filter",
)
new_dataset = copy.deepcopy(self)
new_dataset._indices = indices.data
new_dataset._fingerprint = new_fingerprint
return new_dataset
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
def flatten_indices(
self,
keep_in_memory: bool = False,
cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
features: Optional[Features] = None,
disable_nullable: bool = False,
num_proc: Optional[int] = None,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create and cache a new Dataset by flattening the indices mapping. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, *optional*, default `None`):
Provide the name of a path for the cache file. It is used to store the
results of the computation instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
features (`Optional[datasets.Features]`, defaults to `None`):
Use a specific [`Features`] to store the cache file
instead of the automatically generated one.
disable_nullable (`bool`, defaults to `False`):
Disallow null values in the table.
num_proc (`int`, *optional*, defaults to `None`):
Max number of processes when generating cache. Already cached shards are loaded sequentially.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
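Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.shuffle(seed=42)  # creates an indices mapping
>>> ds = ds.flatten_indices()  # rewrites the shuffled rows as contiguous chunks of data
```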
""" | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
return self.map(
batched=True, # for speed
keep_in_memory=keep_in_memory,
cache_file_name=cache_file_name,
writer_batch_size=writer_batch_size,
features=features,
disable_nullable=disable_nullable,
new_fingerprint=new_fingerprint,
desc="Flattening the indices",
num_proc=num_proc,
)
def _new_dataset_with_indices(
self,
indices_cache_file_name: Optional[str] = None,
indices_buffer: Optional[pa.Buffer] = None,
fingerprint: Optional[str] = None,
) -> "Dataset":
"""Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
current Dataset.
"""
if indices_cache_file_name is None and indices_buffer is None:
raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.") | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
if fingerprint is None:
raise ValueError("please specify a fingerprint for the dataset with indices")
if indices_cache_file_name is not None:
indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
else:
indices_table = InMemoryTable.from_buffer(indices_buffer)
# Return new Dataset object
# don't forget to copy the objects
return Dataset(
self._data,
info=self.info.copy(),
split=self.split,
indices_table=indices_table,
fingerprint=fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def select(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
Range, list or 1D-array of integer indices for indexing.
If the indices correspond to a contiguous range, the Arrow table is simply sliced.
However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
but still faster than recreating an Arrow table made of the requested rows.
keep_in_memory (`bool`, defaults to `False`):
Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.select(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# If indices is a PyArrow array, we convert to NumPy
if isinstance(indices, (pa.Array, pa.ChunkedArray)):
indices = indices.to_numpy().astype(np.int64)
# Convert generator objects to lists
if isinstance(indices, Iterator):
indices = list(indices)
# If the indices are contiguous, simply slice the arrow table
if isinstance(indices, range):
if _is_range_contiguous(indices) and indices.start >= 0:
start, length = indices.start, indices.stop - indices.start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
else:
try:
start = next(iter(indices))
except StopIteration:
# if `indices` is an empty iterable, we return an empty dataset
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
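# Otherwise, detect a contiguous ascending run by comparing the indices against a counter starting at the first index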
if start >= 0:
counter_from_start = itertools.count(start=start)
if all(i == j for i, j in zip(indices, counter_from_start)):
length = next(counter_from_start) - start
return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
# If not contiguous, we need to create a new indices mapping
return self._select_with_indices_mapping(
indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False)
def _select_contiguous(
self,
start: int,
length: int,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows from a contiguous slice of data.
The slice is defined by its start index and its length.
Args:
start (`int`): start index.
length (`int`): length of the slice to select.
new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_contiguous(0, 4)
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
_check_valid_indices_value(start, len(self))
_check_valid_indices_value(start + length - 1, len(self))
if self._indices is None or length == 0:
return Dataset(
self.data.slice(start, length),
info=self.info.copy(),
split=self.split,
fingerprint=new_fingerprint,
)
else:
return Dataset(
self.data,
info=self.info.copy(),
split=self.split,
indices_table=self._indices.slice(start, length),
fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
def _select_with_indices_mapping(
self,
indices: Iterable,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset with rows selected following the list/array of indices.
The new dataset is made by creating a new indices mapping on top of the main arrow table.
Args:
indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
indices mapping instead of the automatically generated cache file name.
writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
new_fingerprint (`str`, optional, default `None`): The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds._select_with_indices_mapping(range(4))
Dataset({
features: ['text', 'label'],
num_rows: 4
})
```
"""
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Prepare the writer for our indices arrow table
if keep_in_memory or indices_cache_file_name is None:
buf_writer = pa.BufferOutputStream()
tmp_file = None
writer = ArrowWriter(
stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
else:
buf_writer = None
logger.info(f"Caching indices mapping at {indices_cache_file_name}")
cache_dir = os.path.dirname(indices_cache_file_name)
os.makedirs(cache_dir, exist_ok=True)
tmp_file = tempfile.NamedTemporaryFile("wb", dir=cache_dir, delete=False)
writer = ArrowWriter(
path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
)
indices = indices if isinstance(indices, list) else list(indices)
size = len(self)
if indices:
_check_valid_indices_value(int(max(indices)), size=size)
_check_valid_indices_value(int(min(indices)), size=size)
else:
return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
indices_array = pa.array(indices, type=pa.uint64())
# If an indices mapping already exists, compose with it: map the new positions through the current mapping
if self._indices is not None:
indices_array = self._indices.column(0).take(indices_array)
indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
with writer:
try:
writer.write_table(indices_table)
writer.finalize()  # we only close the stream if we are writing to a file
except (Exception, KeyboardInterrupt):
if tmp_file is not None:
tmp_file.close()
if os.path.exists(tmp_file.name):
os.remove(tmp_file.name)
raise
if tmp_file is not None:
tmp_file.close()
shutil.move(tmp_file.name, indices_cache_file_name)
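# Read the current umask (os.umask sets a new mask and returns the previous one, so we restore it immediately)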
umask = os.umask(0o666)
os.umask(umask)
os.chmod(indices_cache_file_name, 0o666 & ~umask)
# Return new Dataset object
if buf_writer is None:
return self._new_dataset_with_indices(
indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
)
else:
return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
def skip(self, n: int) -> "Dataset":
"""
Create a new [`Dataset`] that skips the first `n` elements.
Args:
n (`int`):
Number of elements to skip.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> list(ds.take(3))
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'}]
>>> ds = ds.skip(1)
>>> list(ds.take(3))
[{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
{'label': 1, 'text': 'effective but too-tepid biopic'},
{'label': 1,
'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
```
"""
return self.select(range(n, len(self)))
def take(self, n: int) -> "Dataset":
"""
Create a new [`Dataset`] with only the first `n` elements.
Args:
n (`int`):
Number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
"""
return self.select(range(n))
@transmit_format
@fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
def sort(
self,
column_names: Union[str, Sequence_[str]],
reverse: Union[bool, Sequence_[bool]] = False,
null_placement: str = "at_end",
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new dataset sorted according to a single or multiple columns. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
column_names (`Union[str, Sequence[str]]`):
Column name(s) to sort by.
reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
If `True`, sort by descending order rather than ascending. If a single bool is provided,
the value is applied to the sorting of all column names. Otherwise a list of bools with the
same length and order as column_names must be provided.
null_placement (`str`, defaults to `at_end`):
Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
<Added version="1.14.2"/>
keep_in_memory (`bool`, defaults to `False`):
Keep the sorted indices in memory instead of writing them to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the sorted indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*, defaults to `None`):
Provide the name of a path for the cache file. It is used to store the
sorted indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
A higher value gives smaller cache files, a lower value consumes less temporary memory.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset('rotten_tomatoes', split='validation')
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> sorted_ds = ds.sort('label')
>>> sorted_ds['label'][:10]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
>>> another_sorted_ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
# Normalize column_names to a list of column names
if isinstance(column_names, str):
column_names = [column_names]
# Check proper format and length of reverse
if not isinstance(reverse, bool):
if len(reverse) != len(column_names):
raise ValueError(
"Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
)
else:
reverse = [reverse] * len(column_names)
# Check whether column name(s) exist in dataset
for column in column_names:
if not isinstance(column, str) or column not in self._data.column_names:
raise ValueError(
f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
)
# Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
if null_placement not in ["at_start", "at_end"]:
if null_placement == "first":
null_placement = "at_start"
elif null_placement == "last":
null_placement = "at_end"
else:
raise ValueError(
f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
sort_table = query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
)
sort_keys = [
(col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
]
indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
)
def shuffle(
self,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
new_fingerprint: Optional[str] = None,
) -> "Dataset":
"""Create a new Dataset where the rows are shuffled. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Currently shuffling uses numpy random generators.
You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
for example in my_iterable_dataset: # fast
pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in shuffled_iterable_dataset: # as fast as before
pass
```
Args:
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, default `False`):
Keep the shuffled indices in memory instead of writing them to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the shuffled indices
can be identified, use it instead of recomputing.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
shuffled indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the dataset after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds['label'][:10]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
# set a seed
>>> shuffled_ds = ds.shuffle(seed=42)
>>> shuffled_ds['label'][:10]
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
```
"""
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return self
if keep_in_memory and indices_cache_file_name is not None:
raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.") | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
if seed is not None and generator is not None:
raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
if generator is not None and not isinstance(generator, np.random.Generator):
raise ValueError("The provided generator must be an instance of numpy.random.Generator")
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None:
if seed is None:
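# Take a seed from NumPy's global RNG state and advance the global RNG one step, so the result is reproducible under np.random.seed(...)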
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
if os.path.exists(indices_cache_file_name) and load_from_cache_file:
logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
return self._new_dataset_with_indices(
fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
)
permutation = generator.permutation(len(self))
return self.select(
indices=permutation,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
writer_batch_size=writer_batch_size,
new_fingerprint=new_fingerprint,
)
@transmit_format
@fingerprint_transform(
inplace=False,
randomized_function=True,
fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
)
def train_test_split(
self,
test_size: Union[float, int, None] = None,
train_size: Union[float, int, None] = None,
shuffle: bool = True,
stratify_by_column: Optional[str] = None,
seed: Optional[int] = None,
generator: Optional[np.random.Generator] = None,
keep_in_memory: bool = False,
load_from_cache_file: Optional[bool] = None,
train_indices_cache_file_name: Optional[str] = None,
test_indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
train_new_fingerprint: Optional[str] = None,
test_new_fingerprint: Optional[str] = None,
) -> "DatasetDict": | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
"""Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
This method is similar to scikit-learn `train_test_split`.
Args:
test_size (`float` or `int`, *optional*):
Size of the test split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
If `int`, represents the absolute number of test samples.
If `None`, the value is set to the complement of the train size.
If `train_size` is also `None`, it will be set to `0.25`.
train_size (`float` or `int`, *optional*):
Size of the train split.
If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
If `int`, represents the absolute number of train samples.
If `None`, the value is automatically set to the complement of the test size.
shuffle (`bool`, *optional*, defaults to `True`):
Whether or not to shuffle the data before splitting.
stratify_by_column (`str`, *optional*, defaults to `None`):
The column name of labels to be used to perform stratified split of data.
seed (`int`, *optional*):
A seed to initialize the default BitGenerator if `generator=None`.
If `None`, then fresh, unpredictable entropy will be pulled from the OS.
If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
generator (`numpy.random.Generator`, *optional*):
Numpy random Generator to use to compute the permutation of the dataset rows.
If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
keep_in_memory (`bool`, defaults to `False`):
Keep the splits indices in memory instead of writing them to a cache file.
load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
If a cache file storing the splits indices
can be identified, use it instead of recomputing.
train_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
train split indices instead of the automatically generated cache file name.
test_indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
test split indices instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
Number of rows per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
train_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the train set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
test_new_fingerprint (`str`, *optional*, defaults to `None`):
The new fingerprint of the test set after transform.
If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 852
})
test: Dataset({
features: ['text', 'label'],
num_rows: 214
})
})
# set a seed
>>> ds = ds.train_test_split(test_size=0.2, seed=42)
# stratified split
>>> ds = load_dataset("imdb",split="train")
Dataset({
features: ['text', 'label'],
num_rows: 25000
})
>>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
DatasetDict({
train: Dataset({
features: ['text', 'label'],
num_rows: 20000
})
test: Dataset({
features: ['text', 'label'],
num_rows: 5000
})
})
```
"""
from .dataset_dict import DatasetDict # import here because of circular dependency
if len(self.list_indexes()) > 0:
raise DatasetTransformationNotAllowedError(
"Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
)
# If the array is empty we do nothing
if len(self) == 0:
return DatasetDict({"train": self, "test": self})
if test_size is None and train_size is None:
test_size = 0.25
# Safety checks similar to scikit-learn's ones.
# (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
n_samples = len(self)
if (
isinstance(test_size, int)
and (test_size >= n_samples or test_size <= 0)
or isinstance(test_size, float)
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
f"test_size={test_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if (
isinstance(train_size, int)
and (train_size >= n_samples or train_size <= 0)
or isinstance(train_size, float)
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
f"train_size={train_size} should be either positive and smaller "
f"than the number of samples {n_samples} or a float in the (0, 1) range"
)
if train_size is not None and not isinstance(train_size, (int, float)):
raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
if test_size is not None and not isinstance(test_size, (int, float)):
raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}") | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
raise ValueError(
f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size."
)
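# Convert fractional sizes to absolute counts (ceil for the test set, floor for the train set), mirroring scikit-learn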
if isinstance(test_size, float):
n_test = ceil(test_size * n_samples)
elif isinstance(test_size, int):
n_test = float(test_size)
if isinstance(train_size, float):
n_train = floor(train_size * n_samples)
elif isinstance(train_size, int):
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
f"The sum of train_size and test_size = {n_train + n_test}, "
"should be smaller than the number of "
f"samples {n_samples}. Reduce test_size and/or "
"train_size."
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters."
)
load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
if generator is None and shuffle is True:
if seed is None:
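# Take a seed from NumPy's global RNG state and advance the global RNG one step, so the split is reproducible under np.random.seed(...)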
_, seed, pos, *_ = np.random.get_state()
seed = seed[pos] if pos < 624 else seed[0]
_ = np.random.random() # do 1 step of rng
generator = np.random.default_rng(seed)
# Check if we've already cached this computation (indexed by a hash)
if self.cache_files:
if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
# we create a unique hash from the function, current dataset file and the mapping args
if train_indices_cache_file_name is None:
train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
if test_indices_cache_file_name is None:
test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
if (
os.path.exists(train_indices_cache_file_name)
and os.path.exists(test_indices_cache_file_name)
and load_from_cache_file
):
logger.info(
f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
)
return DatasetDict(
{
"train": self._new_dataset_with_indices(
fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
),
"test": self._new_dataset_with_indices(
fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
),
}
)
if not shuffle:
if stratify_by_column is not None:
raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
train_indices = np.arange(n_train)
test_indices = np.arange(n_train, n_train + n_test)
else:
# stratified partition
if stratify_by_column is not None:
if stratify_by_column not in self._info.features.keys():
raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
if not isinstance(self._info.features[stratify_by_column], ClassLabel):
raise ValueError(
f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
)
try:
train_indices, test_indices = next(
stratified_shuffle_split_generate_indices(
self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
)
)
except Exception as error:
if str(error) == "Minimum class count error":
raise ValueError(
f"The least populated class in {stratify_by_column} column has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2."
)
else:
raise error
# random partition
else:
permutation = generator.permutation(len(self))
test_indices = permutation[:n_test]
train_indices = permutation[n_test : (n_test + n_train)]
train_split = self.select(
indices=train_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=train_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=train_new_fingerprint,
)
test_split = self.select(
indices=test_indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=test_indices_cache_file_name,
writer_batch_size=writer_batch_size,
new_fingerprint=test_new_fingerprint,
)
return DatasetDict({"train": train_split, "test": test_split}) | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
def shard(
self,
num_shards: int,
index: int,
contiguous: bool = True,
keep_in_memory: bool = False,
indices_cache_file_name: Optional[str] = None,
writer_batch_size: Optional[int] = 1000,
) -> "Dataset":
"""Return the `index`-nth shard from dataset split into `num_shards` pieces.
This shards deterministically. `dataset.shard(n, i)` splits the dataset into contiguous chunks,
so it can be easily concatenated back together after processing. If `len(dataset) % n == l`, then the
first `l` shards each have length `(len(dataset) // n) + 1`, and the remaining shards have length `(len(dataset) // n)`.
`datasets.concatenate_datasets([dset.shard(n, i) for i in range(n)])` returns a dataset with the same order as the original.
Note: `n` should be less than or equal to the number of elements in the dataset `len(dataset)`.
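For example, with `len(dataset) = 10` and `n = 3`, the contiguous shards have lengths 4, 3 and 3.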
On the other hand, `dataset.shard(n, i, contiguous=False)` contains all elements of the dataset whose index mod `n` equals `i`.
Be sure to shard before using any randomizing operator (such as `shuffle`).
It is best if the shard operator is used early in the dataset pipeline.
Args:
num_shards (`int`):
How many shards to split the dataset into.
index (`int`):
Which shard to select and return.
contiguous (`bool`, defaults to `True`):
Whether to select contiguous blocks of indices for shards.
keep_in_memory (`bool`, defaults to `False`):
Keep the dataset in memory instead of writing it to a cache file.
indices_cache_file_name (`str`, *optional*):
Provide the name of a path for the cache file. It is used to store the
indices of each shard instead of the automatically generated cache file name.
writer_batch_size (`int`, defaults to `1000`):
This only concerns the indices mapping.
Number of indices per write operation for the cache file writer.
This value is a good trade-off between memory usage during the processing, and processing speed.
A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds
Dataset({
features: ['text', 'label'],
num_rows: 1066
})
>>> ds.shard(num_shards=2, index=0)
Dataset({
features: ['text', 'label'],
num_rows: 533
})
```
"""
if not 0 <= index < num_shards:
raise ValueError("index should be in [0, num_shards-1]")
if contiguous:
div = len(self) // num_shards
mod = len(self) % num_shards
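# The first `mod` shards each get one extra row so that every row is assigned to exactly one shard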
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
indices = range(start, end)
else:
indices = np.arange(index, len(self), num_shards)
return self.select(
indices=indices,
keep_in_memory=keep_in_memory,
indices_cache_file_name=indices_cache_file_name,
writer_batch_size=writer_batch_size,
)
def to_csv(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_csv_kwargs,
) -> int:
"""Exports the dataset to csv | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`),
or a BinaryIO, where the dataset will be saved in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**to_csv_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
<Changed version="2.10.0">
Now, `index` defaults to `False` if not specified.
If you would like to write the index, pass `index=True` and also set a name for the index column by
passing `index_label`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_csv("path/to/dataset/data.csv")
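>>> # A sketch: `index` and `index_label` are forwarded to pandas.DataFrame.to_csv
>>> # ("row_id" is a hypothetical column name chosen for illustration)
>>> ds.to_csv("path/to/dataset/data.csv", index=True, index_label="row_id")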
```
"""
# Dynamic import to avoid circular dependency
from .io.csv import CsvDatasetWriter
return CsvDatasetWriter(
self,
path_or_buf,
batch_size=batch_size,
num_proc=num_proc,
storage_options=storage_options,
**to_csv_kwargs,
).write()
def to_dict(self, batch_size: Optional[int] = None) -> Union[dict, Iterator[dict]]:
"""Returns the dataset as a Python dict. Can also return a generator for large datasets.
Args:
batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`dict` or `Iterator[dict]`
Example:
```py
>>> ds.to_dict()
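>>> # Columns map to Python lists keyed by column name, e.g. {"text": [...], "label": [...]}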
```
"""
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pydict()
def to_list(self) -> list:
"""Returns the dataset as a Python list.
Returns:
`list`
Example:
```py
>>> ds.to_list()
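>>> # Each row becomes a dict, e.g. [{"text": "...", "label": 0}, ...]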
```
"""
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pylist()
def to_json(
self,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_json_kwargs,
) -> int:
"""Export the dataset to JSON Lines or JSON.
The default output format is [JSON Lines](https://jsonlines.org/).
To export to [JSON](https://www.json.org), pass the `lines=False` argument and the desired `orient`.
Args:
path_or_buf (`PathLike` or `FileOrBuffer`):
Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`),
or a BinaryIO, where the dataset will be saved in the specified format.
batch_size (`int`, *optional*):
Size of the batch to load in memory and write at once.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
num_proc (`int`, *optional*):
Number of processes for multiprocessing. By default, it doesn't
use multiprocessing. `batch_size` in this case defaults to
`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
value if you have sufficient compute power.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.19.0"/>
**to_json_kwargs (additional keyword arguments):
Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
Default arguments are `lines=True` and `orient="records"`.
<Changed version="2.11.0">
The parameter `index` defaults to `False` if `orient` is `"split"` or `"table"`.
If you would like to write the index, pass `index=True`.
</Changed>
Returns:
`int`: The number of characters or bytes written.
Example:
```py
>>> ds.to_json("path/to/dataset/directory/filename.jsonl")
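>>> # A sketch: write plain JSON rather than JSON Lines; kwargs are forwarded to pandas.DataFrame.to_json
>>> ds.to_json("path/to/dataset/data.json", lines=False, orient="records")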
```
"""
# Dynamic import to avoid circular dependency
from .io.json import JsonDatasetWriter
return JsonDatasetWriter(
self,
path_or_buf,
batch_size=batch_size,
num_proc=num_proc,
storage_options=storage_options,
**to_json_kwargs,
).write()
def to_pandas(
self, batch_size: Optional[int] = None, batched: bool = False
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
batch_size (`int`, *optional*):
The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
Returns:
`pandas.DataFrame` or `Iterator[pandas.DataFrame]`
Example:
```py
>>> ds.to_pandas()
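>>> # A sketch: stream the dataset as DataFrame batches instead of materializing it at once
>>> for df in ds.to_pandas(batched=True, batch_size=1024):
...     print(df.shape)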
```
"""
if not batched:
return query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
).to_pandas(types_mapper=pandas_types_mapper)
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
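# Lazily yield one DataFrame per batch_size window of rows; nothing is converted until iteration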
return (
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices,
).to_pandas(types_mapper=pandas_types_mapper)
for offset in range(0, len(self), batch_size)
)
def to_polars(
self,
batch_size: Optional[int] = None,
batched: bool = False,
schema_overrides: Optional[dict] = None,
rechunk: bool = True,
) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]:
"""Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets. | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |
Args:
batched (`bool`):
Set to `True` to return a generator that yields the dataset as batches
of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
batch_size (`int`, *optional*):
The size (number of rows) of the batches if `batched` is `True`.
Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
schema_overrides (`dict`, *optional*):
Support type specification or override of one or more columns; note that
any dtypes inferred from the schema param will be overridden.
rechunk (`bool`):
Make sure that all data is in contiguous memory. Defaults to `True`.
Returns:
`polars.DataFrame` or `Iterator[polars.DataFrame]`
Example:
```py
>>> ds.to_polars()
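>>> # A sketch: iterate the dataset as polars DataFrame batches of 1024 rows each
>>> for df in ds.to_polars(batched=True, batch_size=1024):
...     print(df.height)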
```
"""
if config.POLARS_AVAILABLE:
import polars as pl
if not batched:
return pl.from_arrow(
query_table(
table=self._data,
key=slice(0, len(self)),
indices=self._indices,
),
schema_overrides=schema_overrides,
rechunk=rechunk,
)
else:
batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
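# Lazily yield one polars.DataFrame per batch_size window of rows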
return (
pl.from_arrow(
query_table(
table=self._data,
key=slice(offset, offset + batch_size),
indices=self._indices,
),
schema_overrides=schema_overrides,
rechunk=rechunk,
)
for offset in range(0, len(self), batch_size)
)
else:
raise ValueError("Polars needs to be installed to be able to return Polars dataframes.") | 4 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py |