Columns: text (string, lengths 1 to 1.02k), class_index (int64, 0 to 271), source (string, 76 classes)
if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): if ex_iterable.iter_arrow: iterator = ex_iterable.iter_arrow() else: iterator = _convert_to_arrow(ex_iterable, batch_size=1) for key, pa_table in iterator: yield formatter.format_row(pa_table) return for key, example in ex_iterable: if self.features and not ex_iterable.is_typed: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_example`. example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self._token_per_repo_id ) yield format_dict(example) if format_dict else example def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
Args: batch_size (`int`): size of each batch to yield. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped. """ if self._formatting: formatter = get_formatter(self._formatting.format_type, features=self.features) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
ex_iterable = self._prepare_ex_iterable_for_iteration(batch_size=batch_size, drop_last_batch=drop_last_batch) if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"): if ex_iterable.iter_arrow: iterator = ex_iterable.iter_arrow() else: iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) for key, pa_table in iterator: yield formatter.format_batch(pa_table) return
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
iterator = iter(ex_iterable) for key, example in iterator: # If batched, first build the batch examples = [example] + [example for key, example in islice(iterator, batch_size - 1)] if drop_last_batch and len(examples) < batch_size: # ignore last batch return batch = _examples_to_batch(examples) if self.features and not ex_iterable.is_typed: # `IterableDataset` automatically fills missing columns with None. # This is done with `_apply_feature_types_on_batch`. batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) yield format_dict(batch) if format_dict else batch
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
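The `iter()` method above yields batches as dicts of lists rather than single examples. A minimal usage sketch, assuming the same `rotten_tomatoes` streaming split used in the docstring examples further down this file:

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
for batch in ds.iter(batch_size=4, drop_last_batch=True):
    # each batch is a dict of lists, e.g. {"text": [...], "label": [...]}
    assert len(batch["text"]) == 4
    break
```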
@staticmethod def from_generator( generator: Callable, features: Optional[Features] = None, gen_kwargs: Optional[dict] = None, split: NamedSplit = Split.TRAIN, ) -> "IterableDataset": """Create an Iterable Dataset from a generator. Args: generator (`Callable`): A generator function that `yields` examples. features (`Features`, *optional*): Dataset features. gen_kwargs(`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`. This can be used to improve shuffling and when iterating over the dataset with multiple workers. split ([`NamedSplit`], defaults to `Split.TRAIN`): Split name to be assigned to the dataset.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
<Added version="2.21.0"/> Returns: `IterableDataset` Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = IterableDataset.from_generator(gen) ```
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer >>> from torch.utils.data import DataLoader >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, split=split ).read()
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
@staticmethod def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, **kwargs, ) -> "IterableDataset": """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches. Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. Returns: [`IterableDataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = IterableDataset.from_spark(df) ``` """ from .io.spark import SparkDatasetReader
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if sys.platform == "win32": raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=True, **kwargs, ).read() @staticmethod def from_file(filename: str) -> "IterableDataset": """Instantiate an IterableDataset from an Arrow table at filename. Args: filename (`str`): File name of the dataset. Returns: [`IterableDataset`] """ pa_table_schema = read_schema_from_file(filename) inferred_features = Features.from_arrow_schema(pa_table_schema) ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename}) return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
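`from_file` has no usage example in the docstring above; a minimal sketch, where `"my_dataset.arrow"` is a hypothetical path to an Arrow file previously written by `datasets` (for example a cache file):

```py
from datasets import IterableDataset

# "my_dataset.arrow" is a placeholder path, not a real file in this repository
ds = IterableDataset.from_file("my_dataset.arrow")
print(ds.features)  # features are inferred from the Arrow schema
```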
def with_format( self, type: Optional[str] = None, ) -> "IterableDataset": """ Return a dataset with the specified format. The 'pandas' format is currently not implemented. Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'arrow', 'jax']`. `None` means it returns python objects (default). Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds = ds.with_format("torch") >>> next(iter(ds)) {'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'label': tensor(1), 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ type = get_format_type_from_alias(type) # TODO(QL): add format_kwargs # TODO(QL): add format_columns and return_all_columns # TODO(QL): add pandas format return IterableDataset( ex_iterable=self._ex_iterable, info=self._info.copy(), split=self._split, formatting=FormattingConfig(format_type=type), shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def map( self, function: Optional[Callable] = None, with_indices: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, features: Optional[Features] = None, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them. If your function returns a column that already exists, then it overwrites it. The function is applied on-the-fly on the examples when iterating over the dataset. You can specify whether the function should be batched or not with the `batched` parameter:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
- If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}. - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
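To make the batched case above concrete, here is a minimal sketch of a batched function that returns a different number of examples than it receives; the `rotten_tomatoes` stream and the sentence-splitting logic are illustrative assumptions, not part of the original docstring:

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

def split_sentences(batch):
    # the output batch may contain more (or fewer) examples than the input batch
    return {"text": [sentence for text in batch["text"] for sentence in text.split(" . ")]}

# drop the other columns so every output column has the same length
ds = ds.map(split_sentences, batched=True, batch_size=8, remove_columns=["label"])
print(next(iter(ds)))
```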
Args: function (`Callable`, *optional*, defaults to `None`): Function applied on-the-fly on the examples when you iterate on the dataset. It must have one of the following signatures: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True` - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`):
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, then the full dataset is provided as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than `batch_size` should be dropped instead of being processed by the function. remove_columns (`[List[str]]`, *optional*, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. features (`[Features]`, *optional*, defaults to `None`): Feature types of the resulting dataset. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
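As a sketch of how the `with_indices` and `fn_kwargs` arguments described above combine (the prefix and dataset are illustrative assumptions):

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

def add_prefix(example, idx, prefix="Review"):
    # `idx` is passed because with_indices=True; `prefix` comes from fn_kwargs
    example["text"] = f"{prefix} {idx}: " + example["text"]
    return example

ds = ds.map(add_prefix, with_indices=True, fn_kwargs={"prefix": "Movie review"})
print(next(iter(ds))["text"][:40])
```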
Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> list(ds.take(3)) [{'label': 1, 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'Review: effective but too-tepid biopic'}] ``` """ if isinstance(input_columns, str):
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
input_columns = [input_columns] if isinstance(remove_columns, str): remove_columns = [remove_columns] if function is None: function = identity_func if fn_kwargs is None: fn_kwargs = {}
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
ex_iterable = self._ex_iterable # no need to apply features if ex_iterable is typed and if there was no cast_column() input_features = ( None if (ex_iterable.is_typed and (self._info.features is None or self._info.features == ex_iterable.features)) else self._info.features )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self._formatting and self._formatting.format_type == "arrow": # apply formatting before iter_arrow to keep map examples iterable happy ex_iterable = FormattedExamplesIterable( ex_iterable, formatting=copy.deepcopy(self._formatting), features=input_features, token_per_repo_id=self._token_per_repo_id, ) ex_iterable = RebatchedArrowExamplesIterable( ex_iterable, batch_size=batch_size if batched else 1, drop_last_batch=drop_last_batch ) else: if self._formatting and self._ex_iterable.iter_arrow: ex_iterable = RebatchedArrowExamplesIterable( self._ex_iterable, batch_size=batch_size if batched else 1, drop_last_batch=drop_last_batch ) if self._formatting or input_features: # apply formatting after iter_arrow to avoid re-encoding the examples
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
ex_iterable = FormattedExamplesIterable( ex_iterable, formatting=copy.deepcopy(self._formatting), features=input_features, token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
ex_iterable = MappedExamplesIterable( ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting, features=features, ) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, ) -> "IterableDataset": """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function. The filtering is done on-the-fly when iterating over the dataset. Args: function (`Callable`): Callable with one of the following signatures:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
- `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False` - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False` - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True` - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
If no function is provided, defaults to an always True function: `lambda x: True`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`. input_columns (`str` or `List[str]`, *optional*): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, default `1000`): Number of examples per batch provided to `function` if `batched=True`. fn_kwargs (`Dict`, *optional*, default `None`): Keyword arguments to be passed to `function`. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds = ds.filter(lambda x: x["label"] == 0) >>> list(ds.take(3)) [{'label': 0, 'text': 'simplistic , silly and tedious .'}, {'label': 0, 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."}, {'label': 0, 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}] ``` """ if isinstance(input_columns, str): input_columns = [input_columns]
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
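The signatures above also allow a batched predicate that returns a list of booleans; a minimal sketch, with an arbitrary length threshold as the filtering criterion:

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

# the function receives a dict of lists and returns one boolean per example
ds = ds.filter(lambda batch: [len(text) > 100 for text in batch["text"]], batched=True, batch_size=64)
print(next(iter(ds)))
```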
# We need the examples to be decoded for certain feature types like Image or Audio, # format and type before filtering ex_iterable = self._ex_iterable if self._info.features or self._formatting: ex_iterable = FormattedExamplesIterable( ex_iterable, formatting=self._formatting, features=None if ex_iterable.is_typed else self._info.features, token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
ex_iterable = FilteredExamplesIterable( ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=self._formatting, ) return IterableDataset( ex_iterable=ex_iterable, info=self._info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def shuffle( self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000 ) -> "IterableDataset": """ Randomly shuffles the elements of this dataset.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will initially select a random element from only the first 1000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001st) element, maintaining the 1000-element buffer. If the dataset is made of several shards, it also shuffles the order of the shards. However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`], then the order of the shards is kept unchanged.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
Args: seed (`int`, *optional*, defaults to `None`): Random seed that will be used to shuffle the dataset. It is used to sample from the shuffle buffer and also to shuffle the data shards. generator (`numpy.random.Generator`, *optional*): Numpy random Generator to use to compute the permutation of the dataset rows. If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy). buffer_size (`int`, defaults to `1000`): Size of the buffer. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> shuffled_ds = ds.shuffle(seed=42) >>> list(shuffled_ds.take(3)) [{'label': 1, 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."}, {'label': 1,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'}, {'label': 1, 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}] ``` """ if generator is None: generator = np.random.default_rng(seed) else: generator = deepcopy(generator) shuffling = ShufflingConfig(generator=generator, _original_seed=seed) return IterableDataset( ex_iterable=BufferShuffledExamplesIterable( self._ex_iterable, buffer_size=buffer_size, generator=generator ), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def set_epoch(self, epoch: int): self._epoch += epoch - self._epoch # update torch value in shared memory in-place def skip(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] that skips the first `n` elements. Args: n (`int`): Number of elements to skip. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> list(ds.take(3)) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}] >>> ds = ds.skip(1) >>> list(ds.take(3)) [{'label': 1,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}, {'label': 1, 'text': 'effective but too-tepid biopic'}, {'label': 1, 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}] ``` """ ex_iterable = SkipExamplesIterable( self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None, ) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed),
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def take(self, n: int) -> "IterableDataset": """ Create a new [`IterableDataset`] with only the first `n` elements. Args: n (`int`): Number of elements to take. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> small_ds = ds.take(2) >>> list(small_ds) [{'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}, {'label': 1, 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}] ``` """ ex_iterable = TakeExamplesIterable( self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None, ) return IterableDataset( ex_iterable=ex_iterable,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard( self, num_shards: int, index: int, contiguous: bool = True, ) -> "IterableDataset": """Return the `index`-nth shard from the dataset split into `num_shards` pieces. This shards deterministically. `dataset.shard(n, i)` splits the dataset into contiguous chunks, so it can be easily concatenated back together after processing. If `dataset.num_shards % n == l`, then the first `l` datasets each have `(dataset.num_shards // n) + 1` shards, and the remaining datasets have `(dataset.num_shards // n)` shards. `datasets.concatenate_datasets([dset.shard(n, i) for i in range(n)])` returns a dataset with the same order as the original. In particular, `dataset.shard(dataset.num_shards, i)` returns a dataset with 1 shard. Note: `n` should be less than or equal to the number of shards in the dataset `dataset.num_shards`.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
On the other hand, `dataset.shard(n, i, contiguous=False)` contains all the shards of the dataset whose index mod `n = i`. Be sure to shard before using any randomizing operator (such as `shuffle`). It is best if the shard operator is used early in the dataset pipeline. Args: num_shards (`int`): How many shards to split the dataset into. index (`int`): Which shard to select and return. contiguous: (`bool`, defaults to `True`): Whether to select contiguous blocks of indices for shards. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("amazon_polarity", split="train", streaming=True) >>> ds IterableDataset({ features: ['label', 'title', 'content'], num_shards: 4 }) >>> ds.shard(num_shards=2, index=0) IterableDataset({ features: ['label', 'title', 'content'], num_shards: 2 }) ``` """ ex_iterable = self._ex_iterable.shard_data_sources(num_shards=num_shards, index=index, contiguous=contiguous) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
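To complement the example above, a sketch of the `contiguous=False` mode described earlier, which keeps the source shards whose index modulo `num_shards` equals `index` (same dataset as in the docstring example; this sketch is an illustrative assumption, not part of the original docstring):

```py
from datasets import load_dataset

ds = load_dataset("amazon_polarity", split="train", streaming=True)

# shard i keeps the source shards whose position % num_shards == i
part_0 = ds.shard(num_shards=2, index=0, contiguous=False)
part_1 = ds.shard(num_shards=2, index=1, contiguous=False)
print(part_0.num_shards, part_1.num_shards)
```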
@property def column_names(self) -> Optional[List[str]]: """Names of the columns in the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True) >>> ds.column_names ['text', 'label'] ``` """ return list(self._info.features.keys()) if self._info.features is not None else None def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset": """Add column to Dataset. Args: name (str): Column name. column (list or np.array): Column data to be added. Returns: `IterableDataset` """ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
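`add_column` above has no docstring example; a minimal sketch, assuming the `rotten_tomatoes` train split (8,530 rows, as shown elsewhere in this file) so that the new column provides one value per example:

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)

# one value per example; the rotten_tomatoes train split has 8,530 rows
ids = list(range(8530))
ds = ds.add_column("id", ids)
print(next(iter(ds)).keys())  # now includes the new "id" column
```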
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Returns: `IterableDataset`: A copy of the dataset with a renamed column. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} >>> ds = ds.rename_column("text", "movie_review") >>> next(iter(ds)) {'label': 1, 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ return self.rename_columns({original_column_name: new_column_name})
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names Returns: `IterableDataset`: A copy of the dataset with renamed columns """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map( partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping) ) if original_features is not None: ds_iterable._info.features = Features( { column_mapping[col] if col in column_mapping.keys() else col: feature for col, feature in original_features.items() } ) return ds_iterable
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
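A minimal sketch of `rename_columns`, mirroring the single-column `rename_column` example above (the new names are illustrative):

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
ds = ds.rename_columns({"text": "movie_review", "label": "sentiment"})
print(next(iter(ds)).keys())  # now "movie_review" and "sentiment"
```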
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """ Remove one or several column(s) in the dataset and the features associated to them. The removal is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: `IterableDataset`: A copy of the dataset object without the columns to remove. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.remove_columns("label") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ original_features = self._info.features.copy() if self._info.features else None ds_iterable = self.map(remove_columns=column_names) if original_features is not None: ds_iterable._info.features = original_features.copy() for col, _ in original_features.items(): if col in column_names:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
del ds_iterable._info.features[col]
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
return ds_iterable def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset": """Select one or several column(s) in the dataset and the features associated to them. The selection is done on-the-fly on the examples when iterating over the dataset. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to select. Returns: `IterableDataset`: A copy of the dataset object with selected columns. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1} >>> ds = ds.select_columns("text") >>> next(iter(ds)) {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` """ if isinstance(column_names, str): column_names = [column_names]
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self._info: info = copy.deepcopy(self._info) if self._info.features is not None: missing_columns = set(column_names) - set(self._info.features.keys()) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Columns in the dataset: " f"{list(self._info.features.keys())}." ) info.features = Features({c: info.features[c] for c in column_names}) ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) return IterableDataset( ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset": """Cast column to feature for decoding. Args: column (`str`): Column name. feature (`Feature`): Target feature. Returns: `IterableDataset` Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True) >>> ds.features {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds.features
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
{'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None), 'english_transcription': Value(dtype='string', id=None), 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None), 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None), 'path': Value(dtype='string', id=None), 'transcription': Value(dtype='string', id=None)} ``` """ info = self._info.copy() info.features[column] = feature return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting,
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def cast( self, features: Features, ) -> "IterableDataset": """ Cast the dataset to a new set of features. Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset. Returns: `IterableDataset`: A copy of the dataset with casted features. Example:
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True) >>> ds.features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features["label"] = ClassLabel(names=["bad", "good"]) >>> new_features["text"] = Value("large_string") >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ info = self._info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed),
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _step(self, step: int, offset: int) -> "IterableDataset": ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) return IterableDataset( ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, )
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _resolve_features(self): if self.features is not None: return self elif self._ex_iterable.is_typed: features = self._ex_iterable.features else: features = _infer_features_from_batch(self.with_format(None)._head()) info = self.info.copy() info.features = features return IterableDataset( ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id, ) def batch(self, batch_size: int, drop_last_batch: bool = False) -> "IterableDataset": """ Group samples from the dataset into batches.
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
Args: batch_size (`int`): The number of samples in each batch. drop_last_batch (`bool`, defaults to `False`): Whether to drop the last incomplete batch. Example: ```py >>> ds = load_dataset("some_dataset", streaming=True) >>> batched_ds = ds.batch(batch_size=32) ``` """ def batch_fn(unbatched): return {k: [v] for k, v in unbatched.items()} return self.map(batch_fn, batched=True, batch_size=batch_size, drop_last_batch=drop_last_batch)
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
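To show what `batch` produces, a short sketch using a real streaming split instead of the `"some_dataset"` placeholder from the docstring (the batch size is arbitrary):

```py
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
batched = ds.batch(batch_size=4)

first = next(iter(batched))
# each element of the batched dataset is a dict of lists of length batch_size
print(len(first["text"]), len(first["label"]))  # 4 4
```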
class DatasetDict(dict): """A dictionary (dict of str: datasets.Dataset) with dataset transform methods (map, filter, etc.)""" def _check_values_type(self): for dataset in self.values(): if not isinstance(dataset, Dataset): raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'") def _check_values_features(self): items = list(self.items()) for item_a, item_b in zip(items[:-1], items[1:]): if item_a[1].features != item_b[1].features: raise ValueError( f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}" ) def __enter__(self): return self
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables for dataset in self.values(): if hasattr(dataset, "_data"): del dataset._data if hasattr(dataset, "_indices"): del dataset._indices
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def __getitem__(self, k) -> Dataset: if isinstance(k, (str, NamedSplit)) or len(self) == 0: return super().__getitem__(k) else: available_suggested_splits = [ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self ] suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0] raise KeyError( f"Invalid key: {k}. Please first select a split. For example: " f"`my_dataset_dictionary['{suggested_split}'][{k}]`. " f"Available splits: {sorted(self)}" ) @property def data(self) -> Dict[str, Table]: """The Apache Arrow tables backing each split. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.data ``` """ self._check_values_type() return {k: dataset.data for k, dataset in self.items()} @property def cache_files(self) -> Dict[str, Dict]: """The cache files containing the Apache Arrow table backing each split. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cache_files {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}], 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}], 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]} ``` """ self._check_values_type() return {k: dataset.cache_files for k, dataset in self.items()}
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
@property def num_columns(self) -> Dict[str, int]: """Number of columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_columns {'test': 2, 'train': 2, 'validation': 2} ``` """ self._check_values_type() return {k: dataset.num_columns for k, dataset in self.items()} @property def num_rows(self) -> Dict[str, int]: """Number of rows in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.num_rows {'test': 1066, 'train': 8530, 'validation': 1066} ``` """ self._check_values_type() return {k: dataset.num_rows for k, dataset in self.items()}
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
@property def column_names(self) -> Dict[str, List[str]]: """Names of the columns in each split of the dataset. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.column_names {'test': ['text', 'label'], 'train': ['text', 'label'], 'validation': ['text', 'label']} ``` """ self._check_values_type() return {k: dataset.column_names for k, dataset in self.items()} @property def shape(self) -> Dict[str, Tuple[int]]: """Shape of each split of the dataset (number of rows, number of columns). Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.shape {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)} ``` """ self._check_values_type() return {k: dataset.shape for k, dataset in self.items()}
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def flatten(self, max_depth=16) -> "DatasetDict": """Flatten the Apache Arrow Table of each split (nested features are flattened). Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("squad") >>> ds["train"].features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} >>> ds.flatten() DatasetDict({ train: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 87599 }) validation: Dataset({ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'], num_rows: 10570 }) }) ``` """ self._check_values_type()
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def unique(self, column: str) -> Dict[str, List]: """Return a list of the unique elements in a column for each split. This is implemented in the low-level backend and as such, very fast. Args: column (`str`): column name (list all the column names with [`~datasets.DatasetDict.column_names`]) Returns: Dict[`str`, `list`]: Dictionary of unique elements in the given column. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.unique("label") {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]} ``` """ self._check_values_type() return {k: dataset.unique(column) for k, dataset in self.items()}
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def cleanup_cache_files(self) -> Dict[str, int]: """Clean up all cache files in the dataset cache directory, except the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files. Return: `Dict` with the number of removed files for each split Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.cleanup_cache_files() {'test': 0, 'train': 0, 'validation': 0} ``` """ self._check_values_type() return {k: dataset.cleanup_cache_files() for k, dataset in self.items()} def __repr__(self): repr = "\n".join([f"{k}: {v}" for k, v in self.items()]) repr = re.sub(r"^", " " * 4, repr, 0, re.M) return f"DatasetDict({{\n{repr}\n}})"
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def cast(self, features: Features) -> "DatasetDict": """ Cast the dataset to a new set of features. The transformation is applied to all the datasets of the dataset dictionary. Args: features ([`Features`]): New features to cast the dataset to. The name and order of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~DatasetDict.map`] to update the dataset. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds["train"].features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds["train"].features {'label': ClassLabel(names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()}) def cast_column(self, column: str, feature) -> "DatasetDict": """Cast column to feature for decoding.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
Args: column (`str`): Column name. feature ([`Feature`]): Target feature. Returns: [`DatasetDict`] Example: ```py >>> from datasets import load_dataset, ClassLabel >>> ds = load_dataset("rotten_tomatoes") >>> ds["train"].features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds["train"].features {'label': ClassLabel(names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ self._check_values_type() return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """ Remove one or several column(s) from each split in the dataset and the features associated to the column(s). The transformation is applied to all the splits of the dataset dictionary. You can also remove a column using [`~DatasetDict.map`] with `remove_columns` but the present method doesn't copy the data of the remaining columns and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. Returns: [`DatasetDict`]: A copy of the dataset object without the columns to remove. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds = ds.remove_columns("label") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()}) def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict": """ Rename a column in the dataset and move the features associated to the original column under the new column name. The transformation is applied to all the datasets of the dataset dictionary.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
You can also rename a column using [`~DatasetDict.map`] with `remove_columns` but the present method: - takes care of moving the original features under the new column name. - doesn't copy the data to a new dataset and is thus much faster. Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds = ds.rename_column("label", "label_new") DatasetDict({ train: Dataset({ features: ['text', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict( { k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for k, dataset in self.items() } )
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. The transformation is applied to all the datasets of the dataset dictionary. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names. Returns: [`DatasetDict`]: A copy of the dataset with renamed columns. Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) DatasetDict({ train: Dataset({ features: ['text_new', 'label_new'], num_rows: 8530 }) validation: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) test: Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}) def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict": """Select one or several column(s) from each split in the dataset and the features associated to the column(s).
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
The transformation is applied to all the splits of the dataset dictionary. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes") >>> ds.select_columns("text") DatasetDict({ train: Dataset({ features: ['text'], num_rows: 8530 }) validation: Dataset({ features: ['text'], num_rows: 1066 }) test: Dataset({ features: ['text'], num_rows: 1066 }) }) ``` """ self._check_values_type() return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict": """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables. Args: column (`str`): The name of the column to cast. include_nulls (`bool`, defaults to `False`): Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label. <Added version="1.14.2"/> Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("boolq")
>>> ds["train"].features
{'answer': Value(dtype='bool', id=None),
 'passage': Value(dtype='string', id=None),
 'question': Value(dtype='string', id=None)}
>>> ds = ds.class_encode_column("answer")
>>> ds["train"].features
{'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
 'passage': Value(dtype='string', id=None),
 'question': Value(dtype='string', id=None)}
```
"""
self._check_values_type()
return DatasetDict(
    {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
)
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
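After the cast shown above, the column holds integer class ids that the new ClassLabel feature can decode. A minimal sketch, assuming the "boolq" dataset from the docstring example (not part of the source above):

```py
# Sketch: mapping encoded class ids back to their string names via ClassLabel.int2str.
from datasets import load_dataset

ds = load_dataset("boolq")
ds = ds.class_encode_column("answer")

label_feature = ds["train"].features["answer"]  # a ClassLabel after encoding
first_id = ds["train"][0]["answer"]             # an integer class id
print(label_feature.int2str(first_id))          # e.g. 'True' or 'False'
```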
@contextlib.contextmanager
def formatted_as(
    self,
    type: Optional[str] = None,
    columns: Optional[List] = None,
    output_all_columns: bool = False,
    **format_kwargs,
):
    """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
    The transformation is applied to all the datasets of the dataset dictionary.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
Args:
    type (`str`, *optional*):
        Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
        `None` means `__getitem__` returns python objects (default).
    columns (`List[str]`, *optional*):
        Columns to format in the output.
        `None` means `__getitem__` returns all columns (default).
    output_all_columns (`bool`, defaults to `False`):
        Keep un-formatted columns as well in the output (as python objects).
    **format_kwargs (additional keyword arguments):
        Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
"""
self._check_values_type()
old_format_type = {k: dataset._format_type for k, dataset in self.items()}
old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
try:
    self.set_format(type, columns, output_all_columns, **format_kwargs)
    yield
finally:
    for k, dataset in self.items():
        dataset.set_format(
            old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
        )
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
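The try/finally above shows that the format is only active inside the `with` block and the previous format is restored afterwards. A minimal usage sketch, assuming the "rotten_tomatoes" dataset and a bert-base-cased tokenizer as in the docstring examples elsewhere in this file (not part of the source above):

```py
# Sketch: formatted_as sets the format temporarily and restores it when the block exits.
from datasets import load_dataset
from transformers import AutoTokenizer

ds = load_dataset("rotten_tomatoes")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)

with ds.formatted_as(type="numpy", columns=["input_ids", "attention_mask"]):
    batch = ds["train"][:4]            # numpy arrays while inside the block
print(ds["train"].format["type"])       # previous format restored (None by default)
```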
def set_format(
    self,
    type: Optional[str] = None,
    columns: Optional[List] = None,
    output_all_columns: bool = False,
    **format_kwargs,
):
    """Set `__getitem__` return format (type and columns).
    The format is set for every dataset in the dataset dictionary.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
Args:
    type (`str`, *optional*):
        Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
        `None` means `__getitem__` returns python objects (default).
    columns (`List[str]`, *optional*):
        Columns to format in the output.
        `None` means `__getitem__` returns all columns (default).
    output_all_columns (`bool`, defaults to `False`):
        Keep un-formatted columns as well in the output (as python objects).
    **format_kwargs (additional keyword arguments):
        Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.

It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
`new formatted columns = (all columns - previously unformatted columns)`

Example:

```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
 'format_kwargs': {},
 'output_all_columns': False,
 'type': 'numpy'}
```
"""
self._check_values_type()
for dataset in self.values():
    dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
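A small sketch of the rule quoted in the docstring above, i.e. that columns added by `map` after `set_format` become formatted as well; the dataset name and the added column are illustrative assumptions, and the printed result reflects the documented behaviour rather than a guaranteed output (not part of the source above):

```py
# Sketch: after set_format, a column added by map is expected to join the formatted columns.
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes")
ds.set_format(type="numpy", columns=["label"])
ds = ds.map(lambda x: {"text_length": len(x["text"])})

print(ds["train"].format["columns"])  # expected to include "text_length" as well
```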
def reset_format(self):
    """Reset `__getitem__` return format to python objects and all columns.
    The transformation is applied to all the datasets of the dataset dictionary.

    Same as `self.set_format()`

    Example:
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
```py
>>> from datasets import load_dataset
>>> from transformers import AutoTokenizer
>>> ds = load_dataset("rotten_tomatoes")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
>>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
>>> ds["train"].format
{'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
 'format_kwargs': {},
 'output_all_columns': False,
 'type': 'numpy'}
>>> ds.reset_format()
>>> ds["train"].format
{'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
 'format_kwargs': {},
 'output_all_columns': False,
 'type': None}
```
"""
self._check_values_type()
for dataset in self.values():
    dataset.set_format()
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
def set_transform(
    self,
    transform: Optional[Callable],
    columns: Optional[List] = None,
    output_all_columns: bool = False,
):
    """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
    The transform is set for every dataset in the dataset dictionary.

    As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
Args:
    transform (`Callable`, *optional*):
        User-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`.
        A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
        This function is applied right before returning the objects in ``__getitem__``.
    columns (`List[str]`, *optional*):
        Columns to format in the output.
        If specified, then the input batch of the transform only contains those columns.
    output_all_columns (`bool`, defaults to `False`):
        Keep un-formatted columns as well in the output (as python objects).
        If set to `True`, then the other un-formatted columns are kept with the output of the transform.
"""
self._check_values_type()
for dataset in self.values():
    dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
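The docstring above has no usage example, so here is a minimal sketch of an on-the-fly transform, assuming the "rotten_tomatoes" dataset and a bert-base-cased tokenizer (not part of the source above):

```py
# Sketch: set_transform tokenizes each accessed batch lazily instead of
# pre-tokenizing the whole dataset with map.
from datasets import load_dataset
from transformers import AutoTokenizer

ds = load_dataset("rotten_tomatoes")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

def tokenize(batch):
    # `batch` is a dict of lists containing only the selected columns ("text" here);
    # the returned dict replaces the formatted output of __getitem__.
    return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")

ds.set_transform(tokenize, columns=["text"])
print(ds["train"][:2]["input_ids"].shape)  # torch tensors produced on access
```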
def with_format(
    self,
    type: Optional[str] = None,
    columns: Optional[List] = None,
    output_all_columns: bool = False,
    **format_kwargs,
) -> "DatasetDict":
    """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
    The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
    The format is set for every dataset in the dataset dictionary.

    It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].

    Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
34
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/dataset_dict.py
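A minimal sketch of the difference spelled out in the docstring above, i.e. that `with_format` returns a new `DatasetDict` and leaves the original untouched, whereas `set_format` modifies the splits in place; the dataset name is an illustrative assumption (not part of the source above):

```py
# Sketch: with_format returns a formatted copy; the original object keeps its format.
from datasets import load_dataset

ds = load_dataset("rotten_tomatoes")
ds_np = ds.with_format(type="numpy", columns=["label"])

print(ds["train"].format["type"])     # None: original object unchanged
print(ds_np["train"].format["type"])  # 'numpy': only the returned copy is formatted
```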