Dataset columns: `text` (string, lengths 1 to 1.02k), `class_index` (int64, 0 to 271), `source` (string, 76 distinct values).
class DatasetInfoMixin: """This base class exposes some attributes of DatasetInfo at the base level of the Dataset for easy access. """ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]): self._info = info self._split = split @property def info(self): """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset.""" return self._info @property def split(self): """[`~datasets.NamedSplit`] object corresponding to a named dataset split.""" return self._split @property def builder_name(self) -> str: return self._info.builder_name @property def citation(self) -> str: return self._info.citation @property def config_name(self) -> str: return self._info.config_name @property def dataset_size(self) -> Optional[int]: return self._info.dataset_size @property def description(self) -> str: return self._info.description
0
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@property def download_checksums(self) -> Optional[dict]: return self._info.download_checksums @property def download_size(self) -> Optional[int]: return self._info.download_size @property def features(self) -> Optional[Features]: return self._info.features.copy() if self._info.features is not None else None @property def homepage(self) -> Optional[str]: return self._info.homepage @property def license(self) -> Optional[str]: return self._info.license @property def size_in_bytes(self) -> Optional[int]: return self._info.size_in_bytes @property def supervised_keys(self): return self._info.supervised_keys @property def version(self): return self._info.version
0
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
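For context, a minimal sketch of how these mixin properties surface `DatasetInfo` fields directly on a `Dataset` (an assumption-laden illustration, not part of the source: it requires the `datasets` library and access to the `rotten_tomatoes` dataset used in the docstrings below):

```py
from datasets import load_dataset

# DatasetInfoMixin proxies the DatasetInfo object attached to the dataset
ds = load_dataset("rotten_tomatoes", split="validation")

print(ds.info.description)  # full DatasetInfo via the `info` property
print(ds.split)             # NamedSplit, e.g. validation
print(ds.features)          # copy of info.features
print(ds.dataset_size)      # size metadata; may be None for in-memory datasets
```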
class TensorflowDatasetMixin: _TF_DATASET_REFS = set() @staticmethod def _get_output_signature( dataset: "Dataset", collate_fn: Callable, collate_fn_args: dict, cols_to_retain: Optional[List[str]] = None, batch_size: Optional[int] = None, num_test_batches: int = 20, ): """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure it out just by inspecting the dataset.
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: dataset (`Dataset`): Dataset to load samples from. collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the `collate_fn`. batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference. Can be None, which indicates that batch sizes can be variable. num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: `dict`: Dict mapping column names to tf.TensorSpec objects `dict`: Dict mapping column names to np.dtype objects """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.") if len(dataset) == 0: raise ValueError("Unable to get the output signature because the dataset is empty.") if batch_size is not None: batch_size = min(len(dataset), batch_size) test_batch_size = 1 if cols_to_retain is not None: cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
test_batches = [] for _ in range(num_test_batches): indices = sample(range(len(dataset)), test_batch_size) test_batch = dataset[indices] if cols_to_retain is not None: test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain} test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)] test_batch = collate_fn(test_batch, **collate_fn_args) test_batches.append(test_batch)
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
tf_columns_to_signatures = {} np_columns_to_dtypes = {} for column in test_batches[0].keys(): raw_arrays = [batch[column] for batch in test_batches] # In case the collate_fn returns something strange np_arrays = [] for array in raw_arrays: if isinstance(array, np.ndarray): np_arrays.append(array) elif isinstance(array, tf.Tensor): np_arrays.append(array.numpy()) else: np_arrays.append(np.array(array))
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool: tf_dtype = tf.int64 np_dtype = np.int64 elif np.issubdtype(np_arrays[0].dtype, np.number): tf_dtype = tf.float32 np_dtype = np.float32 elif np_arrays[0].dtype.kind == "U": # Unicode strings np_dtype = np.str_ tf_dtype = tf.string else: raise RuntimeError( f"Unrecognized array dtype {np_arrays[0].dtype}. \n" "Nested types and image/audio types are not supported yet." ) shapes = [array.shape for array in np_arrays] static_shape = [] for dim in range(len(shapes[0])): sizes = {shape[dim] for shape in shapes} if dim == 0: static_shape.append(batch_size) continue
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if len(sizes) == 1: # This dimension looks constant static_shape.append(sizes.pop()) else: # Use None for variable dimensions static_shape.append(None) tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype) np_columns_to_dtypes[column] = np_dtype
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
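To make the inference rule above concrete (integer and bool arrays map to int64, other numerics to float32, unicode arrays to string, and any non-batch dimension whose size varies across test batches becomes `None`), here is a small standalone sketch; `infer_spec` is a hypothetical helper that only mirrors the logic and is not part of the library:

```py
import numpy as np

def infer_spec(arrays, batch_size=None):
    # Hypothetical helper mirroring the per-column logic of _get_output_signature.
    first = arrays[0]
    if np.issubdtype(first.dtype, np.integer) or first.dtype == bool:
        dtype = "int64"    # the real method uses tf.int64 / np.int64
    elif np.issubdtype(first.dtype, np.number):
        dtype = "float32"  # tf.float32 / np.float32
    elif first.dtype.kind == "U":
        dtype = "string"   # tf.string / np.str_
    else:
        raise RuntimeError(f"Unrecognized dtype {first.dtype}")
    shapes = [a.shape for a in arrays]
    static_shape = [batch_size]  # leading dimension is the (possibly None) batch size
    for dim in range(1, len(shapes[0])):
        sizes = {s[dim] for s in shapes}
        # a dimension that is constant across test batches keeps its size, otherwise None
        static_shape.append(sizes.pop() if len(sizes) == 1 else None)
    return static_shape, dtype

# Two test batches whose second dimension differs -> that dimension becomes None
print(infer_spec([np.zeros((8, 12), np.int32), np.zeros((8, 7), np.int32)]))
# ([None, None], 'int64')
```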
return tf_columns_to_signatures, np_columns_to_dtypes def to_tf_dataset( self, batch_size: Optional[int] = None, columns: Optional[Union[str, List[str]]] = None, shuffle: bool = False, collate_fn: Optional[Callable] = None, drop_remainder: bool = False, collate_fn_args: Optional[Dict[str, Any]] = None, label_cols: Optional[Union[str, List[str]]] = None, prefetch: bool = True, num_workers: int = 0, num_test_batches: int = 20, ): """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw `tf.Tensor` is yielded instead.
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: batch_size (`int`, *optional*): Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`. columns (`List[str]` or `str`, *optional*): Dataset column(s) to load in the `tf.data.Dataset`. Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used. shuffle(`bool`, defaults to `False`): Shuffle the dataset order when loading. Recommended `True` for training, `False` for validation/evaluation. drop_remainder(`bool`, defaults to `False`): Drop the last incomplete batch when loading. Ensures that all batches yielded by the dataset will have the same length on the batch dimension. collate_fn(`Callable`, *optional*):
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
A function or callable object (such as a `DataCollator`) that will collate lists of samples into a batch. collate_fn_args (`Dict`, *optional*): An optional `dict` of keyword arguments to be passed to the `collate_fn`. label_cols (`List[str]` or `str`, defaults to `None`): Dataset column(s) to load as labels. Note that many models compute loss internally rather than letting Keras do it, in which case passing the labels here is optional, as long as they're in the input `columns`. prefetch (`bool`, defaults to `True`): Whether to run the dataloader in a separate thread and maintain a small buffer of batches for training. Improves performance by allowing data to be loaded in the background while the model is training. num_workers (`int`, defaults to `0`):
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Number of workers to use for loading the dataset. num_test_batches (`int`, defaults to `20`): Number of batches to use to infer the output signature of the dataset. The higher this number, the more accurate the signature will be, but the longer it will take to create the dataset.
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: `tf.data.Dataset` Example: ```py >>> ds_train = ds["train"].to_tf_dataset( ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` """ if config.TF_AVAILABLE: import tensorflow as tf else: raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if (isinstance(columns, list) and len(columns) == 1) or ( isinstance(label_cols, list) and len(label_cols) == 1 ): warnings.warn( "The output of `to_tf_dataset` will change when passing a single element list for `labels` or " "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a " "single string.\n" "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n" " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n" "New behaviour: columns=['a'], labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n" " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ", FutureWarning, )
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy): logger.warning( "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data " "pipeline and is not compatible with remote TPU connections. If you encounter errors, please " "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of " "Tensors instead of streaming with to_tf_dataset()." )
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if collate_fn is None: # Set a very simple default collator that just stacks things together collate_fn = minimal_tf_collate_fn if collate_fn_args is None: collate_fn_args = {} if label_cols and not columns: raise ValueError("Cannot specify label_cols without specifying columns!") if label_cols is None: label_cols = [] elif isinstance(label_cols, str): label_cols = [label_cols] if len(set(label_cols)) < len(label_cols): raise ValueError("List of label_cols contains duplicates.") if columns: if isinstance(columns, str): columns = [columns] if len(set(columns)) < len(columns): raise ValueError("List of columns contains duplicates.") cols_to_retain = list(set(columns + label_cols)) else: cols_to_retain = None # Indicates keeping all valid columns columns = []
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if self.format["type"] not in ["custom", "numpy"]: dataset = self.with_format("numpy") else: dataset = self # TODO(Matt, QL): deprecate the retention of label_ids and label output_signature, columns_to_np_types = dataset._get_output_signature( dataset, collate_fn=collate_fn, collate_fn_args=collate_fn_args, cols_to_retain=cols_to_retain, batch_size=batch_size if drop_remainder else None, num_test_batches=num_test_batches, ) if "labels" in output_signature: if ("label_ids" in columns or "label" in columns) and "labels" not in columns: columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"] if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols: label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
for col in columns: if col not in output_signature: raise ValueError(f"Column {col} not found in dataset!") for col in label_cols: if col not in output_signature: raise ValueError(f"Label column {col} not found in dataset!")
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if num_workers == 0: tf_dataset = dataset_to_tf( dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, ) elif num_workers > 0: if batch_size is None: raise NotImplementedError( "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing " "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0." ) tf_dataset = multiprocess_dataset_to_tf( dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn,
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, num_workers=num_workers, ) else: raise ValueError("num_workers must be >= 0")
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
def split_features_and_labels(input_batch): # TODO(Matt, QL): deprecate returning the dict content when there's only one key features = {key: tensor for key, tensor in input_batch.items() if key in columns} labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols} if len(features) == 1: features = list(features.values())[0] if len(labels) == 1: labels = list(labels.values())[0] if isinstance(labels, dict) and len(labels) == 0: return features else: return features, labels if cols_to_retain is not None: tf_dataset = tf_dataset.map(split_features_and_labels) if prefetch: tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# Remove a reference to the open Arrow file on delete def cleanup_callback(ref): dataset.__del__() self._TF_DATASET_REFS.remove(ref) self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback)) return tf_dataset
1
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
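Putting `to_tf_dataset` together end to end, a minimal sketch assuming TensorFlow is installed; the column names and toy data are illustrative, and `collate_fn` is omitted so the default minimal collator is used:

```py
import numpy as np
from datasets import Dataset

# toy pre-tokenized data; column names are illustrative
ds = Dataset.from_dict({
    "input_ids": np.random.randint(0, 1000, size=(64, 16)).tolist(),
    "attention_mask": np.ones((64, 16), dtype=np.int64).tolist(),
    "label": np.random.randint(0, 2, size=(64,)).tolist(),
})

# `columns` become the dict of inputs and `label_cols` the labels;
# omitting collate_fn falls back to the minimal stacking collator
tf_ds = ds.to_tf_dataset(
    columns=["input_ids", "attention_mask"],
    label_cols="label",
    batch_size=8,
    shuffle=True,
)

for features, labels in tf_ds.take(1):
    print({k: v.shape for k, v in features.items()}, labels.shape)
```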
class DatasetTransformationNotAllowedError(Exception): pass
2
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
class NonExistentDatasetError(Exception): """Used when we expect the existence of a dataset""" pass
3
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): """A Dataset backed by an Arrow table.""" def __init__( self, arrow_table: Table, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_table: Optional[Table] = None, fingerprint: Optional[str] = None, ): info = info.copy() if info is not None else DatasetInfo() DatasetInfoMixin.__init__(self, info=info, split=split) IndexableMixin.__init__(self) self._data: Table = _check_table(arrow_table) self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None maybe_register_dataset_for_temp_dir_deletion(self) self._format_type: Optional[str] = None self._format_kwargs: dict = {} self._format_columns: Optional[list] = None self._output_all_columns: bool = False self._fingerprint: str = fingerprint # Read metadata
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata: metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode()) if ( "fingerprint" in metadata and self._fingerprint is None ): # try to load fingerprint from the arrow file metadata self._fingerprint = metadata["fingerprint"] # Infer features if None inferred_features = Features.from_arrow_schema(arrow_table.schema) if self.info.features is None: self.info.features = inferred_features else: # make sure the nested columns are in the right order try: self.info.features = self.info.features.reorder_fields_as(inferred_features) except ValueError as e: raise ValueError( f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file." )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# In case there are types like pa.dictionary that we need to convert to the underlying type if self.data.schema != self.info.features.arrow_schema: self._data = self.data.cast(self.info.features.arrow_schema) # Infer fingerprint if None if self._fingerprint is None: self._fingerprint = generate_fingerprint(self) # Sanity checks if self._info.features is None: raise ValueError("Features can't be None in a Dataset object") if self._fingerprint is None: raise ValueError("Fingerprint can't be None in a Dataset object") if self.info.features.type != inferred_features.type: raise ValueError( f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}" )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if self._indices is not None: if not pa.types.is_unsigned_integer(self._indices.column(0).type): raise ValueError( f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}" ) _check_column_names(self._data.column_names) self._data = update_metadata_with_features(self._data, self._info.features) @property def features(self) -> Features: features = super().features if features is None: # this is already checked in __init__ raise ValueError("Features can't be None in a Dataset object") return features @classmethod def from_file( cls, filename: str, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_filename: Optional[str] = None, in_memory: bool = False, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow table at filename.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: filename (`str`): File name of the dataset. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_filename (`str`, *optional*): File names of the indices. in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. Returns: [`Dataset`] """ table = ArrowReader.read_table(filename, in_memory=in_memory) if indices_filename is not None: indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory) else: indices_pa_table = None return cls( arrow_table=table, info=info, split=split, indices_table=indices_pa_table, )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@classmethod def from_buffer( cls, buffer: pa.Buffer, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, indices_buffer: Optional[pa.Buffer] = None, ) -> "Dataset": """Instantiate a Dataset backed by an Arrow buffer. Args: buffer (`pyarrow.Buffer`): Arrow buffer. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. indices_buffer (`pyarrow.Buffer`, *optional*): Indices Arrow buffer. Returns: [`Dataset`] """ table = InMemoryTable.from_buffer(buffer) if indices_buffer is not None: indices_table = InMemoryTable.from_buffer(indices_buffer) else: indices_table = None return cls(table, info=info, split=split, indices_table=indices_table)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@classmethod def from_pandas( cls, df: pd.DataFrame, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, preserve_index: Optional[bool] = None, ) -> "Dataset": """ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`]. The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit features and passing them to this function. Important: a dataset created with from_pandas() lives in memory and therefore doesn't have an associated cache directory. This may change in the future, but in the meantime, if you want to reduce memory usage, you should write it back to disk and reload it using e.g. save_to_disk / load_from_disk.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: df (`pandas.DataFrame`): Dataframe that contains the dataset. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting Dataset. The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. Returns: [`Dataset`] Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> ds = Dataset.from_pandas(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable.from_pandas( df=df, preserve_index=preserve_index, ) if features is not None: # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
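As the docstring notes, `object` columns may not carry enough type information; a minimal sketch of pinning the schema with explicit [`Features`] (the column names and label names are illustrative):

```py
import pandas as pd
from datasets import ClassLabel, Dataset, Features, Value

df = pd.DataFrame({"text": ["good movie", "bad movie"], "label": [1, 0]})

# Explicit features avoid relying on pandas dtype inference,
# and turn the integer column into a named ClassLabel.
features = Features({
    "text": Value("string"),
    "label": ClassLabel(names=["neg", "pos"]),
})
ds = Dataset.from_pandas(df, features=features, preserve_index=False)
print(ds.features)
```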
@classmethod def from_polars( cls, df: "pl.DataFrame", features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Collect the underlying arrow arrays in an Arrow Table. This operation is mostly zero copy. Data types that do copy: * CategoricalType Args: df (`polars.DataFrame`): DataFrame to convert to Arrow Table features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Examples: ```py >>> ds = Dataset.from_polars(df) ``` """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None if info is None: info = DatasetInfo() info.features = features table = InMemoryTable(df.to_arrow()) if features is not None: # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema) # needed to support the str to Audio conversion for instance table = table.cast(features.arrow_schema) return cls(table, info=info, split=split)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
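A minimal sketch of the polars path, assuming a recent `datasets` version with `from_polars` and that `polars` is installed; as described above, the conversion is mostly zero-copy because polars is Arrow-backed:

```py
import polars as pl
from datasets import Dataset

pl_df = pl.DataFrame({"id": [1, 2, 3], "name": ["Elia", "Teo", "Fang"]})

# df.to_arrow() hands the underlying Arrow arrays to the Dataset
ds = Dataset.from_polars(pl_df)
print(ds[0])  # {'id': 1, 'name': 'Elia'}
```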
@classmethod def from_dict( cls, mapping: dict, features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`]. Important: a dataset created with from_dict() lives in memory and therefore doesn't have an associated cache directory. This may change in the future, but in the meantime, if you want to reduce memory usage, you should write it back to disk and reload it using e.g. save_to_disk / load_from_disk.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: mapping (`Mapping`): Mapping of strings to Arrays or Python lists. features ([`Features`], *optional*): Dataset features. info (`DatasetInfo`, *optional*): Dataset information, like description, citation, etc. split (`NamedSplit`, *optional*): Name of the dataset split.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: [`Dataset`] """ if info is not None and features is not None and info.features != features: raise ValueError( f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}" ) features = features if features is not None else info.features if info is not None else None arrow_typed_mapping = {} for col, data in mapping.items(): if isinstance(data, (pa.Array, pa.ChunkedArray)): data = cast_array_to_feature(data, features[col]) if features is not None else data else: data = OptimizedTypedSequence( features.encode_column(data, col) if features is not None else data, type=features[col] if features is not None else None, col=col, ) arrow_typed_mapping[col] = data mapping = arrow_typed_mapping
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
pa_table = InMemoryTable.from_pydict(mapping=mapping) if info is None: info = DatasetInfo() info.features = features if info.features is None: info.features = Features( { col: generate_from_arrow_type(data.type) if isinstance(data, (pa.Array, pa.ChunkedArray)) else data.get_inferred_type() for col, data in mapping.items() } ) return cls(pa_table, info=info, split=split)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
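A minimal `from_dict` sketch, with and without explicit features (the toy data is illustrative):

```py
from datasets import Dataset, Features, Value

mapping = {"text": ["Good", "Bad"], "label": [0, 1]}

# Types inferred from the Python values
ds = Dataset.from_dict(mapping)

# Or pinned explicitly, e.g. to force int32 labels
ds_typed = Dataset.from_dict(
    mapping, features=Features({"text": Value("string"), "label": Value("int32")})
)
print(ds.features, ds_typed.features)
```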
@classmethod def from_list( cls, mapping: List[dict], features: Optional[Features] = None, info: Optional[DatasetInfo] = None, split: Optional[NamedSplit] = None, ) -> "Dataset": """ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`]. Note that the keys of the first entry will be used to determine the dataset columns, regardless of what is passed to features. Important: a dataset created with from_list() lives in memory and therefore doesn't have an associated cache directory. This may change in the future, but in the meantime, if you want to reduce memory usage, you should write it back to disk and reload it using e.g. save_to_disk / load_from_disk.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: mapping (`List[dict]`): A list of mappings of strings to row values. features (`Features`, optional): Dataset features. info (`DatasetInfo`, optional): Dataset information, like description, citation, etc. split (`NamedSplit`, optional): Name of the dataset split. Returns: [`Dataset`] """ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {} return cls.from_dict(mapping, features, info, split) @staticmethod def from_csv( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from CSV file(s).
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the CSV file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`pandas.read_csv`].
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_csv('path/to/dataset.csv') ``` """ # Dynamic import to avoid circular dependency from .io.csv import CsvDatasetReader return CsvDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() @staticmethod def from_generator( generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, split: NamedSplit = Split.TRAIN, **kwargs, ): """Create a Dataset from a generator.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: generator (`Callable`): A generator function that `yields` examples. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. gen_kwargs (`dict`, *optional*): Keyword arguments to be passed to the `generator` callable. You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.7.0"/> split ([`NamedSplit`], defaults to `Split.TRAIN`): Split name to be assigned to the dataset. <Added version="2.21.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to :[`GeneratorConfig`]. Returns: [`Dataset`] Example: ```py >>> def gen(): ... yield {"text": "Good", "label": 0} ... yield {"text": "Bad", "label": 1} ... >>> ds = Dataset.from_generator(gen) ```
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}) ``` """ from .io.generator import GeneratorDatasetInputStream return GeneratorDatasetInputStream( generator=generator, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, gen_kwargs=gen_kwargs, num_proc=num_proc, split=split, **kwargs, ).read()
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
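A sketch of the sharded, multiprocess variant described above; the file names are placeholders created on the fly, and with `num_proc=4` the `shards` list in `gen_kwargs` is split across the worker processes:

```py
import os
import tempfile
from datasets import Dataset

tmp_dir = tempfile.mkdtemp()
shards = []
for i in range(8):
    path = os.path.join(tmp_dir, f"data{i}.txt")
    with open(path, "w") as f:
        f.write(f"line from shard {i}\n")
    shards.append(path)

def gen(shards):
    # each worker process receives a slice of the `shards` list
    for shard in shards:
        with open(shard) as f:
            for line in f:
                yield {"line": line.strip()}

ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}, num_proc=4)
print(len(ds))  # 8
```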
@staticmethod def from_json( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from JSON or JSON Lines file(s).
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the JSON or JSON Lines file(s). split ([`NamedSplit`], *optional*): Split name to be assigned to the dataset. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. field (`str`, *optional*): Field name of the JSON file that contains the dataset. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`JsonConfig`]. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_json('path/to/dataset.json') ``` """ # Dynamic import to avoid circular dependency from .io.json import JsonDatasetReader return JsonDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, num_proc=num_proc, **kwargs, ).read()
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
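A sketch of the `field` argument for JSON files that nest the examples under a key; the file layout and the `"data"` key are illustrative assumptions:

```py
import json
import os
import tempfile
from datasets import Dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "dataset.json")
with open(path, "w") as f:
    json.dump({"version": "1.0",
               "data": [{"text": "Good", "label": 0},
                        {"text": "Bad", "label": 1}]}, f)

# `field` points at the key holding the list of examples
ds = Dataset.from_json(path, field="data")
print(ds.column_names)  # ['text', 'label']
```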
@staticmethod def from_parquet( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, columns: Optional[List[str]] = None, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from Parquet file(s).
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the Parquet file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. columns (`List[str]`, *optional*): If not `None`, only these columns will be read from the file. A column name may be a prefix of a nested field, e.g. 'a' will select 'a.b', 'a.c', and 'a.d.e'. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`ParquetConfig`]. Returns: [`Dataset`] Example: ```py >>> ds = Dataset.from_parquet('path/to/dataset.parquet') ``` """ # Dynamic import to avoid circular dependency from .io.parquet import ParquetDatasetReader return ParquetDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, num_proc=num_proc, **kwargs, ).read()
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
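A sketch of column pruning at read time with `columns`; the toy Parquet file is written with `pyarrow`, and the column names are illustrative:

```py
import os
import tempfile
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import Dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "dataset.parquet")
pq.write_table(pa.table({"text": ["Good", "Bad"], "label": [0, 1], "extra": [1.0, 2.0]}), path)

# Only the requested columns are read from the Parquet file
ds = Dataset.from_parquet(path, columns=["text", "label"])
print(ds.column_names)  # ['text', 'label']
```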
@staticmethod def from_text( path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, num_proc: Optional[int] = None, **kwargs, ): """Create Dataset from text file(s).
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: path_or_paths (`path-like` or list of `path-like`): Path(s) of the text file(s). split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. num_proc (`int`, *optional*, defaults to `None`): Number of processes when downloading and generating the dataset locally. This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default. <Added version="2.8.0"/> **kwargs (additional keyword arguments): Keyword arguments to be passed to [`TextConfig`]. Returns: [`Dataset`]
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Example: ```py >>> ds = Dataset.from_text('path/to/dataset.txt') ``` """ # Dynamic import to avoid circular dependency from .io.text import TextDatasetReader return TextDatasetReader( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs, ).read() @staticmethod def from_spark( df: "pyspark.sql.DataFrame", split: Optional[NamedSplit] = None, features: Optional[Features] = None, keep_in_memory: bool = False, cache_dir: str = None, working_dir: str = None, load_from_cache_file: bool = True, **kwargs, ): """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: df (`pyspark.sql.DataFrame`): The DataFrame containing the desired data. split (`NamedSplit`, *optional*): Split name to be assigned to the dataset. features (`Features`, *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both workers and the driver. keep_in_memory (`bool`): Whether to copy the data in-memory. working_dir (`str`, *optional*): Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting a non-NFS intermediate directory may improve performance. load_from_cache_file (`bool`): Whether to load the dataset from the cache if possible.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: [`Dataset`] Example: ```py >>> df = spark.createDataFrame( >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], >>> columns=["id", "name"], >>> ) >>> ds = Dataset.from_spark(df) ``` """ # Dynamic import to avoid circular dependency from .io.spark import SparkDatasetReader if sys.platform == "win32": raise EnvironmentError("Dataset.from_spark is not currently supported on Windows") return SparkDatasetReader( df, split=split, features=features, streaming=False, cache_dir=cache_dir, keep_in_memory=keep_in_memory, working_dir=working_dir, load_from_cache_file=load_from_cache_file, **kwargs, ).read()
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@staticmethod def from_sql( sql: Union[str, "sqlalchemy.sql.Selectable"], con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"], features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs, ): """Create Dataset from SQL query or database table.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: sql (`str` or `sqlalchemy.sql.Selectable`): SQL query to be executed or a table name. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object. features ([`Features`], *optional*): Dataset features. cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`): Directory to cache data. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. **kwargs (additional keyword arguments): Keyword arguments to be passed to [`SqlConfig`]. Returns: [`Dataset`] Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> # Fetch a database table >>> ds = Dataset.from_sql("test_data", "postgres:///db_name") >>> # Execute a SQL query on the table >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name") >>> # Use a Selectable object to specify the query >>> from sqlalchemy import select, text >>> stmt = select([text("sentence")]).select_from(text("test_data")) >>> ds = Dataset.from_sql(stmt, "postgres:///db_name") ``` <Tip> The returned dataset can only be cached if `con` is specified as URI string. </Tip> """ from .io.sql import SqlDatasetReader return SqlDatasetReader( sql, con, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs, ).read()
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
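A runnable sketch against a small SQLite database; per the Tip above, a URI string is passed as `con` so the result can be cached (the URI form assumes `sqlalchemy` is installed; table and column names are illustrative):

```py
import os
import sqlite3
import tempfile
from datasets import Dataset

tmp_dir = tempfile.mkdtemp()
db_path = os.path.join(tmp_dir, "data.db")
con = sqlite3.connect(db_path)
con.execute("CREATE TABLE test_data (sentence TEXT)")
con.executemany("INSERT INTO test_data VALUES (?)", [("Good",), ("Bad",)])
con.commit()
con.close()

# A URI string (rather than a connection object) keeps the result cacheable
ds = Dataset.from_sql("SELECT sentence FROM test_data", f"sqlite:///{db_path}")
print(ds[0])  # {'sentence': 'Good'}
```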
def __setstate__(self, state): self.__dict__.update(state) maybe_register_dataset_for_temp_dir_deletion(self) return self def __del__(self): if hasattr(self, "_data"): del self._data if hasattr(self, "_indices"): del self._indices def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables self.__del__() def save_to_disk( self, dataset_path: PathLike, max_shard_size: Optional[Union[str, int]] = None, num_shards: Optional[int] = None, num_proc: Optional[int] = None, storage_options: Optional[dict] = None, ): """ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`. For [`Image`], [`Audio`] and [`Video`] data:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
All the Image(), Audio() and Video() data are stored in the arrow files. If you want to store paths or urls, please use the Value("string") type. Args: dataset_path (`path-like`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`) of the dataset directory where the dataset will be saved. max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be written to disk. If expressed as a string, needs to be digits followed by a unit (like `"50MB"`). num_shards (`int`, *optional*): Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.8.0"/> num_proc (`int`, *optional*): Number of processes when downloading and generating the dataset locally. Multiprocessing is disabled by default. <Added version="2.8.0"/> storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/> Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> ds.save_to_disk("path/to/dataset/directory") >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB") >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024) ``` """ if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to save_to_disk: please specify either max_shard_size or num_shards, but not both." ) if self.list_indexes(): raise ValueError("Please remove all the indexes using `dataset.drop_index` before saving a dataset") if num_shards is None: dataset_nbytes = self._estimate_nbytes() max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) num_shards = int(dataset_nbytes / max_shard_size) + 1 num_shards = max(num_shards, num_proc or 1)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
num_proc = num_proc if num_proc is not None else 1 num_shards = num_shards if num_shards is not None else num_proc fs: fsspec.AbstractFileSystem fs, _ = url_to_fs(dataset_path, **(storage_options or {})) if not is_remote_filesystem(fs): parent_cache_files_paths = { Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files } # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux. if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths: raise PermissionError( f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself." ) fs.makedirs(dataset_path, exist_ok=True)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# Get json serializable state state = { key: self.__dict__[key] for key in [ "_fingerprint", "_format_columns", "_format_kwargs", "_format_type", "_output_all_columns", ] } state["_split"] = str(self.split) if self.split is not None else self.split state["_data_files"] = [ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards) ] for k in state["_format_kwargs"].keys(): try: json.dumps(state["_format_kwargs"][k]) except TypeError as e: raise TypeError( str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't." ) from None # Get json serializable dataset info dataset_info = asdict(self._info)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
shards_done = 0 pbar = hf_tqdm( unit=" examples", total=len(self), desc=f"Saving the dataset ({shards_done}/{num_shards} shards)", ) kwargs_per_job = ( { "job_id": shard_idx, "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"), "storage_options": storage_options, } for shard_idx in range(num_shards) ) shard_lengths = [None] * num_shards shard_sizes = [None] * num_shards if num_proc > 1: with Pool(num_proc) as pool: with pbar: for job_id, done, content in iflatmap_unordered( pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job ): if done:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else: pbar.update(content) else: with pbar: for kwargs in kwargs_per_job: for job_id, done, content in Dataset._save_to_disk_single(**kwargs): if done: shards_done += 1 pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)") logger.debug(f"Finished writing shard number {job_id} of {num_shards}.") shard_lengths[job_id], shard_sizes[job_id] = content else:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
pbar.update(content) with fs.open( posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8" ) as state_file: json.dump(state, state_file, indent=2, sort_keys=True) with fs.open( posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8" ) as dataset_info_file: # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
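A minimal round-trip sketch of the sharded save described above, followed by `load_from_disk`; the temporary directory and the `num_shards` value are illustrative:

```py
import tempfile
from datasets import Dataset

ds = Dataset.from_dict({"text": ["Good", "Bad"] * 50, "label": [0, 1] * 50})

out_dir = tempfile.mkdtemp()
# One arrow file per shard is written, plus state.json and dataset_info.json
ds.save_to_disk(out_dir, num_shards=4)

reloaded = Dataset.load_from_disk(out_dir)
print(len(reloaded), reloaded.column_names)  # 100 ['text', 'label']
```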
@staticmethod def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]): batch_size = config.DEFAULT_MAX_BATCH_SIZE
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
num_examples_progress_update = 0 writer = ArrowWriter( features=shard.features, path=fpath, storage_options=storage_options, embed_local_files=True, ) try: _time = time.time() for pa_table in shard.with_format("arrow").iter(batch_size): writer.write_table(pa_table) num_examples_progress_update += len(pa_table) if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: _time = time.time() yield job_id, False, num_examples_progress_update num_examples_progress_update = 0 finally: yield job_id, False, num_examples_progress_update num_examples, num_bytes = writer.finalize() writer.close() yield job_id, True, (num_examples, num_bytes)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@staticmethod def _build_local_temp_path(uri_or_path: str) -> Path: """ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative path extracted from the uri) passed. Args: uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) to concatenate. Returns: :class:`Path`: the concatenated path (temp dir + path) """ src_dataset_path = Path(uri_or_path) tmp_dir = get_temporary_cache_files_directory() return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@staticmethod def load_from_disk( dataset_path: PathLike, keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None, ) -> "Dataset": """ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: dataset_path (`path-like`): Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`) of the dataset directory where the dataset will be loaded from. keep_in_memory (`bool`, defaults to `None`): Whether to copy the dataset in-memory. If `None`, the dataset will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the [improve performance](../cache#improve-performance) section. storage_options (`dict`, *optional*): Key/value pairs to be passed on to the file-system backend, if any. <Added version="2.8.0"/>
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: [`Dataset`] or [`DatasetDict`]: - If `dataset_path` is a path of a dataset directory, the dataset requested. - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split. Example: ```py >>> ds = load_from_disk("path/to/dataset/directory") ``` """ fs: fsspec.AbstractFileSystem fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {})) dest_dataset_path = dataset_path dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
dataset_dict_is_file = fs.isfile(dataset_dict_json_path) dataset_info_is_file = fs.isfile(dataset_info_path) dataset_state_is_file = fs.isfile(dataset_state_json_path) if not dataset_info_is_file and not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_info_is_file: if dataset_dict_is_file: raise FileNotFoundError(
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." ) if not dataset_state_is_file: if dataset_dict_is_file: raise FileNotFoundError( f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead." ) raise FileNotFoundError( f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`." )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies if is_remote_filesystem(fs): src_dataset_path = dest_dataset_path dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) with open(dataset_state_json_path, encoding="utf-8") as state_file: state = json.load(state_file) with open(dataset_info_path, encoding="utf-8") as dataset_info_file: dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
dataset_size = estimate_dataset_size( Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"] ) keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable arrow_table = concat_tables( thread_map( table_cls.from_file, [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]], tqdm_class=hf_tqdm, desc="Loading dataset from disk", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(state["_data_files"]) <= 16 or None, ) ) split = state["_split"] split = Split(split) if split is not None else split
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
dataset = Dataset( arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state["_fingerprint"], ) format = { "type": state["_format_type"], "format_kwargs": state["_format_kwargs"], "columns": state["_format_columns"], "output_all_columns": state["_output_all_columns"], } dataset = dataset.with_format(**format) return dataset @property def data(self) -> Table: """The Apache Arrow table backing the dataset. Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.data MemoryMappedTable text: string label: int64 ----
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
        ```
        """
        return self._data
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
    @property
    def cache_files(self) -> List[dict]:
        """The cache files containing the Apache Arrow table backing the dataset.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.cache_files
        [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
        ```
        """
        cache_files = list_table_cache_files(self._data)
        if self._indices is not None:
            cache_files += list_table_cache_files(self._indices)
        return [{"filename": cache_filename} for cache_filename in cache_files]

    @property
    def num_columns(self) -> int:
        """Number of columns in the dataset.

        Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.num_columns
        2
        ```
        """
        return self._data.num_columns

    @property
    def num_rows(self) -> int:
        """Number of rows in the dataset (same as [`Dataset.__len__`]).

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.num_rows
        1066
        ```
        """
        if self._indices is not None:
            return self._indices.num_rows
        return self._data.num_rows

    @property
    def column_names(self) -> List[str]:
        """Names of the columns in the dataset.

        Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.column_names
        ['text', 'label']
        ```
        """
        return self._data.column_names

    @property
    def shape(self) -> Tuple[int, int]:
        """Shape of the dataset (number of rows, number of columns).

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.shape
        (1066, 2)
        ```
        """
        if self._indices is not None:
            return (self._indices.num_rows, self._data.num_columns)
        return self._data.shape

    def unique(self, column: str) -> List:
        """Return a list of the unique elements in a column.

        This is implemented in the low-level backend and as such, very fast.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        Args:
            column (`str`):
                Column name (list all the column names with [`~datasets.Dataset.column_names`]).

        Returns:
            `list`: List of unique elements in the given column.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="validation")
        >>> ds.unique('label')
        [1, 0]
        ```
        """
        if column not in self._data.column_names:
            raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")

        if self._indices is not None and self._indices.num_rows != self._data.num_rows:
            dataset = self.flatten_indices()
        else:
            dataset = self

        return dataset._data.column(column).unique().to_pylist()

    def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
        """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        Args:
            column (`str`):
                The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
            include_nulls (`bool`, defaults to `False`):
                Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.

                <Added version="1.14.2"/>

        Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("boolq", split="validation")
        >>> ds.features
        {'answer': Value(dtype='bool', id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)}
        >>> ds = ds.class_encode_column('answer')
        >>> ds.features
        {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None), 'passage': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None)}
        ```
        """
        # Sanity checks
        if column not in self._data.column_names:
            raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
        src_feat = self._info.features[column]
        if not isinstance(src_feat, Value):
            raise ValueError(
                f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
            )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):

            def stringify_column(batch):
                batch[column] = [
                    str(sample) if include_nulls or sample is not None else None for sample in batch[column]
                ]
                return batch

            dset = self.map(
                stringify_column,
                batched=True,
                desc="Stringifying the column",
            )
        else:
            dset = self

        # Create the new feature
        class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
        dst_feat = ClassLabel(names=class_names)

        def cast_to_class_labels(batch):
            batch[column] = [
                dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
                for sample in batch[column]
            ]
            return batch
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
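The two helpers above are applied via `Dataset.map`: column values are first stringified, then mapped to integer ids with `ClassLabel.str2int`. A self-contained sketch of the same two-step encoding on plain Python lists (the example values are made up):

```py
from datasets import ClassLabel

values = ["yes", "no", "yes", None]

# Step 1: stringify the values (nulls kept as None, i.e. include_nulls=False).
stringified = [str(v) if v is not None else None for v in values]

# Step 2: build the sorted label set and encode each value with str2int.
class_names = sorted({v for v in stringified if v is not None})
feat = ClassLabel(names=class_names)
encoded = [feat.str2int(v) if v is not None else None for v in stringified]
print(class_names, encoded)  # ['no', 'yes'] [1, 0, 1, None]
```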
        new_features = dset.features.copy()
        new_features[column] = dst_feat

        dset = dset.map(
            cast_to_class_labels,
            batched=True,
            features=new_features,
            desc="Casting to class labels",
        )

        return dset

    @fingerprint_transform(inplace=False)
    def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
        """Flatten the table.
        Each column with a struct type is flattened into one column per struct field.
        Other columns are left unchanged.

        Args:
            new_fingerprint (`str`, *optional*):
                The new fingerprint of the dataset after transform.
                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.

        Returns:
            [`Dataset`]: A copy of the dataset with flattened columns.

        Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("squad", split="train")
        >>> ds.features
        {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)}
        >>> ds.flatten()
        Dataset({
            features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
            num_rows: 87599
        })
        ```
        """
        dataset = copy.deepcopy(self)
        for depth in range(1, max_depth):
            if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
                dataset._data = dataset._data.flatten()
            else:
                break
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        dataset.info.features = self._info.features.flatten(max_depth=max_depth)
        dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
        dataset._data = update_metadata_with_features(dataset._data, dataset.features)
        logger.info(f"Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else 'unknown'}.")
        dataset._fingerprint = new_fingerprint
        return dataset
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
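`Dataset.flatten` delegates the per-depth work shown above to `pyarrow.Table.flatten()`, which expands every struct column into one column per field. A small pyarrow-only illustration (the column and field names are invented for the example):

```py
import pyarrow as pa

# "answers" is inferred as a struct<text, start> column.
table = pa.table({
    "id": [1, 2],
    "answers": [{"text": "a", "start": 0}, {"text": "b", "start": 3}],
})
print(table.column_names)            # ['id', 'answers']
print(table.flatten().column_names)  # ['id', 'answers.text', 'answers.start']
```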
    def cast(
        self,
        features: Features,
        batch_size: Optional[int] = 1000,
        keep_in_memory: bool = False,
        load_from_cache_file: Optional[bool] = None,
        cache_file_name: Optional[str] = None,
        writer_batch_size: Optional[int] = 1000,
        num_proc: Optional[int] = None,
    ) -> "Dataset":
        """
        Cast the dataset to a new set of features.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
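`cast` rewrites the table so that its schema matches the requested `Features`. A hedged usage sketch with an illustrative toy dataset:

```py
from datasets import ClassLabel, Dataset, Features, Value

ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]})
# Cast the integer label column to a ClassLabel with explicit names.
new_features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
ds = ds.cast(new_features)
print(ds.features["label"])  # ClassLabel(names=['neg', 'pos'], ...)
```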