class TableBlock(Table):
"""
`TableBlock` is the allowed class inside a `ConcanetationTable`.
Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`.
This is because we don't want a `ConcanetationTable` made out of other `ConcanetationTables`.
"""
pass | class_definition | 21,461 | 21,745 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/table.py | null | 100 |
class InMemoryTable(TableBlock):
"""
The table is said to be in-memory when it is loaded into the user's RAM.
Pickling it copies all the data in memory.
Its implementation is simple and uses the underlying pyarrow Table methods directly.
This is different from the `MemoryMappedTable`, for which pickling doesn't copy all the
data in memory. For a `MemoryMappedTable`, unpickling instead reloads the table from disk.
`InMemoryTable` must be used when data fits in memory, while `MemoryMappedTable` is reserved
for data bigger than memory or for when you want the memory footprint of your application
to stay low.
"""
@classmethod
def from_file(cls, filename: str):
table = _in_memory_arrow_table_from_file(filename)
return cls(table)
@classmethod
def from_buffer(cls, buffer: pa.Buffer):
table = _in_memory_arrow_table_from_buffer(buffer)
return cls(table)
@classmethod
def from_pandas(cls, *args, **kwargs):
"""
Convert pandas.DataFrame to an Arrow Table.
The column types in the resulting Arrow Table are inferred from the
dtypes of the pandas.Series in the DataFrame. In the case of non-object
Series, the NumPy dtype is translated to its Arrow equivalent. In the
case of `object`, we need to guess the datatype by looking at the
Python objects in this Series.
Be aware that Series of the `object` dtype don't carry enough
information to always lead to a meaningful Arrow type. In the case that
we cannot infer a type, e.g. because the DataFrame is of length 0 or
the Series only contains `None/nan` objects, the type is set to
null. This behavior can be avoided by constructing an explicit schema
and passing it to this function.
Args:
df (`pandas.DataFrame`):
schema (`pyarrow.Schema`, *optional*):
The expected schema of the Arrow Table. This can be used to
indicate the type of columns if we cannot infer it automatically.
If passed, the output will have exactly this schema. Columns
specified in the schema that are not found in the DataFrame columns
or its index will raise an error. Additional columns or index
levels in the DataFrame which are not specified in the schema will
be ignored.
preserve_index (`bool`, *optional*):
Whether to store the index as an additional column in the resulting
`Table`. The default of None will store the index as a column,
except for RangeIndex which is stored as metadata only. Use
`preserve_index=True` to force it to be stored as a column.
nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
If greater than 1, convert columns to Arrow in parallel using
the indicated number of threads.
columns (`List[str]`, *optional*):
List of columns to be converted. If `None`, use all columns.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`:
Examples:
```python
>>> import pandas as pd
>>> import pyarrow as pa
>>> df = pd.DataFrame({
... 'int': [1, 2],
... 'str': ['a', 'b']
... })
>>> pa.Table.from_pandas(df)
<pyarrow.lib.Table object at 0x7f05d1fb1b40>
```
"""
return cls(pa.Table.from_pandas(*args, **kwargs))
@classmethod
def from_arrays(cls, *args, **kwargs):
"""
Construct a Table from Arrow arrays.
Args:
arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
Equal-length arrays that should form the table.
names (`List[str]`, *optional*):
Names for the table columns. If not passed, schema must be passed.
schema (`Schema`, defaults to `None`):
Schema for the created table. If not passed, names must be passed.
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_arrays(*args, **kwargs))
@classmethod
def from_pydict(cls, *args, **kwargs):
"""
Construct a Table from Arrow arrays or columns.
Args:
mapping (`Union[dict, Mapping]`):
A mapping of strings to Arrays or Python lists.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the Mapping values
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_pydict(*args, **kwargs))
@classmethod
def from_pylist(cls, mapping, *args, **kwargs):
"""
Construct a Table from a list of rows / dictionaries.
Args:
mapping (`List[dict]`):
A list of rows, each represented as a dict mapping column names to values.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the Mapping values
metadata (`Union[dict, Mapping]`, defaults to `None`):
Optional metadata for the schema (if inferred).
Returns:
`datasets.table.Table`
"""
return cls(pa.Table.from_pylist(mapping, *args, **kwargs))
@classmethod
def from_batches(cls, *args, **kwargs):
"""
Construct a Table from a sequence or iterator of Arrow `RecordBatches`.
Args:
batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
Sequence of `RecordBatch` to be converted, all schemas must be equal.
schema (`Schema`, defaults to `None`):
If not passed, will be inferred from the first `RecordBatch`.
Returns:
`datasets.table.Table`:
"""
return cls(pa.Table.from_batches(*args, **kwargs))
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
# Use fast slicing here
return InMemoryTable(self.fast_slice(offset=offset, length=length))
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
return InMemoryTable(self.table.filter(*args, **kwargs))
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(table_flatten(self.table, *args, **kwargs))
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(self.table.combine_chunks(*args, **kwargs))
def cast(self, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
return InMemoryTable(table_cast(self.table, *args, **kwargs))
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
return InMemoryTable(self.table.add_column(*args, **kwargs))
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
return InMemoryTable(self.table.append_column(*args, **kwargs))
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
return InMemoryTable(self.table.remove_column(*args, **kwargs))
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
return InMemoryTable(self.table.set_column(*args, **kwargs))
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
return InMemoryTable(self.table.rename_columns(*args, **kwargs))
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
return InMemoryTable(self.table.drop(*args, **kwargs))
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
return InMemoryTable(self.table.select(*args, **kwargs))
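# Hedged usage sketch (not part of the original file): how an `InMemoryTable`
# might be built and sliced. Assumes `InMemoryTable` is importable from
# `datasets.table`; the column names are made up for illustration.
def _example_in_memory_table():
    from datasets.table import InMemoryTable

    table = InMemoryTable.from_pydict({"id": [0, 1, 2], "text": ["a", "b", "c"]})
    head = table.slice(0, 2)  # zero-copy slice, returns another InMemoryTable
    assert head.num_rows == 2
    return head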
class MemoryMappedTable(TableBlock):
"""
The table is said to be memory mapped when it doesn't use the user's RAM but loads the data
from the disk instead.
Pickling it doesn't copy the data into memory.
Instead, only the path to the memory mapped arrow file is pickled, as well as the list
of transforms to "replay" when reloading the table from the disk.
Its implementation requires storing a history of all the transforms that were applied
to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
from the disk.
This is different from the `InMemoryTable`, for which pickling does copy all the
data in memory.
`InMemoryTable` must be used when data fits in memory, while `MemoryMappedTable` is reserved
for data bigger than memory or for when you want the memory footprint of your application
to stay low.
"""
def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
super().__init__(table)
self.path = os.path.abspath(path)
self.replays: List[Replay] = replays if replays is not None else []
@classmethod
def from_file(cls, filename: str, replays=None):
table = _memory_mapped_arrow_table_from_file(filename)
table = cls._apply_replays(table, replays)
return cls(table, filename, replays)
def __getstate__(self):
return {"path": self.path, "replays": self.replays}
def __setstate__(self, state):
path = state["path"]
replays = state["replays"]
table = _memory_mapped_arrow_table_from_file(path)
table = self._apply_replays(table, replays)
MemoryMappedTable.__init__(self, table, path=path, replays=replays)
@staticmethod
def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
if replays is not None:
for name, args, kwargs in replays:
if name == "cast":
table = table_cast(table, *args, **kwargs)
elif name == "flatten":
table = table_flatten(table, *args, **kwargs)
else:
table = getattr(table, name)(*args, **kwargs)
return table
def _append_replay(self, replay: Replay) -> List[Replay]:
replays = copy.deepcopy(self.replays)
replays.append(replay)
return replays
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
replay = ("slice", (offset, length), {})
replays = self._append_replay(replay)
# Use fast slicing here
return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
def filter(self, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the ChunkedArray of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)
def cast(self, *args, **kwargs):
"""
Cast table values to another schema
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)
def remove_column(self, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)
def rename_columns(self, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)
def drop(self, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)
def select(self, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
replays = self._append_replay(replay)
return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)
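# Hedged usage sketch (not part of the original file): pickling a
# `MemoryMappedTable` stores only the arrow file path plus the recorded
# "replays", so unpickling re-opens the file and re-applies the transforms.
# The .arrow path below is hypothetical.
def _example_memory_mapped_roundtrip(arrow_path="path/to/data.arrow"):
    import pickle

    from datasets.table import MemoryMappedTable

    table = MemoryMappedTable.from_file(arrow_path)
    sliced = table.slice(0, 10)  # recorded as a replay, data stays on disk
    restored = pickle.loads(pickle.dumps(sliced))  # reloads from disk and replays the slice
    return restored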
class ConcatenationTable(Table):
"""
The table comes from the concatenation of several tables called blocks.
It enables concatenation over both axis 0 (appending rows) and axis 1 (appending columns).
The underlying tables are called "blocks" and can be either `InMemoryTable`
or `MemoryMappedTable` objects.
This makes it possible to combine tables that come from memory or that are memory mapped.
When a `ConcatenationTable` is pickled, then each block is pickled:
- the `InMemoryTable` objects are pickled by copying all the data in memory.
- the `MemoryMappedTable` objects are pickled without copying the data into memory.
Instead, only the path to the memory mapped arrow file is pickled, as well as the list
of transforms to "replay" when reloading the table from the disk.
Its implementation requires storing each block separately.
The `blocks` attribute stores a list of lists of blocks.
The first axis concatenates the tables along axis 0 (it appends rows),
while the second axis concatenates tables along axis 1 (it appends columns).
If some columns are missing when concatenating on axis 0, they are filled with null values.
This is done using `pyarrow.concat_tables(tables, promote_options="default")`.
You can access the fully combined table by accessing the `ConcatenationTable.table` attribute,
and the blocks by accessing the `ConcatenationTable.blocks` attribute.
"""
def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
super().__init__(table)
self.blocks = blocks
# Check that all the blocks have the right type.
# Only InMemoryTable and MemoryMappedTable are allowed.
for subtables in blocks:
for subtable in subtables:
if not isinstance(subtable, TableBlock):
raise TypeError(
"The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
f", but got {_short_str(subtable)}."
)
def __getstate__(self):
return {"blocks": self.blocks, "schema": self.table.schema}
def __setstate__(self, state):
blocks = state["blocks"]
schema = state["schema"]
table = self._concat_blocks_horizontally_and_vertically(blocks)
if schema is not None and table.schema != schema:
# We fix the columns by concatenating with an empty table with the right columns
empty_table = pa.Table.from_batches([], schema=schema)
# We set promote_options="default" to fill missing columns with null values
table = pa.concat_tables([table, empty_table], promote_options="default")
ConcatenationTable.__init__(self, table, blocks=blocks)
@staticmethod
def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
if axis == 0:
# We set promote_options="default" to fill missing columns with null values
return pa.concat_tables(pa_tables, promote_options="default")
elif axis == 1:
for i, table in enumerate(pa_tables):
if i == 0:
pa_table = table
else:
for name, col in zip(table.column_names, table.columns):
pa_table = pa_table.append_column(name, col)
return pa_table
else:
raise ValueError("'axis' must be either 0 or 1")
@classmethod
def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
pa_tables_to_concat_vertically = []
for i, tables in enumerate(blocks):
if not tables:
continue
pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)
@classmethod
def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
if axis is not None:
merged_blocks = []
for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
if is_in_memory:
block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
merged_blocks += list(block_group)
else: # both
merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
if all(len(row_block) == 1 for row_block in merged_blocks):
merged_blocks = cls._merge_blocks(
[block for row_block in merged_blocks for block in row_block], axis=0
)
return merged_blocks
@classmethod
def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
if isinstance(blocks, TableBlock):
return blocks
elif isinstance(blocks[0], TableBlock):
return cls._merge_blocks(blocks, axis=0)
else:
return cls._merge_blocks(blocks)
@classmethod
def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
blocks = cls._consolidate_blocks(blocks)
if isinstance(blocks, TableBlock):
table = blocks
return cls(table.table, [[table]])
elif isinstance(blocks[0], TableBlock):
table = cls._concat_blocks(blocks, axis=0)
blocks = [[t] for t in blocks]
return cls(table, blocks)
else:
table = cls._concat_blocks_horizontally_and_vertically(blocks)
return cls(table, blocks)
@classmethod
def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
"""Create `ConcatenationTable` from list of tables.
Args:
tables (list of `Table` or list of `pyarrow.Table`):
List of tables.
axis (`{0, 1}`, defaults to `0`, meaning over rows):
Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
(horizontally).
<Added version="1.6.0"/>
"""
def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
if isinstance(table, pa.Table):
return [[InMemoryTable(table)]]
elif isinstance(table, ConcatenationTable):
return copy.deepcopy(table.blocks)
else:
return [[table]]
def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
sliced = [table.slice(0, length) for table in row_block]
remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
return sliced, remainder
def _split_both_like(
result: List[List[TableBlock]], blocks: List[List[TableBlock]]
) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
"""
Make sure each row_block contains the same num_rows to be able to concatenate them on axis=1.
To do so, we modify both block sets to have the same row_block boundaries.
For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
we modify both to have 4 row_blocks of size 2, 1, 1 and 2:
[ x x x | x x x ]
+ [ y y | y y | y y ]
-----------------------------
= [ x x | x | x | x x ]
[ y y | y | y | y y ]
"""
result, blocks = list(result), list(blocks)
new_result, new_blocks = [], []
while result and blocks:
# we slice the longest row block to save two row blocks of same length
# and we replace the long row block by its remainder if necessary
if len(result[0][0]) > len(blocks[0][0]):
new_blocks.append(blocks[0])
sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
new_result.append(sliced)
elif len(result[0][0]) < len(blocks[0][0]):
new_result.append(result[0])
sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
new_blocks.append(sliced)
else:
new_result.append(result.pop(0))
new_blocks.append(blocks.pop(0))
if result or blocks:
raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
return new_result, new_blocks
def _extend_blocks(
result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
) -> List[List[TableBlock]]:
if axis == 0:
result.extend(blocks)
elif axis == 1:
# We make sure each row_block has the same num_rows
result, blocks = _split_both_like(result, blocks)
for i, row_block in enumerate(blocks):
result[i].extend(row_block)
return result
blocks = to_blocks(tables[0])
for table in tables[1:]:
table_blocks = to_blocks(table)
blocks = _extend_blocks(blocks, table_blocks, axis=axis)
return cls.from_blocks(blocks)
@property
def _slices(self):
offset = 0
for tables in self.blocks:
length = len(tables[0])
yield (offset, length)
offset += length
def slice(self, offset=0, length=None):
"""
Compute zero-copy slice of this Table.
Args:
offset (`int`, defaults to `0`):
Offset from start of table to slice.
length (`int`, defaults to `None`):
Length of slice (default is until end of table starting from
offset).
Returns:
`datasets.table.Table`
"""
table = self.table.slice(offset, length=length)
length = length if length is not None else self.num_rows - offset
blocks = []
for tables in self.blocks:
n_rows = len(tables[0])
if length == 0:
break
elif n_rows <= offset:
offset = offset - n_rows
elif n_rows <= offset + length:
blocks.append([t.slice(offset) for t in tables])
length, offset = length + offset - n_rows, 0
else:
blocks.append([t.slice(offset, length) for t in tables])
length, offset = 0, 0
return ConcatenationTable(table, blocks)
def filter(self, mask, *args, **kwargs):
"""
Select records from a Table. See `pyarrow.compute.filter` for full usage.
"""
table = self.table.filter(mask, *args, **kwargs)
blocks = []
for (offset, length), tables in zip(self._slices, self.blocks):
submask = mask.slice(offset, length)
blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def flatten(self, *args, **kwargs):
"""
Flatten this Table. Each column with a struct type is flattened
into one column per struct field. Other columns are left unchanged.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
table = table_flatten(self.table, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.flatten(*args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def combine_chunks(self, *args, **kwargs):
"""
Make a new table by combining the chunks this table has.
All the underlying chunks in the `ChunkedArray` of each column are
concatenated into zero or one chunk.
Args:
memory_pool (`MemoryPool`, defaults to `None`):
For memory allocations, if required, otherwise use default pool.
Returns:
`datasets.table.Table`
"""
table = self.table.combine_chunks(*args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def cast(self, target_schema, *args, **kwargs):
"""
Cast table values to another schema.
Args:
target_schema (`Schema`):
Schema to cast to, the names and order of fields must match.
safe (`bool`, defaults to `True`):
Check for overflows or other unsafe conversions.
Returns:
`datasets.table.Table`
"""
from .features import Features
table = table_cast(self.table, target_schema, *args, **kwargs)
target_features = Features.from_arrow_schema(target_schema)
blocks = []
for subtables in self.blocks:
new_tables = []
fields = list(target_schema)
for subtable in subtables:
subfields = []
for name in subtable.column_names:
subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
subschema = subfeatures.arrow_schema
new_tables.append(subtable.cast(subschema, *args, **kwargs))
blocks.append(new_tables)
return ConcatenationTable(table, blocks)
def replace_schema_metadata(self, *args, **kwargs):
"""
EXPERIMENTAL: Create shallow copy of table by replacing schema
key-value metadata with the indicated new metadata (which may be `None`,
which deletes any existing metadata).
Args:
metadata (`dict`, defaults to `None`):
Returns:
`datasets.table.Table`: shallow_copy
"""
table = self.table.replace_schema_metadata(*args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
return ConcatenationTable(table, self.blocks)
def add_column(self, *args, **kwargs):
"""
Add column to Table at position.
A new table is returned with the column added, the original table
object is left unchanged.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`: New table with the passed column added.
"""
raise NotImplementedError()
def append_column(self, *args, **kwargs):
"""
Append column at end of columns.
Args:
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column added.
"""
raise NotImplementedError()
def remove_column(self, i, *args, **kwargs):
"""
Create new Table with the indicated column removed.
Args:
i (`int`):
Index of column to remove.
Returns:
`datasets.table.Table`:
New table without the column.
"""
table = self.table.remove_column(i, *args, **kwargs)
name = self.table.column_names[i]
blocks = []
for tables in self.blocks:
blocks.append(
[
t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
for t in tables
]
)
return ConcatenationTable(table, blocks)
def set_column(self, *args, **kwargs):
"""
Replace column in Table at position.
Args:
i (`int`):
Index to place the column at.
field_ (`Union[str, pyarrow.Field]`):
If a string is passed then the type is deduced from the column
data.
column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
Column data.
Returns:
`datasets.table.Table`:
New table with the passed column set.
"""
raise NotImplementedError()
def rename_columns(self, names, *args, **kwargs):
"""
Create new table with columns renamed to provided names.
"""
table = self.table.rename_columns(names, *args, **kwargs)
names = dict(zip(self.table.column_names, names))
blocks = []
for tables in self.blocks:
blocks.append(
[t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
)
return ConcatenationTable(table, blocks)
def drop(self, columns, *args, **kwargs):
"""
Drop one or more columns and return a new table.
Args:
columns (`List[str]`):
List of field names referencing existing columns.
Raises:
`KeyError`: if any of the passed column names do not exist.
Returns:
`datasets.table.Table`:
New table without the columns.
"""
table = self.table.drop(columns, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
def select(self, columns, *args, **kwargs):
"""
Select columns of the table.
Returns a new table with the specified columns, and metadata preserved.
Args:
columns (:obj:`Union[List[str], List[int]]`):
The column names or integer indices to select.
Returns:
:class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
"""
table = self.table.select(columns, *args, **kwargs)
blocks = []
for tables in self.blocks:
blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
return ConcatenationTable(table, blocks)
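# Hedged usage sketch (not part of the original file): building a
# `ConcatenationTable` over rows (axis=0) and over columns (axis=1).
# Plain `pyarrow.Table` inputs are wrapped into `InMemoryTable` blocks by `from_tables`.
def _example_concatenation_table():
    import pyarrow as pa

    from datasets.table import ConcatenationTable

    t1 = pa.table({"a": [1, 2]})
    t2 = pa.table({"a": [3, 4]})
    t3 = pa.table({"b": [5, 6]})
    rows = ConcatenationTable.from_tables([t1, t2], axis=0)  # 4 rows, 1 column
    cols = ConcatenationTable.from_tables([t1, t3], axis=1)  # 2 rows, 2 columns
    return rows, cols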
class CastError(ValueError):
"""When it's not possible to cast an Arrow table to a specific schema or set of features"""
def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None:
super().__init__(*args)
self.table_column_names = table_column_names
self.requested_column_names = requested_column_names
def __reduce__(self):
# Fix unpickling: TypeError: __init__() missing 2 required keyword-only arguments: 'table_column_names' and 'requested_column_names'
return partial(
CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names
), ()
def details(self):
new_columns = set(self.table_column_names) - set(self.requested_column_names)
missing_columns = set(self.requested_column_names) - set(self.table_column_names)
if new_columns and missing_columns:
return f"there are {len(new_columns)} new columns ({_short_str(new_columns)}) and {len(missing_columns)} missing columns ({_short_str(missing_columns)})."
elif new_columns:
return f"there are {len(new_columns)} new columns ({_short_str(new_columns)})"
else:
return f"there are {len(missing_columns)} missing columns ({_short_str(missing_columns)})" | class_definition | 86,502 | 87,847 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/table.py | null | 104 |
class _InitializeConfiguredDatasetBuilder:
"""
From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
See also ConfiguredDatasetBuilder.__reduce__
When called with the builder class and config parameters as arguments, returns an
un-initialized instance of the configured builder class. Pickle will then call
__setstate__ on that instance.
"""
def __call__(self, builder_cls, metadata_configs, default_config_name, name):
# make a simple object which has no complex __init__ (this one will do)
obj = _InitializeConfiguredDatasetBuilder()
obj.__class__ = configure_builder_class(
builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
)
return obj
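# Hedged generic sketch (not part of `datasets` itself) of the pickling trick used
# above: a module-level callable rebuilds the dynamically created subclass, so
# instances of a class that has no importable name can still round-trip through
# pickle. All names below (_RebuildDynamicSubclass, _ExampleBase) are illustrative.
import pickle


class _RebuildDynamicSubclass:
    def __call__(self, base_cls, suffix):
        # make a simple object with a trivial __init__, then swap its class
        obj = _RebuildDynamicSubclass()
        obj.__class__ = type(base_cls.__name__ + suffix, (base_cls,), {})
        return obj


class _ExampleBase:
    def __reduce__(self):
        # pickle calls _RebuildDynamicSubclass()(_ExampleBase, "Configured"),
        # then restores the instance __dict__
        return _RebuildDynamicSubclass(), (_ExampleBase, "Configured"), self.__dict__.copy()


def _example_dynamic_pickle_roundtrip():
    instance = _RebuildDynamicSubclass()(_ExampleBase, "Configured")
    instance.value = 42
    restored = pickle.loads(pickle.dumps(instance))
    return restored.value  # 42, even though the configured class isn't importable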
class ConfiguredDatasetBuilder(builder_cls):
BUILDER_CONFIGS = builder_configs
DEFAULT_CONFIG_NAME = default_config_name
__module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
def __reduce__(self): # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
parent_builder_cls = self.__class__.__mro__[1]
return (
_InitializeConfiguredDatasetBuilder(),
(
parent_builder_cls,
self.BUILDER_CONFIGS,
self.DEFAULT_CONFIG_NAME,
self.dataset_name,
),
self.__dict__.copy(),
)
class BuilderConfigsParameters:
"""Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
Attributes:
metadata_configs (`MetadataConfigs`, *optional*):
Configs parsed from yaml's metadata.
builder_configs (`list[BuilderConfig]`, *optional*):
List of BuilderConfig objects created from metadata_configs above.
default_config_name (`str`):
Name of default config taken from yaml's metadata.
"""
metadata_configs: Optional[MetadataConfigs] = None
builder_configs: Optional[List[BuilderConfig]] = None
default_config_name: Optional[str] = None
class DatasetModule:
module_path: str
hash: str
builder_kwargs: dict
builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
dataset_infos: Optional[DatasetInfosDict] = None
importable_file_path: Optional[str] = None
class _DatasetModuleFactory:
def get_module(self) -> DatasetModule:
raise NotImplementedError
class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""Get the module of a local dataset. The dataset script is loaded from a local script."""
def __init__(
self,
path: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
):
self.path = path
self.name = Path(path).stem
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
self.trust_remote_code = trust_remote_code
def get_module(self) -> DatasetModule:
if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
warnings.warn(
f"The repository for {self.name} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at {self.path}\n"
f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
FutureWarning,
)
# get script and other files
dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME
imports = get_imports(self.path)
local_imports, library_imports = _download_additional_modules(
name=self.name,
base_path=str(Path(self.path).parent),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path.is_file():
additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
if dataset_readme_path.is_file():
additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
if not os.path.exists(importable_file_path):
trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
if trust_remote_code:
_create_importable_file(
local_path=self.path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
download_mode=self.download_mode,
)
else:
raise ValueError(
f"Loading {self.name} requires you to execute the dataset script in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
_check_library_imports(name=self.name, library_imports=library_imports)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {"base_path": str(Path(self.path).parent)}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
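# Hedged usage sketch (not part of the original file): the public `load_dataset`
# entry point routes a local `.py` path through `LocalDatasetModuleFactoryWithScript`;
# running the script requires opting in with `trust_remote_code=True`.
# The script path below is hypothetical.
def _example_load_local_script():
    from datasets import load_dataset

    return load_dataset("path/to/my_dataset_script.py", trust_remote_code=True)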
class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
from the data files extensions."""
def __init__(
self,
path: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
if data_dir and os.path.isabs(data_dir):
raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
self.path = Path(path).as_posix()
self.name = Path(path).stem
self.data_files = data_files
self.data_dir = data_dir
self.download_mode = download_mode
def get_module(self) -> DatasetModule:
readme_path = os.path.join(self.path, config.REPOCARD_FILENAME)
standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME)
dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
if os.path.exists(standalone_yaml_path):
with open(standalone_yaml_path, "r", encoding="utf-8") as f:
standalone_yaml_data = yaml.safe_load(f.read())
if standalone_yaml_data:
_dataset_card_data_dict = dataset_card_data.to_dict()
_dataset_card_data_dict.update(standalone_yaml_data)
dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.path,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
)
else:
builder_configs: List[BuilderConfig] = [
import_main_class(module_path).BUILDER_CONFIG_CLASS(
data_files=data_files,
**default_builder_kwargs,
)
]
default_config_name = None
builder_kwargs = {
"base_path": self.path,
"dataset_name": camelcase_to_snakecase(Path(self.path).name),
}
if self.data_dir:
builder_kwargs["data_files"] = data_files
# this file is deprecated and was created automatically in old versions of push_to_hub
if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs})
return DatasetModule(
module_path,
hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
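# Hedged usage sketch (not part of the original file): loading a local directory
# without a script goes through `LocalDatasetModuleFactoryWithoutScript`; the
# packaged builder (csv, json, parquet, ...) is inferred from the data file
# extensions. The directory path below is hypothetical.
def _example_load_local_folder():
    from datasets import load_dataset

    return load_dataset("path/to/local_dataset_dir")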
class PackagedDatasetModuleFactory(_DatasetModuleFactory):
"""Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
def __init__(
self,
name: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
):
self.name = name
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config
self.download_mode = download_mode
increase_load_count(name)
def get_module(self) -> DatasetModule:
base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
patterns = (
sanitize_patterns(self.data_files)
if self.data_files is not None
else get_data_patterns(base_path, download_config=self.download_config)
)
data_files = DataFilesDict.from_patterns(
patterns,
download_config=self.download_config,
base_path=base_path,
)
supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
try:
metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
builder_kwargs = {
"data_files": data_files,
"dataset_name": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs)
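# Hedged usage sketch (not part of the original file): passing a packaged module
# name such as "csv" or "json" to `load_dataset` resolves through
# `PackagedDatasetModuleFactory`. The file names below are hypothetical.
def _example_load_packaged_csv():
    from datasets import load_dataset

    return load_dataset("csv", data_files={"train": "train.csv", "test": "test.csv"})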
class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from data files of a dataset repository.
The dataset builder module to use is inferred from the data file extensions.
"""
def __init__(
self,
name: str,
commit_hash: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
use_exported_dataset_infos: bool = False,
):
self.name = name
self.commit_hash = commit_hash
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.use_exported_dataset_infos = use_exported_dataset_infos
increase_load_count(name)
def get_module(self) -> DatasetModule:
# Get the Dataset Card and fix the revision in case there are new commits in the meantime
api = HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
try:
dataset_readme_path = api.hf_hub_download(
repo_id=self.name,
filename=config.REPOCARD_FILENAME,
repo_type="dataset",
revision=self.commit_hash,
proxies=self.download_config.proxies,
)
dataset_card_data = DatasetCard.load(dataset_readme_path).data
except EntryNotFoundError:
dataset_card_data = DatasetCardData()
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading standalone yaml"
try:
standalone_yaml_path = cached_path(
hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(standalone_yaml_path, "r", encoding="utf-8") as f:
standalone_yaml_data = yaml.safe_load(f.read())
if standalone_yaml_data:
_dataset_card_data_dict = dataset_card_data.to_dict()
_dataset_card_data_dict.update(standalone_yaml_data)
dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
except FileNotFoundError:
pass
base_path = f"hf://datasets/{self.name}@{self.commit_hash}/{self.data_dir or ''}".rstrip("/")
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
if config.USE_PARQUET_EXPORT and self.use_exported_dataset_infos:
try:
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
except _dataset_viewer.DatasetViewerError:
exported_dataset_infos = None
else:
exported_dataset_infos = None
if exported_dataset_infos:
exported_dataset_infos.update(dataset_infos)
dataset_infos = exported_dataset_infos
# we need a set of data files to find which dataset builder to use
# because we need to infer module name by files extensions
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path, download_config=self.download_config)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
download_config=self.download_config,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.name,
download_config=self.download_config,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
)
module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
download_config=self.download_config,
)
else:
builder_configs: List[BuilderConfig] = [
import_main_class(module_path).BUILDER_CONFIG_CLASS(
data_files=data_files,
**default_builder_kwargs,
)
]
default_config_name = None
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
}
if self.data_dir:
builder_kwargs["data_files"] = data_files
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
# this file is deprecated and was created automatically in old versions of push_to_hub
dataset_infos_path = cached_path(
hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1:
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
except FileNotFoundError:
pass
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos))
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
)
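# Hedged usage sketch (not part of the original file): loading a Hub repository
# that has no loading script goes through `HubDatasetModuleFactoryWithoutScript`;
# configs and splits come from the dataset card YAML and the repository's data
# files. The repo id below is hypothetical.
def _example_load_hub_repo():
    from datasets import load_dataset

    return load_dataset("username/my_dataset", split="train")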
class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from the parquet files of a dataset repository's parquet export.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
increase_load_count(name)
def get_module(self) -> DatasetModule:
exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
parquet_commit_hash = (
HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
.dataset_info(
self.name,
revision="refs/convert/parquet",
token=self.download_config.token,
timeout=100.0,
)
.sha
) # fix the revision in case there are new commits in the meantime
metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
parquet_commit_hash=parquet_commit_hash,
exported_parquet_files=exported_parquet_files,
dataset_infos=dataset_infos,
)
module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
supports_metadata=False,
download_config=self.download_config,
)
builder_kwargs = {
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
}
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
) | class_definition | 51,813 | 54,668 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py | null | 114 |
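The factory above resolves a dataset to the packaged `parquet` builder through the Hub's `refs/convert/parquet` export. A minimal usage sketch, assuming network access and that the dataset actually has a Parquet export; the repo id and commit hash below are placeholders:
```python
from datasets.load import HubDatasetModuleFactoryWithParquetExport

# Placeholders: use a real repo id and the resolved commit sha of the revision you want.
factory = HubDatasetModuleFactoryWithParquetExport(
    name="username/my_dataset",
    commit_hash="<resolved commit sha>",
)
module = factory.get_module()  # DatasetModule backed by the packaged "parquet" builder
print(module.module_path)
```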
class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""
    Get the module of a dataset from a dataset repository.
    The module is built from the dataset script hosted inside the dataset repository.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
self.trust_remote_code = trust_remote_code
increase_load_count(name)
def download_loading_script(self) -> str:
file_path = hf_dataset_url(self.name, self.name.split("/")[-1] + ".py", revision=self.commit_hash)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
def download_dataset_infos_file(self) -> str:
dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash)
# Download the dataset infos file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
return cached_path(
dataset_infos,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
def download_dataset_readme_file(self) -> str:
readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.commit_hash)
        # Download the dataset README file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading readme"
try:
return cached_path(
readme_url,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None
def get_module(self) -> DatasetModule:
if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
warnings.warn(
f"The repository for {self.name} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
FutureWarning,
)
# get script and other files
local_path = self.download_loading_script()
dataset_infos_path = self.download_dataset_infos_file()
dataset_readme_path = self.download_dataset_readme_file()
imports = get_imports(local_path)
local_imports, library_imports = _download_additional_modules(
name=self.name,
base_path=hf_dataset_url(self.name, "", revision=self.commit_hash),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path:
additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
if dataset_readme_path:
additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
if not os.path.exists(importable_file_path):
trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
if trust_remote_code:
_create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
download_mode=self.download_mode,
)
else:
raise ValueError(
f"Loading {self.name} requires you to execute the dataset script in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
_check_library_imports(name=self.name, library_imports=library_imports)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path) | class_definition | 54,671 | 60,740 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py | null | 115 |
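Because this factory executes the repository's own loading script, `trust_remote_code=True` has to be passed explicitly (or resolved interactively). A minimal sketch, assuming network access; the repo id and commit hash are placeholders:
```python
from datasets.load import HubDatasetModuleFactoryWithScript

factory = HubDatasetModuleFactoryWithScript(
    name="username/script_dataset",       # placeholder repo id
    commit_hash="<resolved commit sha>",  # placeholder revision
    trust_remote_code=True,               # required to execute the repository's script
)
module = factory.get_module()
print(module.module_path, module.importable_file_path)
```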
class CachedDatasetModuleFactory(_DatasetModuleFactory):
"""
Get the module of a dataset that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
"""
def __init__(
self,
name: str,
cache_dir: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.cache_dir = cache_dir
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") <= 1
def get_module(self) -> DatasetModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if hashes:
# get most recent
def _get_modification_time(module_hash):
return (
(Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
.stat()
.st_mtime
)
hash = sorted(hashes, key=_get_modification_time)[-1]
warning_msg = (
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}"
)
if not config.HF_HUB_OFFLINE:
warning_msg += ", or remotely on the Hugging Face Hub."
logger.warning(warning_msg)
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make the new module to be noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
namespace_and_dataset_name = self.name.split("/")
namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
cached_relative_path = "___".join(namespace_and_dataset_name)
cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
cached_directory_paths = [
cached_directory_path
for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
if os.path.isdir(cached_directory_path)
]
if cached_directory_paths:
builder_kwargs = {
"repo_id": self.name,
"dataset_name": self.name.split("/")[-1],
}
warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
if config.HF_HUB_OFFLINE:
warning_msg += " (offline mode is enabled)."
logger.warning(warning_msg)
return DatasetModule(
"datasets.packaged_modules.cache.cache",
"auto",
{**builder_kwargs, "version": "auto"},
)
raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}") | class_definition | 60,743 | 64,770 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py | null | 116 |
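A minimal sketch of the cache lookup above, assuming the dataset was loaded at least once before; `"username/my_dataset"` is a placeholder:
```python
from datasets.load import CachedDatasetModuleFactory

factory = CachedDatasetModuleFactory("username/my_dataset")
try:
    module = factory.get_module()  # most recently modified cached module, if any
    print(module.module_path)
except FileNotFoundError:
    print("not found in the local cache")
```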
class SupervisedKeysData:
input: str = ""
output: str = "" | class_definition | 1,571 | 1,637 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 117 |
class DownloadChecksumsEntryData:
key: str = ""
value: str = "" | class_definition | 1,651 | 1,722 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 118 |
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing.""" | class_definition | 1,725 | 1,842 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 119 |
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes.""" | class_definition | 1,845 | 1,948 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 120 |
class PostProcessedInfo:
features: Optional[Features] = None
resources_checksums: Optional[dict] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
@classmethod
def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names}) | class_definition | 1,962 | 2,573 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 121 |
class DatasetInfo:
"""Information about a dataset.
`DatasetInfo` documents datasets, including its name, version, and features.
See the constructor arguments and properties for a full list.
Not all fields are known on construction and may be updated later.
Attributes:
description (`str`):
A description of the dataset.
citation (`str`):
A BibTeX citation of the dataset.
homepage (`str`):
A URL to the official homepage for the dataset.
license (`str`):
The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
features ([`Features`], *optional*):
The features used to specify the dataset's column types.
post_processed (`PostProcessedInfo`, *optional*):
Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
supervised_keys (`SupervisedKeysData`, *optional*):
Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
builder_name (`str`, *optional*):
The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
config_name (`str`, *optional*):
The name of the configuration derived from [`BuilderConfig`].
version (`str` or [`Version`], *optional*):
The version of the dataset.
splits (`dict`, *optional*):
The mapping between split name and metadata.
download_checksums (`dict`, *optional*):
The mapping between the URL to download the dataset's checksums and corresponding metadata.
download_size (`int`, *optional*):
The size of the files to download to generate the dataset, in bytes.
post_processing_size (`int`, *optional*):
Size of the dataset in bytes after post-processing, if any.
dataset_size (`int`, *optional*):
The combined size in bytes of the Arrow tables for all splits.
size_in_bytes (`int`, *optional*):
The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
"""
# Set in the dataset scripts
description: str = dataclasses.field(default_factory=str)
citation: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
features: Optional[Features] = None
post_processed: Optional[PostProcessedInfo] = None
supervised_keys: Optional[SupervisedKeysData] = None
# Set later by the builder
builder_name: Optional[str] = None
dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
post_processing_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None
_INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
"config_name",
"download_size",
"dataset_size",
"features",
"splits",
]
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)):
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None):
"""Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
Args:
dataset_info_dir (`str`):
Destination directory.
pretty_print (`bool`, defaults to `False`):
If `True`, the JSON will be pretty-printed with the indent level of 4.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.info.write_to_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f, pretty_print=pretty_print)
if self.license:
with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file, pretty_print=False):
"""Dump info in `file` file-like object open in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
def _dump_license(self, file):
"""Dump license in `file` file-like object open in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_merge(cls, dataset_infos: List["DatasetInfo"]):
dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
# if all dataset_infos are equal we don't need to merge. Just return the first.
return dataset_infos[0]
description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
features = None
supervised_keys = None
return cls(
description=description,
citation=citation,
homepage=homepage,
license=license,
features=features,
supervised_keys=supervised_keys,
)
@classmethod
def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict] = None) -> "DatasetInfo":
"""Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the [`DatasetInfo`].
This will overwrite all previous metadata.
Args:
dataset_info_dir (`str`):
The directory containing the metadata file. This
should be the root directory of a specific dataset version.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example:
```py
>>> from datasets import DatasetInfo
>>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
logger.info(f"Loading Dataset info from {dataset_info_dir}")
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
dataset_info_dict = json.load(f)
return cls.from_dict(dataset_info_dict)
@classmethod
def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{
k: copy.deepcopy(v)
for k, v in other_dataset_info.__dict__.items()
if (v is not None or not ignore_none)
}
)
def copy(self) -> "DatasetInfo":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def _to_yaml_dict(self) -> dict:
yaml_dict = {}
dataset_info_dict = asdict(self)
for key in dataset_info_dict:
if key in self._INCLUDED_INFO_IN_YAML:
value = getattr(self, key)
if hasattr(value, "_to_yaml_list"): # Features, SplitDict
yaml_dict[key] = value._to_yaml_list()
elif hasattr(value, "_to_yaml_string"): # Version
yaml_dict[key] = value._to_yaml_string()
else:
yaml_dict[key] = value
return yaml_dict
@classmethod
def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
yaml_data = copy.deepcopy(yaml_data)
if yaml_data.get("features") is not None:
yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
if yaml_data.get("splits") is not None:
yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) | class_definition | 2,587 | 13,544 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 122 |
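A small round-trip sketch of the serialization helpers above, writing a `DatasetInfo` to a directory and reloading it (the target directory is a placeholder):
```python
from datasets import DatasetInfo, Features, Value

info = DatasetInfo(
    description="A toy dataset used to illustrate DatasetInfo serialization.",
    features=Features({"text": Value("string"), "label": Value("int64")}),
)
info.write_to_directory("/tmp/toy_dataset_info", pretty_print=True)  # writes dataset_info.json
reloaded = DatasetInfo.from_directory("/tmp/toy_dataset_info")
assert reloaded.features == info.features
```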
class DatasetInfosDict(Dict[str, DatasetInfo]):
def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
if not overwrite:
total_dataset_infos = self.from_directory(dataset_infos_dir)
total_dataset_infos.update(self)
if os.path.exists(dataset_infos_path):
# for backward compatibility, let's update the JSON file if it exists
with open(dataset_infos_path, "w", encoding="utf-8") as f:
dataset_infos_dict = {
config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
}
json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
# Dump the infos in the YAML part of the README.md file
if os.path.exists(dataset_readme_path):
dataset_card = DatasetCard.load(dataset_readme_path)
dataset_card_data = dataset_card.data
else:
dataset_card = None
dataset_card_data = DatasetCardData()
if total_dataset_infos:
total_dataset_infos.to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
)
dataset_card.save(Path(dataset_readme_path))
@classmethod
def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
# Load the info from the YAML part of README.md
if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
if "dataset_info" in dataset_card_data:
return cls.from_dataset_card_data(dataset_card_data)
if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
# this is just to have backward compatibility with dataset_infos.json files
with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
return cls(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
else:
return cls()
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
if isinstance(dataset_card_data["dataset_info"], list):
return cls(
{
dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
dataset_info_yaml_dict
)
for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
}
)
else:
dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
return cls({dataset_info.config_name: dataset_info})
else:
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
# first get existing metadata info
if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
dataset_metadata_infos = {
dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
}
elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
dataset_metadata_infos = {
config_metadata["config_name"]: config_metadata
for config_metadata in dataset_card_data["dataset_info"]
}
else:
dataset_metadata_infos = {}
# update/rewrite existing metadata info with the one to dump
total_dataset_infos = {
**dataset_metadata_infos,
**{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
}
# the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
for config_name, dset_info_yaml_dict in total_dataset_infos.items():
dset_info_yaml_dict["config_name"] = config_name
if len(total_dataset_infos) == 1:
# use a struct instead of a list of configurations, since there's only one
dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
config_name = dataset_card_data["dataset_info"].pop("config_name", None)
if config_name != "default":
# if config_name is not "default" preserve it and put at the first position
dataset_card_data["dataset_info"] = {
"config_name": config_name,
**dataset_card_data["dataset_info"],
}
else:
dataset_card_data["dataset_info"] = []
for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
# add the config_name field in first position
dataset_info_yaml_dict.pop("config_name", None)
dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
dataset_card_data["dataset_info"].append(dataset_info_yaml_dict) | class_definition | 13,547 | 19,674 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py | null | 123 |
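A short sketch of the YAML export path described above: a single-config `DatasetInfosDict` is dumped into a `DatasetCardData` as a struct rather than a list.
```python
from huggingface_hub import DatasetCardData

from datasets import DatasetInfo
from datasets.info import DatasetInfosDict

card_data = DatasetCardData()
infos = DatasetInfosDict({"default": DatasetInfo(description="toy", config_name="default")})
infos.to_dataset_card_data(card_data)
print(card_data["dataset_info"])  # a single dict; "config_name" is dropped when it is "default"
```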
class InvalidKeyError(Exception):
    """Raised when the given key has an invalid datatype."""
def __init__(self, hash_data):
self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
self.suffix = "\nKeys should be either str, int or bytes type"
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}") | class_definition | 2,026 | 2,458 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py | null | 124 |
class DuplicatedKeysError(Exception):
    """Raised when a duplicate key is found."""
def __init__(self, key, duplicate_key_indices, fix_msg=""):
self.key = key
self.duplicate_key_indices = duplicate_key_indices
self.fix_msg = fix_msg
self.prefix = "Found multiple examples generated with the same key"
if len(duplicate_key_indices) <= 20:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
else:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
self.suffix = "\n" + fix_msg if fix_msg else ""
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}") | class_definition | 2,461 | 3,253 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py | null | 125 |
class KeyHasher:
    """KeyHasher class that provides 128-bit key hashes using salted MD5."""
def __init__(self, hash_salt: str):
self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
    def hash(self, key: Union[str, int, bytes]) -> int:
        """Returns a unique 128-bit hash of the input key.
        Args:
            key: the input key to be hashed (should be str, int or bytes)
        Returns: 128-bit integer hash of the key"""
md5 = self._split_md5.copy()
byte_key = _as_bytes(key)
md5.update(byte_key)
# Convert to integer with hexadecimal conversion
return int(md5.hexdigest(), 16) | class_definition | 3,256 | 3,871 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py | null | 126 |
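A quick sketch of the hasher: the salt (typically a split- or builder-specific string) is mixed into every key hash.
```python
from datasets.keyhash import KeyHasher

hasher = KeyHasher(hash_salt="train")
h1 = hasher.hash("example-0")
h2 = hasher.hash(b"example-0")
assert 0 <= h1 < 2**128  # 128-bit integer hash
assert h1 == h2          # str and bytes keys with the same content hash identically
```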
class ParallelBackendConfig:
backend_name = None | class_definition | 171 | 223 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/parallel/parallel.py | null | 127 |
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 490 | 2,336 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py | null | 128 |
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
storage_options: Optional[dict] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size or get_writer_batch_size(dataset.features)
self.storage_options = storage_options or {}
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
for offset in hf_tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
desc="Creating parquet from Arrow format",
):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written | class_definition | 2,339 | 4,353 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py | null | 129 |
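A minimal sketch of the writer, assuming a small in-memory `Dataset`: it streams the table to Parquet in `batch_size`-row chunks and returns the number of Arrow bytes written.
```python
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetWriter

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
written = ParquetDatasetWriter(ds, "data.parquet", batch_size=2).write()
print(f"wrote {written} bytes of Arrow data to data.parquet")
```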
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
split=split,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.builder.config.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 189 | 1,908 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/generator.py | null | 130 |
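A minimal sketch of the generator input stream; `gen_kwargs` are forwarded to the generator by the packaged `Generator` builder (the function below is a made-up example):
```python
from datasets.io.generator import GeneratorDatasetInputStream

def gen(shards):
    # yields one example dict per row
    for shard in shards:
        for i in range(3):
            yield {"shard": shard, "i": i}

ds = GeneratorDatasetInputStream(generator=gen, gen_kwargs={"shards": [0, 1]}).read()
print(len(ds))  # 6
```
Note that the non-streaming path prepares and caches the generated dataset under the usual cache directory before returning it.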
class CsvDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Csv(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 365 | 2,124 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py | null | 131 |
class CsvDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_csv_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.storage_options = storage_options or {}
self.to_csv_kwargs = to_csv_kwargs
def write(self) -> int:
_ = self.to_csv_kwargs.pop("path_or_buf", None)
header = self.to_csv_kwargs.pop("header", True)
index = self.to_csv_kwargs.pop("index", False)
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
return written
def _batch_csv(self, args):
offset, header, index, to_csv_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
csv_str = batch.to_pandas().to_csv(
path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
)
return csv_str.encode(self.encoding)
def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
"""Writes the pyarrow table as CSV to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating CSV from Arrow format",
):
csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
written += file_obj.write(csv_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for csv_str in hf_tqdm(
pool.imap(
self._batch_csv,
[(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating CSV from Arrow format",
):
written += file_obj.write(csv_str)
return written | class_definition | 2,127 | 5,264 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py | null | 132 |
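A minimal sketch of the CSV writer; with `num_proc` left unset it converts the table batch by batch in the main process (setting `num_proc > 1` switches to the multiprocessing pool shown in `_write`).
```python
from datasets import Dataset
from datasets.io.csv import CsvDatasetWriter

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
written = CsvDatasetWriter(ds, "data.csv", batch_size=2).write()  # header only on the first batch
print(f"wrote {written} bytes to data.csv")
```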
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 213 | 1,974 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/text.py | null | 133 |
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split) | class_definition | 207 | 1,796 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/spark.py | null | 134 |
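A minimal sketch of the non-streaming path, assuming a local `pyspark` installation: cache materialization runs as Spark jobs, and the cache directory must be reachable by the driver (here a local temp path stands in for the NFS mentioned above).
```python
from pyspark.sql import SparkSession

from datasets.io.spark import SparkDatasetReader

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])

ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/spark_datasets_cache").read()
print(ds[0])
```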
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 339 | 1,561 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py | null | 135 |
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in hf_tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating SQL from Arrow format",
):
written += num_rows
return written | class_definition | 1,564 | 4,233 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py | null | 136 |
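A small end-to-end sketch using an in-memory SQLite connection: the writer creates a table from a `Dataset`, and the reader loads it back with a SQL query (the table name is arbitrary).
```python
import sqlite3

from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

con = sqlite3.connect(":memory:")
ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})

SqlDatasetWriter(ds, "examples", con).write()                    # creates the "examples" table
loaded = SqlDatasetReader("SELECT * FROM examples", con).read()  # reads it back as a Dataset
print(loaded[0])
```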
class JsonDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.field = field
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Json(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
field=field,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | class_definition | 368 | 2,218 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py | null | 137 |
class JsonDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_json_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.storage_options = storage_options or {}
self.to_json_kwargs = to_json_kwargs
def write(self) -> int:
_ = self.to_json_kwargs.pop("path_or_buf", None)
orient = self.to_json_kwargs.pop("orient", "records")
lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
self.to_json_kwargs["index"] = False
# Determine the default compression value based on self.path_or_buf type
default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
compression = self.to_json_kwargs.pop("compression", default_compression)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
if not lines and self.batch_size < self.dataset.num_rows:
raise NotImplementedError(
"Output JSON will not be formatted correctly when lines = False and batch_size < number of rows in the dataset. Use pandas.DataFrame.to_json() instead."
)
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(
self.path_or_buf, "wb", compression=compression, **(self.storage_options or {})
) as buffer:
written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead."
)
written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
return written
def _batch_json(self, args):
offset, orient, lines, to_json_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def _write(
self,
file_obj: BinaryIO,
orient,
lines,
**to_json_kwargs,
) -> int:
"""Writes the pyarrow table as JSON lines to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating json from Arrow format",
):
json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
written += file_obj.write(json_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in hf_tqdm(
pool.imap(
self._batch_json,
[(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating json from Arrow format",
):
written += file_obj.write(json_str)
return written | class_definition | 2,221 | 6,696 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py | null | 138 |
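A minimal sketch of the JSON writer: with the default `orient="records"` and `lines=True` it emits JSON Lines, and for path outputs the compression is inferred from the file extension.
```python
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
written = JsonDatasetWriter(ds, "data.jsonl.gz").write()  # gzip inferred from the ".gz" suffix
print(f"wrote {written} bytes")
```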
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass | class_definition | 231 | 1,087 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/abc.py | null | 139 |
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass | class_definition | 1,090 | 1,671 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/abc.py | null | 140 |
class Value:
"""
Scalar feature value of a particular data type.
The possible dtypes of `Value` are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Args:
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
"""
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)
def __post_init__(self):
if self.dtype == "double": # fix inferred type
self.dtype = "float64"
if self.dtype == "float": # fix inferred type
self.dtype = "float32"
self.pa_type = string_to_arrow(self.dtype)
def __call__(self):
return self.pa_type
def encode_example(self, value):
if pa.types.is_boolean(self.pa_type):
return bool(value)
elif pa.types.is_integer(self.pa_type):
return int(value)
elif pa.types.is_floating(self.pa_type):
return float(value)
elif pa.types.is_string(self.pa_type):
return str(value)
else:
return value | class_definition | 19,984 | 21,809 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 141 |
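A quick illustration of the dtype normalization and the scalar coercion performed by `encode_example`:
```python
from datasets import Value

print(Value("float"))                        # Value(dtype='float32', id=None) -- "float" is an alias
print(Value("int32").encode_example("5"))    # 5      -- coerced to int for integer Arrow types
print(Value("string").encode_example(3.14))  # '3.14' -- coerced to str for string Arrow types
```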
class _ArrayXD:
def __post_init__(self):
self.shape = tuple(self.shape)
def __call__(self):
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
return pa_type
def encode_example(self, value):
return value | class_definition | 21,812 | 22,097 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 142 |
class Array2D(_ArrayXD):
"""Create a two-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array2D", init=False, repr=False) | class_definition | 22,111 | 22,616 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 143 |
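A short sketch of using the fixed-shape array feature in a `Features` mapping (the column name is arbitrary):
```python
from datasets import Array2D, Dataset, Features

features = Features({"matrix": Array2D(shape=(2, 3), dtype="int32")})
ds = Dataset.from_dict({"matrix": [[[1, 2, 3], [4, 5, 6]]]}, features=features)
print(ds[0]["matrix"])  # [[1, 2, 3], [4, 5, 6]]
```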
class Array3D(_ArrayXD):
"""Create a three-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array3D", init=False, repr=False) | class_definition | 22,630 | 23,140 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 144 |
class Array4D(_ArrayXD):
"""Create a four-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array4D", init=False, repr=False) | class_definition | 23,154 | 23,666 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 145 |
class Array5D(_ArrayXD):
"""Create a five-dimensional array.
Args:
shape (`tuple`):
Size of each dimension.
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
```
"""
shape: tuple
dtype: str
id: Optional[str] = None
# Automatically constructed
_type: str = field(default="Array5D", init=False, repr=False) | class_definition | 23,680 | 24,195 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 146 |
class _ArrayXDExtensionType(pa.ExtensionType):
ndims: Optional[int] = None
def __init__(self, shape: tuple, dtype: str):
if self.ndims is None or self.ndims <= 1:
raise ValueError("You must instantiate an array type with a value for dim that is > 1")
if len(shape) != self.ndims:
raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
for dim in range(1, self.ndims):
if shape[dim] is None:
raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
self.shape = tuple(shape)
self.value_type = dtype
self.storage_dtype = self._generate_dtype(self.value_type)
pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
def __arrow_ext_serialize__(self):
return json.dumps((self.shape, self.value_type)).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
args = json.loads(serialized)
return cls(*args)
# This was added to pa.ExtensionType in pyarrow >= 13.0.0
def __reduce__(self):
return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
def __hash__(self):
return hash((self.__class__, self.shape, self.value_type))
def __arrow_ext_class__(self):
return ArrayExtensionArray
def _generate_dtype(self, dtype):
dtype = string_to_arrow(dtype)
for d in reversed(self.shape):
dtype = pa.list_(dtype)
# Don't specify the size of the list, since fixed length list arrays have issues
# being validated after slicing in pyarrow 0.17.1
return dtype
def to_pandas_dtype(self):
return PandasArrayExtensionDtype(self.value_type) | class_definition | 24,198 | 26,043 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 147 |
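As a small illustration of the storage layout described above, the extension type wraps nested Arrow list types, and only the first dimension may be dynamic (`None`):
```python
from datasets.features.features import Array3DExtensionType

ext_type = Array3DExtensionType(shape=(None, 2, 2), dtype="float32")
print(ext_type.storage_dtype)               # list<item: list<item: list<item: float>>>
print(ext_type.shape, ext_type.value_type)  # (None, 2, 2) float32
```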
class Array2DExtensionType(_ArrayXDExtensionType):
ndims = 2 | class_definition | 26,046 | 26,110 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 148 |
class Array3DExtensionType(_ArrayXDExtensionType):
ndims = 3 | class_definition | 26,113 | 26,177 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 149 |
class Array4DExtensionType(_ArrayXDExtensionType):
ndims = 4 | class_definition | 26,180 | 26,244 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 150 |
class Array5DExtensionType(_ArrayXDExtensionType):
ndims = 5 | class_definition | 26,247 | 26,311 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 151 |
class ArrayExtensionArray(pa.ExtensionArray):
def __array__(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
return self.to_numpy(zero_copy_only=zero_copy_only)
def __getitem__(self, i):
return self.storage[i]
def to_numpy(self, zero_copy_only=True):
storage: pa.ListArray = self.storage
null_mask = storage.is_null().to_numpy(zero_copy_only=False)
if self.type.shape[0] is not None:
size = 1
null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
for i in range(self.type.ndims):
size *= self.type.shape[i]
storage = storage.flatten()
numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
if len(null_indices):
numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
else:
shape = self.type.shape
ndims = self.type.ndims
arrays = []
first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
for i, is_null in enumerate(null_mask):
if is_null:
arrays.append(np.nan)
else:
storage_el = storage[i : i + 1]
first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
# flatten storage
for _ in range(ndims):
storage_el = storage_el.flatten()
numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
if len(np.unique(np.diff(first_dim_offsets))) > 1:
# ragged
numpy_arr = np.empty(len(arrays), dtype=object)
numpy_arr[:] = arrays
else:
numpy_arr = np.array(arrays)
return numpy_arr
def to_pylist(self):
zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
if self.type.shape[0] is None and numpy_arr.dtype == object:
return [arr.tolist() for arr in numpy_arr.tolist()]
else:
return numpy_arr.tolist() | class_definition | 27,874 | 30,290 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 152 |
class PandasArrayExtensionDtype(PandasExtensionDtype):
_metadata = "value_type"
def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
self._value_type = value_type
def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
if isinstance(array, pa.ChunkedArray):
array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
return PandasArrayExtensionArray(numpy_arr)
@classmethod
def construct_array_type(cls):
return PandasArrayExtensionArray
@property
def type(self) -> type:
return np.ndarray
@property
def kind(self) -> str:
return "O"
@property
def name(self) -> str:
return f"array[{self.value_type}]"
@property
def value_type(self) -> np.dtype:
return self._value_type | class_definition | 30,293 | 31,308 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 153 |
class PandasArrayExtensionArray(PandasExtensionArray):
def __init__(self, data: np.ndarray, copy: bool = False):
self._data = data if not copy else np.array(data)
self._dtype = PandasArrayExtensionDtype(data.dtype)
def __array__(self, dtype=None):
"""
Convert to NumPy Array.
Note that Pandas expects a 1D array when dtype is set to object.
But for other dtypes, the returned shape is the same as the one of ``data``.
More info about pandas 1D requirement for PandasExtensionArray here:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
"""
if dtype == np.dtype(object):
out = np.empty(len(self._data), dtype=object)
for i in range(len(self._data)):
out[i] = self._data[i]
return out
if dtype is None:
return self._data
else:
return self._data.astype(dtype)
def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
return PandasArrayExtensionArray(self._data, copy=True)
@classmethod
def _from_sequence(
cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
) -> "PandasArrayExtensionArray":
if len(scalars) > 1 and all(
isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
):
data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
else:
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
if len(to_concat) > 1 and all(
va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
for va in to_concat
):
data = np.vstack([va._data for va in to_concat])
else:
data = np.empty(len(to_concat), dtype=object)
data[:] = [va._data for va in to_concat]
return cls(data, copy=False)
@property
def dtype(self) -> PandasArrayExtensionDtype:
return self._dtype
@property
def nbytes(self) -> int:
return self._data.nbytes
def isna(self) -> np.ndarray:
return np.array([pd.isna(arr).any() for arr in self._data])
def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
raise NotImplementedError()
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
if isinstance(item, int):
return self._data[item]
return PandasArrayExtensionArray(self._data[item], copy=False)
def take(
self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
) -> "PandasArrayExtensionArray":
indices: np.ndarray = np.asarray(indices, dtype=int)
if allow_fill:
fill_value = (
self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
)
mask = indices == -1
if (indices < -1).any():
raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
elif len(self) > 0:
pass
elif not np.all(mask):
raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
else:
data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
return PandasArrayExtensionArray(data, copy=False)
took = self._data.take(indices, axis=0)
if allow_fill and mask.any():
took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
def __len__(self) -> int:
return len(self._data)
def __eq__(self, other) -> np.ndarray:
if not isinstance(other, PandasArrayExtensionArray):
raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
return (self._data == other._data).all() | class_definition | 31,311 | 35,659 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 154 |
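Not part of the extracted source: a sketch of how `PandasArrayExtensionDtype` and `PandasArrayExtensionArray` surface when a dataset with an ArrayXD column is converted to pandas. The `grid` column name is illustrative and the exact dtype repr may vary across versions.

```python
from datasets import Array2D, Dataset, Features

features = Features({"grid": Array2D(shape=(2, 2), dtype="int32")})
ds = Dataset.from_dict({"grid": [[[1, 2], [3, 4]]]}, features=features)
df = ds.to_pandas()
print(df["grid"].dtype)  # array[int32]  (PandasArrayExtensionDtype)
print(df["grid"][0])     # 2x2 numpy array reconstructed via __from_arrow__
```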
class ClassLabel:
"""Feature type for integer class labels.
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
* `num_classes`: Create 0 to (num_classes-1) labels.
* `names`: List of label strings.
* `names_file`: File containing the list of labels.
Under the hood the labels are stored as integers.
You can use negative integers to represent unknown/missing labels.
Args:
num_classes (`int`, *optional*):
Number of classes. All labels must be < `num_classes`.
names (`list` of `str`, *optional*):
String names for the integer classes.
The order in which the names are provided is kept.
names_file (`str`, *optional*):
Path to a file with names for the integer classes, one per line.
Example:
```py
>>> from datasets import Features, ClassLabel
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
>>> features
{'label': ClassLabel(names=['bad', 'ok', 'good'], id=None)}
```
"""
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
names: List[str] = None
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "int64"
pa_type: ClassVar[Any] = pa.int64()
_str2int: ClassVar[Dict[str, int]] = None
    _int2str: ClassVar[Dict[int, str]] = None
_type: str = field(default="ClassLabel", init=False, repr=False)
def __post_init__(self, num_classes, names_file):
self.num_classes = num_classes
self.names_file = names_file
if self.names_file is not None and self.names is not None:
raise ValueError("Please provide either names or names_file but not both.")
# Set self.names
if self.names is None:
if self.names_file is not None:
self.names = self._load_names_from_file(self.names_file)
elif self.num_classes is not None:
self.names = [str(i) for i in range(self.num_classes)]
else:
raise ValueError("Please provide either num_classes, names or names_file.")
elif not isinstance(self.names, SequenceABC):
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
# Set self.num_classes
if self.num_classes is None:
self.num_classes = len(self.names)
elif self.num_classes != len(self.names):
raise ValueError(
"ClassLabel number of names do not match the defined num_classes. "
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
)
# Prepare mappings
self._int2str = [str(name) for name in self.names]
self._str2int = {name: i for i, name in enumerate(self._int2str)}
if len(self._int2str) != len(self._str2int):
raise ValueError("Some label names are duplicated. Each label name should be unique.")
def __call__(self):
return self.pa_type
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
"""Conversion class name `string` => `integer`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].str2int('neg')
0
```
"""
if not isinstance(values, str) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, str):
values = [values]
return_list = False
output = [self._strval2int(value) for value in values]
return output if return_list else output[0]
def _strval2int(self, value: str) -> int:
failed_parse = False
value = str(value)
# first attempt - raw string value
int_value = self._str2int.get(value)
if int_value is None:
# second attempt - strip whitespace
int_value = self._str2int.get(value.strip())
if int_value is None:
# third attempt - convert str to int
try:
int_value = int(value)
except ValueError:
failed_parse = True
else:
if int_value < -1 or int_value >= self.num_classes:
failed_parse = True
if failed_parse:
raise ValueError(f"Invalid string class label {value}")
return int_value
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
"""Conversion `integer` => class name `string`.
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> ds.features["label"].int2str(0)
'neg'
```
"""
if not isinstance(values, int) and not isinstance(values, Iterable):
raise ValueError(
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
)
return_list = True
if isinstance(values, int):
values = [values]
return_list = False
for v in values:
if not 0 <= v < self.num_classes:
raise ValueError(f"Invalid integer class label {v:d}")
output = [self._int2str[int(v)] for v in values]
return output if return_list else output[0]
def encode_example(self, example_data):
if self.num_classes is None:
raise ValueError(
"Trying to use ClassLabel feature with undefined number of class. "
"Please set ClassLabel.names or num_classes."
)
# If a string is given, convert to associated integer
if isinstance(example_data, str):
example_data = self.str2int(example_data)
# Allowing -1 to mean no label.
if not -1 <= example_data < self.num_classes:
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
return example_data
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
- `pa.string()`
- `pa.int()`
Args:
storage (`Union[pa.StringArray, pa.IntegerArray]`):
PyArrow array to cast.
Returns:
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
"""
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
min_max = pc.min_max(storage).as_py()
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
raise ValueError(
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
)
elif isinstance(storage, pa.StringArray):
storage = pa.array(
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
)
return array_cast(storage, self.pa_type)
@staticmethod
def _load_names_from_file(names_filepath):
with open(names_filepath, encoding="utf-8") as f:
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names | class_definition | 35,815 | 43,786 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 155 |
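Not part of the extracted source: a minimal usage sketch of `str2int`, `int2str`, and the `cast_storage` path taken when a string column is cast to `ClassLabel`.

```python
from datasets import ClassLabel, Dataset

labels = ClassLabel(names=["neg", "pos"])
print(labels.str2int("pos"), labels.int2str(0))  # 1 neg

# Casting a string column goes through cast_storage -> _strval2int.
ds = Dataset.from_dict({"label": ["pos", "neg", "pos"]}).cast_column("label", labels)
print(ds["label"])  # [1, 0, 1]
```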
class Sequence:
"""Construct a list of feature from a single type or a dict of types.
Mostly here for compatiblity with tfds.
Args:
feature ([`FeatureType`]):
A list of features of a single type or a dictionary of types.
length (`int`):
Length of the sequence.
Example:
```py
>>> from datasets import Features, Sequence, Value, ClassLabel
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
>>> features
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(names=['hot', 'cold'], id=None)}, length=-1, id=None)}
```
"""
feature: Any
length: int = -1
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False) | class_definition | 43,800 | 44,842 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 156 |
class LargeList:
"""Feature type for large list data composed of child feature data type.
It is backed by `pyarrow.LargeListType`, which is like `pyarrow.ListType` but with 64-bit rather than 32-bit offsets.
Args:
feature ([`FeatureType`]):
Child feature data type of each item within the large list.
"""
feature: Any
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="LargeList", init=False, repr=False) | class_definition | 44,856 | 45,379 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 157 |
class Features(dict):
"""A special dictionary that defines the internal structure of a dataset.
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
and values are the type of that column.
`FieldType` can be one of the following:
- [`Value`] feature specifies a single data type value, e.g. `int64` or `string`.
- [`ClassLabel`] feature specifies a predefined set of classes which can have labels associated to them and
will be stored as integers in the dataset.
- Python `dict` specifies a composite feature containing a mapping of sub-fields to sub-features.
It's possible to have nested fields of nested fields in an arbitrary manner.
- Python `list`, [`LargeList`] or [`Sequence`] specifies a composite feature containing a sequence of
sub-features, all of the same feature type.
<Tip>
A [`Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
    unwanted in some cases. If you don't want this behavior, you can use a Python `list` or a [`LargeList`]
instead of the [`Sequence`].
</Tip>
- [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
- [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
- [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key).
This feature extracts the image data.
- [`Translation`] or [`TranslationVariableLanguages`] feature specific to Machine Translation.
"""
def __init__(*args, **kwargs):
# self not in the signature to allow passing self as a kwarg
if not args:
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
self, *args = args
super(Features, self).__init__(*args, **kwargs)
self._column_requires_decoding: Dict[str, bool] = {
col: require_decoding(feature) for col, feature in self.items()
}
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
update = keep_features_dicts_synced(dict.update)
setdefault = keep_features_dicts_synced(dict.setdefault)
pop = keep_features_dicts_synced(dict.pop)
popitem = keep_features_dicts_synced(dict.popitem)
clear = keep_features_dicts_synced(dict.clear)
def __reduce__(self):
return Features, (dict(self),)
@property
def type(self):
"""
Features field types.
Returns:
:obj:`pyarrow.DataType`
"""
return get_nested_type(self)
@property
def arrow_schema(self):
"""
Features schema.
Returns:
:obj:`pyarrow.Schema`
"""
hf_metadata = {"info": {"features": self.to_dict()}}
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
@classmethod
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
"""
Construct [`Features`] from Arrow Schema.
It also checks the schema metadata for Hugging Face Datasets features.
Non-nullable fields are not supported and set to nullable.
Also, pa.dictionary is not supported and it uses its underlying type instead.
Therefore datasets convert DictionaryArray objects to their actual values.
Args:
pa_schema (`pyarrow.Schema`):
Arrow Schema.
Returns:
[`Features`]
"""
# try to load features from the arrow schema metadata
metadata_features = Features()
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
metadata_features = Features.from_dict(metadata["info"]["features"])
metadata_features_schema = metadata_features.arrow_schema
obj = {
field.name: (
metadata_features[field.name]
if field.name in metadata_features and metadata_features_schema.field(field.name) == field
else generate_from_arrow_type(field.type)
)
for field in pa_schema
}
return cls(**obj)
@classmethod
def from_dict(cls, dic) -> "Features":
"""
Construct [`Features`] from dict.
Regenerate the nested feature object from a deserialized dict.
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
It allows for a convenient constructor syntax
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
dtypes that [`Value`] automatically performs.
Args:
dic (`dict[str, Any]`):
Python dictionary.
Returns:
`Features`
Example::
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
{'_type': Value(dtype='string', id=None)}
"""
obj = generate_from_dict(dic)
return cls(**obj)
def to_dict(self):
return asdict(self)
def _to_yaml_list(self) -> list:
# we compute the YAML list from the dict representation that is used for JSON dump
yaml_data = self.to_dict()
def simplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
for list_type in ["large_list", "list", "sequence"]:
#
# list_type: -> list_type: int32
# dtype: int32 ->
#
if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["dtype"]:
feature[list_type] = feature[list_type]["dtype"]
#
# list_type: -> list_type:
# struct: -> - name: foo
# - name: foo -> dtype: int32
# dtype: int32 ->
#
if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["struct"]:
feature[list_type] = feature[list_type]["struct"]
#
# class_label: -> class_label:
# names: -> names:
# - negative -> '0': negative
# - positive -> '1': positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
# server-side requirement: keys must be strings
feature["class_label"]["names"] = {
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
}
return feature
def to_yaml_inner(obj: Union[dict, list]) -> dict:
if isinstance(obj, dict):
_type = obj.pop("_type", None)
if _type == "LargeList":
_feature = obj.pop("feature")
return simplify({"large_list": to_yaml_inner(_feature), **obj})
elif _type == "Sequence":
_feature = obj.pop("feature")
return simplify({"sequence": to_yaml_inner(_feature), **obj})
elif _type == "Value":
return obj
elif _type and not obj:
return {"dtype": camelcase_to_snakecase(_type)}
elif _type:
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
else:
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
elif isinstance(obj, list):
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
elif isinstance(obj, tuple):
return to_yaml_inner(list(obj))
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
def to_yaml_types(obj: dict) -> dict:
if isinstance(obj, dict):
return {k: to_yaml_types(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [to_yaml_types(v) for v in obj]
elif isinstance(obj, tuple):
return to_yaml_types(list(obj))
else:
return obj
return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "Features":
yaml_data = copy.deepcopy(yaml_data)
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
def unsimplify(feature: dict) -> dict:
if not isinstance(feature, dict):
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
for list_type in ["large_list", "list", "sequence"]:
#
# list_type: int32 -> list_type:
# -> dtype: int32
#
if isinstance(feature.get(list_type), str):
feature[list_type] = {"dtype": feature[list_type]}
#
# class_label: -> class_label:
# names: -> names:
# '0': negative -> - negative
# '1': positive -> - positive
#
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
label_ids = sorted(feature["class_label"]["names"], key=int)
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
raise ValueError(
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
)
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
return feature
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
if isinstance(obj, dict):
if not obj:
return {}
_type = next(iter(obj))
if _type == "large_list":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "LargeList"}
if _type == "sequence":
_feature = unsimplify(obj).pop(_type)
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
if _type == "list":
return [from_yaml_inner(unsimplify(obj)[_type])]
if _type == "struct":
return from_yaml_inner(obj["struct"])
elif _type == "dtype":
if isinstance(obj["dtype"], str):
# e.g. int32, float64, string, audio, image
try:
Value(obj["dtype"])
return {**obj, "_type": "Value"}
except ValueError:
# e.g. Audio, Image, ArrayXD
return {"_type": snakecase_to_camelcase(obj["dtype"])}
else:
return from_yaml_inner(obj["dtype"])
else:
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
elif isinstance(obj, list):
names = [_feature.pop("name") for _feature in obj]
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
else:
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
return cls.from_dict(from_yaml_inner(yaml_data))
def encode_example(self, example):
"""
Encode example into a format for Arrow.
Args:
example (`dict[str, Any]`):
Data in a Dataset row.
Returns:
`dict[str, Any]`
"""
example = cast_to_python_objects(example)
return encode_nested_example(self, example)
def encode_column(self, column, column_name: str):
"""
Encode column into a format for Arrow.
Args:
column (`list[Any]`):
Data in a Dataset column.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
column = cast_to_python_objects(column)
return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
def encode_batch(self, batch):
"""
Encode batch into a format for Arrow.
Args:
batch (`dict[str, list[Any]]`):
Data in a Dataset batch.
Returns:
`dict[str, list[Any]]`
"""
encoded_batch = {}
if set(batch) != set(self):
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
for key, column in batch.items():
column = cast_to_python_objects(column)
encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
return encoded_batch
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode example with custom feature decoding.
Args:
example (`dict[str, Any]`):
Dataset row data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary `repo_id (str) -> token (bool or str)`.
Returns:
`dict[str, Any]`
"""
return {
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
if self._column_requires_decoding[column_name]
else value
for column_name, (feature, value) in zip_dict(
{key: value for key, value in self.items() if key in example}, example
)
}
def decode_column(self, column: list, column_name: str):
"""Decode column with custom feature decoding.
Args:
column (`list[Any]`):
Dataset column data.
column_name (`str`):
Dataset column name.
Returns:
`list[Any]`
"""
return (
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
if self._column_requires_decoding[column_name]
else column
)
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
"""Decode batch with custom feature decoding.
Args:
batch (`dict[str, list[Any]]`):
Dataset batch data.
token_per_repo_id (`dict`, *optional*):
To access and decode audio or image files from private repositories on the Hub, you can pass
a dictionary repo_id (str) -> token (bool or str)
Returns:
`dict[str, list[Any]]`
"""
decoded_batch = {}
for column_name, column in batch.items():
decoded_batch[column_name] = (
[
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
if value is not None
else None
for value in column
]
if self._column_requires_decoding[column_name]
else column
)
return decoded_batch
def copy(self) -> "Features":
"""
Make a deep copy of [`Features`].
Returns:
[`Features`]
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train")
>>> copy_of_features = ds.features.copy()
>>> copy_of_features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return copy.deepcopy(self)
def reorder_fields_as(self, other: "Features") -> "Features":
"""
Reorder Features fields to match the field order of other [`Features`].
The order of the fields is important since it matters for the underlying arrow data.
Re-ordering the fields allows to make the underlying arrow data type match.
Args:
other ([`Features`]):
The other [`Features`] to align with.
Returns:
[`Features`]
Example::
>>> from datasets import Features, Sequence, Value
>>> # let's say we have two features with a different order of nested fields (for a and b for example)
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
>>> assert f1.type != f2.type
>>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match
>>> f1.reorder_fields_as(f2)
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
>>> assert f1.reorder_fields_as(f2).type == f2.type
"""
def recursive_reorder(source, target, stack=""):
stack_position = " at " + stack[1:] if stack else ""
if isinstance(target, Sequence):
target = target.feature
if isinstance(target, dict):
target = {k: [v] for k, v in target.items()}
else:
target = [target]
if isinstance(source, Sequence):
sequence_kwargs = vars(source).copy()
source = sequence_kwargs.pop("feature")
if isinstance(source, dict):
source = {k: [v] for k, v in source.items()}
reordered = recursive_reorder(source, target, stack)
return Sequence({k: v[0] for k, v in reordered.items()}, **sequence_kwargs)
else:
source = [source]
reordered = recursive_reorder(source, target, stack)
return Sequence(reordered[0], **sequence_kwargs)
elif isinstance(source, dict):
if not isinstance(target, dict):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if sorted(source) != sorted(target):
message = (
f"Keys mismatch: between {source} (source) and {target} (target).\n"
f"{source.keys() - target.keys()} are missing from target "
f"and {target.keys() - source.keys()} are missing from source" + stack_position
)
raise ValueError(message)
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
elif isinstance(source, list):
if not isinstance(target, list):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
if len(source) != len(target):
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
elif isinstance(source, LargeList):
if not isinstance(target, LargeList):
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
return LargeList(recursive_reorder(source.feature, target.feature, stack))
else:
return source
return Features(recursive_reorder(self, other))
def flatten(self, max_depth=16) -> "Features":
"""Flatten the features. Every dictionary column is removed and is replaced by
all the subfields it contains. The new fields are named by concatenating the
name of the original column and the subfield name like this: `<original>.<subfield>`.
If a column contains nested dictionaries, then all the lower-level subfields names are
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
Returns:
[`Features`]:
The flattened features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("squad", split="train")
>>> ds.features.flatten()
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
'context': Value(dtype='string', id=None),
'id': Value(dtype='string', id=None),
'question': Value(dtype='string', id=None),
'title': Value(dtype='string', id=None)}
```
"""
for depth in range(1, max_depth):
no_change = True
flattened = self.copy()
for column_name, subfeature in self.items():
if isinstance(subfeature, dict):
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
del flattened[column_name]
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
no_change = False
flattened.update(
{
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
for k, v in subfeature.feature.items()
}
)
del flattened[column_name]
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
no_change = False
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
del flattened[column_name]
self = flattened
if no_change:
break
return self | class_definition | 66,382 | 90,660 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py | null | 158 |
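Not part of the extracted source: a minimal sketch of the `Features` serialization round trip, the Arrow schema with Hugging Face metadata, and `flatten` on a nested dict column. Column names are illustrative.

```python
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})

# Serialization round trip (used when dumping/loading DatasetInfo).
assert Features.from_dict(features.to_dict()) == features

# Arrow schema with the "huggingface" metadata key attached.
print(features.arrow_schema)

# Flattening replaces dict columns by "<column>.<subfield>" entries.
nested = Features({"answers": {"text": Value("string"), "start": Value("int32")}})
print(nested.flatten())
# {'answers.text': Value(dtype='string', id=None), 'answers.start': Value(dtype='int32', id=None)}
```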
class Translation:
"""`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)} | class_definition | 211 | 1,349 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py | null | 159 |
class TranslationVariableLanguages:
"""`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
} | class_definition | 1,363 | 4,457 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py | null | 160 |
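Not part of the extracted source: a sketch of `encode_example` splitting a multi-translation entry into aligned, sorted `language`/`translation` sequences (returned as tuples here; they become lists once stored in Arrow).

```python
from datasets import TranslationVariableLanguages

feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}
```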
class Audio:
"""Audio [`Feature`] to extract audio data from an audio file.
Input: The Audio feature accepts as input:
- A `str`: Absolute path to the audio file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `bytes`: Bytes content of the audio file.
This is useful for archived files with sequential access.
- A `dict` with the keys:
- `path`: String with relative path of the audio file to the archive file.
- `array`: Array containing the audio sample
- `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
This is useful for archived files with sequential access.
Args:
sampling_rate (`int`, *optional*):
Target sampling rate. If `None`, the native sampling rate is used.
mono (`bool`, defaults to `True`):
Whether to convert the audio signal to mono by averaging samples across
channels.
decode (`bool`, defaults to `True`):
Whether to decode the audio data. If `False`,
returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
Example:
```py
>>> from datasets import load_dataset, Audio
>>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
>>> ds[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
"""
sampling_rate: Optional[int] = None
mono: bool = True
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Audio", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str` or `dict`):
Data passed as input to Audio feature.
Returns:
`dict`
"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
if isinstance(value, str):
return {"bytes": None, "path": value}
elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
buffer = BytesIO()
sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm"):
# "PCM" only has raw audio bytes
if value.get("sampling_rate") is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
if value.get("bytes"):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
else:
bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
buffer = BytesIO(bytes())
sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(
self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
) -> dict:
"""Decode example audio file into audio data.
Args:
value (`dict`):
A dictionary with keys:
- `path`: String with relative audio file path.
- `bytes`: Bytes of the audio file.
token_per_repo_id (`dict`, *optional*):
To access and decode
audio files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`)
Returns:
`dict`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
)
if file is None:
token_per_repo_id = token_per_repo_id or {}
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id[repo_id]
except (ValueError, KeyError):
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
array, sampling_rate = sf.read(f)
else:
array, sampling_rate = sf.read(file)
array = array.T
if self.mono:
array = librosa.to_mono(array)
if self.sampling_rate and self.sampling_rate != sampling_rate:
array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature.")
return {
"bytes": Value("binary"),
"path": Value("string"),
}
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""Cast an Arrow array to the Audio arrow storage type.
The Arrow types that can be converted to the Audio pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the audio bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
Args:
storage (`Union[pa.StringArray, pa.StructArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed audio files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Audio arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type) | class_definition | 481 | 12,224 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py | null | 161 |
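Not part of the extracted source: a minimal sketch of the `Audio` feature's storage format; `"speech.wav"` is a placeholder path, and with `decode=False` no audio backend is needed to index the row.

```python
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["speech.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000, decode=False))
print(ds[0]["audio"])  # {'bytes': None, 'path': 'speech.wav'}
# With decode=True (the default), indexing the row calls decode_example and returns
# {"path", "array", "sampling_rate"}, resampled to 16 kHz (requires librosa + soundfile).
```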
class Video:
"""
**Experimental.** Video [`Feature`] to read video data from a video file.
Input: The Video feature accepts as input:
- A `str`: Absolute path to the video file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the video file in a dataset repository.
- `bytes`: Bytes of the video file.
This is useful for archived files with sequential access.
- A `decord.VideoReader`: decord video reader object.
Args:
mode (`str`, *optional*):
The mode to convert the video to. If `None`, the native mode of the video is used.
decode (`bool`, defaults to `True`):
Whether to decode the video data. If `False`,
returns the underlying dictionary in the format `{"path": video_path, "bytes": video_bytes}`.
Examples:
```py
>>> from datasets import Dataset, Video
>>> ds = Dataset.from_dict({"video":["path/to/Screen Recording.mov"]}).cast_column("video", Video())
>>> ds.features["video"]
Video(decode=True, id=None)
>>> ds[0]["video"]
<decord.video_reader.VideoReader at 0x105525c70>
>>> ds = ds.cast_column('video', Video(decode=False))
{'bytes': None,
'path': 'path/to/Screen Recording.mov'}
```
"""
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "decord.VideoReader"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Video", init=False, repr=False)
def __post_init__(self):
if config.DECORD_AVAILABLE:
patch_decord()
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "VideoReader"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `VideoReader` or `dict`):
Data passed as input to Video feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.DECORD_AVAILABLE:
from decord import VideoReader
else:
VideoReader = None
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the video array to bytes
return encode_np_array(value)
elif VideoReader and isinstance(value, VideoReader):
# convert the decord video reader to bytes
return encode_decord_video(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the video bytes, and path is used to infer the video format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"A video sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "VideoReader":
"""Decode example video file into video data.
Args:
value (`str` or `dict`):
A string with the absolute video file path, a dictionary with
keys:
- `path`: String with absolute or relative video file path.
- `bytes`: The bytes of the video file.
token_per_repo_id (`dict`, *optional*):
To access and decode
video files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`decord.VideoReader`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Video(decode=True) instead.")
if config.DECORD_AVAILABLE:
from decord import VideoReader
else:
raise ImportError("To support decoding videos, please install 'decord'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"A video should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
video = VideoReader(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
video = VideoReader(bytes_)
else:
video = VideoReader(BytesIO(bytes_))
return video
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Video arrow storage type.
The Arrow types that can be converted to the Video pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the video bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
- `pa.list(*)` - it must contain the video array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Video arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type) | class_definition | 497 | 9,194 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py | null | 162 |
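Not part of the extracted source: a sketch of the experimental `Video` feature's raw storage; `"clip.mp4"` is a placeholder path, and this assumes a `datasets` version that exports `Video` at the top level. With `decode=False`, `decord` is not needed.

```python
from datasets import Dataset, Video

ds = Dataset.from_dict({"video": ["clip.mp4"]}).cast_column("video", Video(decode=False))
print(ds[0]["video"])  # {'bytes': None, 'path': 'clip.mp4'}
# With decode=True (the default), indexing the row returns a decord.VideoReader.
```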
class Image:
"""Image [`Feature`] to read image data from an image file.
Input: The Image feature accepts as input:
- A `str`: Absolute path to the image file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the image file to the archive file.
- `bytes`: Bytes of the image file.
This is useful for archived files with sequential access.
- An `np.ndarray`: NumPy array representing an image.
- A `PIL.Image.Image`: PIL image object.
Args:
mode (`str`, *optional*):
The mode to convert the image to. If `None`, the native mode of the image is used.
decode (`bool`, defaults to `True`):
Whether to decode the image data. If `False`,
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
Examples:
```py
>>> from datasets import load_dataset, Image
>>> ds = load_dataset("beans", split="train")
>>> ds.features["image"]
Image(decode=True, id=None)
>>> ds[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
>>> ds = ds.cast_column('image', Image(decode=False))
{'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
```
"""
mode: Optional[str] = None
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
Data passed as input to Image feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(value)
elif isinstance(value, PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""Decode example image file into image data.
Args:
value (`str` or `dict`):
A string with the absolute image file path, a dictionary with
keys:
- `path`: String with absolute or relative image file path.
- `bytes`: The bytes of the image file.
token_per_repo_id (`dict`, *optional*):
To access and decode
image files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`PIL.Image.Image`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
import PIL.ImageOps
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
image = PIL.Image.open(bytes_)
else:
image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
image = PIL.ImageOps.exif_transpose(image)
if self.mode and self.mode != image.mode:
image = image.convert(self.mode)
return image
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Image arrow storage type.
The Arrow types that can be converted to the Image pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the image bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
- `pa.list(*)` - it must contain the image array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed image files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type) | class_definition | 1,212 | 11,520 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/image.py | null | 163 |
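A minimal sketch of the decode path above, assuming only that Pillow is installed; the image is created in memory, so no files or Hub access are involved:

```python
from io import BytesIO

import PIL.Image
from datasets import Image

# Build a tiny in-memory image and serialize it to PNG bytes.
pil_image = PIL.Image.new("RGB", (4, 4), color=(255, 0, 0))
buffer = BytesIO()
pil_image.save(buffer, format="PNG")

feature = Image()
# The Arrow storage format is a struct with "bytes" and "path"; decode_example
# turns it back into a PIL image (here from bytes, so no path is needed).
decoded = feature.decode_example({"bytes": buffer.getvalue(), "path": None})
print(decoded.size, decoded.mode)  # (4, 4) RGB
```

When `decode=False`, examples keep this raw `{bytes, path}` form, which is what `flatten` exposes as two plain columns.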
class WebDataset(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 100
IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
VIDEO_EXTENSIONS: List[str] # definition at the bottom of the script
DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
@classmethod
def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
current_example = {}
fs: fsspec.AbstractFileSystem = fsspec.filesystem("memory")
streaming_download_manager = datasets.StreamingDownloadManager()
for filename, f in tar_iterator:
example_key, field_name = base_plus_ext(filename)
if example_key is None:
continue
if current_example and current_example["__key__"] != example_key:
                # move these keys to the end of the example dict
current_example["__key__"] = current_example.pop("__key__")
current_example["__url__"] = current_example.pop("__url__")
yield current_example
current_example = {}
current_example["__key__"] = example_key
current_example["__url__"] = tar_path
current_example[field_name.lower()] = f.read()
if field_name.split(".")[-1] in SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL:
fs.write_bytes(filename, current_example[field_name.lower()])
extracted_file_path = streaming_download_manager.extract(f"memory://{filename}")
with fsspec.open(extracted_file_path) as f:
current_example[field_name.lower()] = f.read()
fs.delete(filename)
data_extension = xbasename(extracted_file_path).split(".")[-1]
else:
data_extension = field_name.split(".")[-1]
if data_extension in cls.DECODERS:
                current_example[field_name.lower()] = cls.DECODERS[data_extension](current_example[field_name.lower()])
if current_example:
yield current_example
def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo()
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
# Download the data files
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download(self.config.data_files)
splits = []
for split_name, tar_paths in data_files.items():
if isinstance(tar_paths, str):
tar_paths = [tar_paths]
tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
splits.append(
datasets.SplitGenerator(
name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
)
)
if not self.info.features:
# Get one example to get the feature types
pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
if any(example.keys() != first_examples[0].keys() for example in first_examples):
raise ValueError(
"The TAR archives of the dataset should be in WebDataset format, "
"but the files in the archive don't share the same prefix or the same types."
)
pa_tables = [
pa.Table.from_pylist(cast_to_python_objects([example], only_1d_for_numpy=True))
for example in first_examples
]
inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
# Set Image types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.IMAGE_EXTENSIONS:
features[field_name] = datasets.Image()
# Set Audio types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.AUDIO_EXTENSIONS:
features[field_name] = datasets.Audio()
# Set Video types
for field_name in first_examples[0]:
extension = field_name.rsplit(".", 1)[-1]
if extension in self.VIDEO_EXTENSIONS:
features[field_name] = datasets.Video()
self.info.features = features
return splits
def _generate_examples(self, tar_paths, tar_iterators):
image_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
]
audio_field_names = [
field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
]
all_field_names = list(self.info.features.keys())
for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
for field_name in all_field_names:
if field_name not in example:
example[field_name] = None
for field_name in image_field_names + audio_field_names:
if example[field_name] is not None:
example[field_name] = {
"path": example["__key__"] + "." + field_name,
"bytes": example[field_name],
}
yield f"{tar_idx}_{example_idx}", example | class_definition | 392 | 6,421 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/webdataset/webdataset.py | null | 164 |
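For context, a hedged sketch of how this builder is usually reached through the public API; the shard paths below are placeholders for real WebDataset-style TAR files:

```python
from datasets import load_dataset

# Placeholder glob: each shard is a TAR whose members share a key per example,
# e.g. 000001.jpg + 000001.json -> one example with "jpg" and "json" fields.
data_files = {"train": "path/to/shards/train-*.tar"}

ds = load_dataset("webdataset", data_files=data_files, split="train", streaming=True)
for example in ds.take(2):
    # "__key__" and "__url__" are added by the builder; recognized image/audio/video
    # extensions are exposed as datasets.Image / Audio / Video features.
    print(example["__key__"], sorted(example))
```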
class FolderBasedBuilderConfig(datasets.BuilderConfig):
"""BuilderConfig for AutoFolder."""
features: Optional[datasets.Features] = None
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__() | class_definition | 411 | 678 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py | null | 165 |
class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
"""
    Base class for generic data loaders for vision, audio and video data.
    Abstract class attributes to be overridden by a child class:
        BASE_FEATURE: feature object used to decode data (e.g. datasets.Image, datasets.Audio, ...)
        BASE_COLUMN_NAME: string key name of a base feature (e.g. "image", "audio", ...)
        BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
        EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAMES files
            will be included in a dataset)
"""
BASE_FEATURE: Type[FeatureType]
BASE_COLUMN_NAME: str
BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
EXTENSIONS: List[str]
METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
# Do an early pass if:
# * `drop_labels` is None (default) or False, to infer the class labels
# * `drop_metadata` is None (default) or False, to find the metadata files
do_analyze = not self.config.drop_labels or not self.config.drop_metadata
labels, path_depths = set(), set()
metadata_files = collections.defaultdict(set)
def analyze(files_or_archives, downloaded_files_or_dirs, split):
if len(downloaded_files_or_dirs) == 0:
return
# The files are separated from the archives at this point, so check the first sample
# to see if it's a file or a directory and iterate accordingly
if os.path.isfile(downloaded_files_or_dirs[0]):
original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
for original_file, downloaded_file in zip(original_files, downloaded_files):
original_file, downloaded_file = str(original_file), str(downloaded_file)
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(original_file)))
path_depths.add(count_path_segments(original_file))
elif os.path.basename(original_file) in self.METADATA_FILENAMES:
metadata_files[split].add((original_file, downloaded_file))
else:
original_file_name = os.path.basename(original_file)
                        logger.debug(
                            f"The file '{original_file_name}' was ignored: it is not a {self.BASE_COLUMN_NAME} file, and is not {self.METADATA_FILENAMES} either."
                        )
else:
archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
for archive, downloaded_dir in zip(archives, downloaded_dirs):
archive, downloaded_dir = str(archive), str(downloaded_dir)
for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
                        if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
path_depths.add(count_path_segments(downloaded_dir_file))
elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
metadata_files[split].add((None, downloaded_dir_file))
else:
archive_file_name = os.path.basename(archive)
original_file_name = os.path.basename(downloaded_dir_file)
                            logger.debug(
                                f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not a {self.BASE_COLUMN_NAME} file, and is not {self.METADATA_FILENAMES} either."
                            )
data_files = self.config.data_files
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files, archives = self._split_files_and_archives(files)
downloaded_files = dl_manager.download(files)
downloaded_dirs = dl_manager.download_and_extract(archives)
if do_analyze: # drop_metadata is None or False, drop_labels is None or False
logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
analyze(files, downloaded_files, split_name)
analyze(archives, downloaded_dirs, split_name)
if metadata_files:
# add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
add_metadata = not self.config.drop_metadata
# if `metadata_files` are found, add labels only if
# `drop_labels` is set up to False explicitly (not-default behavior)
add_labels = self.config.drop_labels is False
else:
# if `metadata_files` are not found, don't add metadata
add_metadata = False
# if `metadata_files` are not found and `drop_labels` is None (default) -
# add labels if files are on the same level in directory hierarchy and there is more than one label
add_labels = (
(len(labels) > 1 and len(path_depths) == 1)
if self.config.drop_labels is None
else not self.config.drop_labels
)
if add_labels:
logger.info("Adding the labels inferred from data directories to the dataset's features...")
if add_metadata:
logger.info("Adding metadata to the dataset...")
else:
add_labels, add_metadata, metadata_files = False, False, {}
splits.append(
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
"files": list(zip(files, downloaded_files))
+ [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
"metadata_files": metadata_files,
"split_name": split_name,
"add_labels": add_labels,
"add_metadata": add_metadata,
},
)
)
if add_metadata:
# Verify that:
# * all metadata files have the same set of features
# * the `file_name` key is one of the metadata keys and is of type string
features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
# Check that all metadata files share the same format
metadata_ext = {
os.path.splitext(original_metadata_file)[-1]
for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
}
if len(metadata_ext) > 1:
raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
metadata_ext = metadata_ext.pop()
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
features_per_metadata_file.append(
(downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
)
for downloaded_metadata_file, metadata_features in features_per_metadata_file:
if metadata_features != features_per_metadata_file[0][1]:
raise ValueError(
f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
)
metadata_features = features_per_metadata_file[0][1]
if "file_name" not in metadata_features:
raise ValueError("`file_name` must be present as dictionary key in metadata files")
if metadata_features["file_name"] != datasets.Value("string"):
raise ValueError("`file_name` key must be a string")
del metadata_features["file_name"]
else:
metadata_features = None
# Normally, we would do this in _info, but we need to know the labels and/or metadata
# before building the features
if self.config.features is None:
if add_labels:
self.info.features = datasets.Features(
{
self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
"label": datasets.ClassLabel(names=sorted(labels)),
}
)
else:
self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
if add_metadata:
# Warn if there are duplicated keys in metadata compared to the existing features
# (`BASE_COLUMN_NAME`, optionally "label")
duplicated_keys = set(self.info.features) & set(metadata_features)
if duplicated_keys:
logger.warning(
f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
f"the features dictionary."
)
# skip metadata duplicated keys
self.info.features.update(
{
feature: metadata_features[feature]
for feature in metadata_features
if feature not in duplicated_keys
}
)
return splits
def _split_files_and_archives(self, data_files):
files, archives = [], []
for data_file in data_files:
_, data_file_ext = os.path.splitext(data_file)
if data_file_ext.lower() in self.EXTENSIONS:
files.append(data_file)
elif os.path.basename(data_file) in self.METADATA_FILENAMES:
files.append(data_file)
else:
archives.append(data_file)
return files, archives
def _read_metadata(self, metadata_file, metadata_ext: str = ""):
if metadata_ext == ".csv":
# Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
return pa.Table.from_pandas(pd.read_csv(metadata_file))
else:
with open(metadata_file, "rb") as f:
return paj.read_json(f)
def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
split_metadata_files = metadata_files.get(split_name, [])
sample_empty_metadata = (
{k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
)
last_checked_dir = None
metadata_dir = None
metadata_dict = None
downloaded_metadata_file = None
metadata_ext = ""
if split_metadata_files:
metadata_ext = {
os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
}
metadata_ext = metadata_ext.pop()
file_idx = 0
for original_file, downloaded_file_or_dir in files:
if original_file is not None:
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
# If the file is a file of a needed type, and we've just entered a new directory,
                        # find the nearest metadata file (by counting path segments) for the directory
current_dir = os.path.dirname(original_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is not None # ignore metadata_files that are inside archives
and not os.path.relpath(
original_file, os.path.dirname(metadata_file_candidate)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
                                raise ValueError(
                                    f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                                )
if metadata_dir is not None and downloaded_metadata_file is not None:
file_relpath = os.path.relpath(original_file, metadata_dir)
file_relpath = file_relpath.replace("\\", "/")
if file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[file_relpath]
else:
                            raise ValueError(
                                f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                            )
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_file_or_dir,
**sample_metadata,
**sample_label,
},
)
file_idx += 1
else:
for downloaded_dir_file in downloaded_file_or_dir:
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
current_dir = os.path.dirname(downloaded_dir_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is None # ignore metadata_files that are not inside archives
and not os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(downloaded_metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
                                    raise ValueError(
                                        f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                                    )
if metadata_dir is not None and downloaded_metadata_file is not None:
downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
if downloaded_dir_file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[downloaded_dir_file_relpath]
else:
                                raise ValueError(
                                    f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                                )
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_dir_file,
**sample_metadata,
**sample_label,
},
)
file_idx += 1 | class_definition | 681 | 22,272 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py | null | 166 |
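To make the label-inference rule above concrete (labels are added only when no metadata files are found, the files sit at a single directory depth, and more than one directory name is seen), here is a hedged, self-contained sketch using the `imagefolder` loader, which subclasses this builder; it assumes Pillow is installed and writes throwaway files to a temporary directory:

```python
import os
import tempfile

import PIL.Image
from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
for label in ("cat", "dog"):
    os.makedirs(os.path.join(tmp_dir, "train", label), exist_ok=True)
    for i in range(2):
        # 1x1 placeholder images; the parent directory name becomes the class label.
        PIL.Image.new("RGB", (1, 1)).save(os.path.join(tmp_dir, "train", label, f"{i}.png"))

ds = load_dataset("imagefolder", data_dir=tmp_dir, split="train")
# Expected features: {'image': Image(...), 'label': ClassLabel(names=['cat', 'dog'])}
print(ds.features)
print(ds[0]["label"])
```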
class Cache(datasets.ArrowBasedBuilder):
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
version: Optional[str] = "0.0.0",
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[datasets.DatasetInfo] = None,
features: Optional[datasets.Features] = None,
token: Optional[Union[bool, str]] = None,
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
**config_kwargs,
):
if repo_id is None and dataset_name is None:
raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
if data_files is not None:
config_kwargs["data_files"] = data_files
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
if hash == "auto" and version == "auto":
config_name, version, hash = _find_hash_in_cache(
dataset_name=repo_id or dataset_name,
config_name=config_name,
cache_dir=cache_dir,
config_kwargs=config_kwargs,
custom_features=features,
)
elif hash == "auto" or version == "auto":
raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
super().__init__(
cache_dir=cache_dir,
dataset_name=dataset_name,
config_name=config_name,
version=version,
hash=hash,
base_path=base_path,
info=info,
token=token,
repo_id=repo_id,
storage_options=storage_options,
writer_batch_size=writer_batch_size,
)
def _info(self) -> datasets.DatasetInfo:
return datasets.DatasetInfo()
def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
if not os.path.exists(self.cache_dir):
raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
if output_dir is not None and output_dir != self.cache_dir:
shutil.copytree(self.cache_dir, output_dir)
def _split_generators(self, dl_manager):
# used to stream from cache
if isinstance(self.info.splits, datasets.SplitDict):
split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
else:
raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
return [
datasets.SplitGenerator(
name=split_info.name,
gen_kwargs={
"files": filenames_for_dataset_split(
self.cache_dir,
dataset_name=self.dataset_name,
split=split_info.name,
filetype_suffix="arrow",
shard_lengths=split_info.shard_lengths,
)
},
)
for split_info in split_infos
]
def _generate_tables(self, files):
# used to stream from cache
for file_idx, file in enumerate(files):
with open(file, "rb") as f:
try:
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", pa_table
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | class_definition | 3,991 | 8,208 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/cache/cache.py | null | 167 |
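A heavily hedged sketch of using this builder directly to reload an already-prepared dataset from the local cache; the cache directory and dataset name are hypothetical and must point at an existing cache entry created by an earlier `load_dataset` call:

```python
from datasets.packaged_modules.cache.cache import Cache

builder = Cache(
    cache_dir="/path/to/huggingface/datasets/cache",  # hypothetical cache location
    dataset_name="my_dataset",                        # hypothetical dataset name
    version="auto",
    hash="auto",  # with version="auto", the config/version/hash are looked up in the cache
)
builder.download_and_prepare()          # only checks that the cached files exist
ds = builder.as_dataset(split="train")  # read back from the cached Arrow files
print(ds)
```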
class ArrowConfig(datasets.BuilderConfig):
"""BuilderConfig for Arrow."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__() | class_definition | 224 | 413 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/arrow/arrow.py | null | 168 |
class Arrow(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ArrowConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
try:
reader = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
self.info.features = datasets.Features.from_arrow_schema(reader.schema)
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
try:
try:
batches = pa.ipc.open_stream(f)
except (OSError, pa.lib.ArrowInvalid):
reader = pa.ipc.open_file(f)
batches = (reader.get_batch(i) for i in range(reader.num_record_batches))
for batch_idx, record_batch in enumerate(batches):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | class_definition | 416 | 3,493 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/arrow/arrow.py | null | 169 |
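A self-contained sketch of the `arrow` loader: write a small table in the Arrow IPC stream format with pyarrow, then read it back through `load_dataset`; the file name is arbitrary:

```python
import os
import tempfile

import pyarrow as pa
from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "data.arrow")

# The builder first tries pa.ipc.open_stream, so write an IPC stream file.
table = pa.table({"id": [1, 2, 3], "text": ["a", "b", "c"]})
with pa.ipc.new_stream(path, table.schema) as writer:
    writer.write_table(table)

ds = load_dataset("arrow", data_files={"train": path}, split="train")
print(ds.features)  # inferred from the Arrow schema
print(ds[0])        # {'id': 1, 'text': 'a'}
```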
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__() | class_definition | 155 | 390 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py | null | 170 |
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script | class_definition | 393 | 630 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/imagefolder/imagefolder.py | null | 171 |
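Complementing the label example after `FolderBasedBuilder` above, a hedged sketch of the `metadata.csv` path: the mandatory `file_name` column links each row to an image relative to the metadata file, and the remaining columns become extra features (labels are then dropped unless `drop_labels=False` is passed). Assumes Pillow is installed:

```python
import os
import tempfile

import PIL.Image
from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
train_dir = os.path.join(tmp_dir, "train")
os.makedirs(train_dir, exist_ok=True)
for name in ("0001.png", "0002.png"):
    PIL.Image.new("RGB", (1, 1)).save(os.path.join(train_dir, name))

# "file_name" is required and must be a string column; other columns become features.
with open(os.path.join(train_dir, "metadata.csv"), "w") as f:
    f.write("file_name,caption\n0001.png,a red square\n0002.png,a blue square\n")

ds = load_dataset("imagefolder", data_dir=tmp_dir, split="train")
print(ds.features)       # e.g. {'image': Image(...), 'caption': Value('string')}
print(ds[0]["caption"])  # 'a red square'
```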
class XmlConfig(datasets.BuilderConfig):
"""BuilderConfig for xml files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None | class_definition | 284 | 483 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/xml/xml.py | null | 172 |
class Xml(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = XmlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"xml": pa.string()}))
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["xml"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
xml = f.read()
pa_table = pa.Table.from_arrays([pa.array([xml])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table) | class_definition | 486 | 2,821 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/xml/xml.py | null | 173 |
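A self-contained sketch of the `xml` loader: the builder does no parsing, it simply puts the raw content of each file into a single `xml` column (one row per file):

```python
import os
import tempfile

from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "doc.xml")
with open(path, "w", encoding="utf-8") as f:
    f.write("<note><to>Ada</to><body>Hello</body></note>")

ds = load_dataset("xml", data_files={"train": path}, split="train")
print(ds.column_names)  # ['xml']
print(ds[0]["xml"])     # the whole file content as one string
```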
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
split: datasets.NamedSplit = datasets.Split.TRAIN
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {} | class_definition | 102 | 557 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/generator/generator.py | null | 174 |
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex | class_definition | 560 | 1,032 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/generator/generator.py | null | 175 |
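This builder is what backs `Dataset.from_generator`; a minimal sketch:

```python
from datasets import Dataset

def squares(n):
    # Any generator of dict examples works; gen_kwargs are forwarded to it.
    for i in range(n):
        yield {"x": i, "x_squared": i * i}

ds = Dataset.from_generator(squares, gen_kwargs={"n": 5})
print(ds.features)
print(ds[:3])  # {'x': [0, 1, 2], 'x_squared': [0, 1, 4]}
```

By default the resulting split is `train`, matching `GeneratorConfig.split`.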
class JsonConfig(datasets.BuilderConfig):
"""BuilderConfig for JSON."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
field: Optional[str] = None
use_threads: bool = True # deprecated
block_size: Optional[int] = None # deprecated
chunksize: int = 10 << 20 # 10MB
newlines_in_values: Optional[bool] = None
def __post_init__(self):
super().__post_init__() | class_definition | 1,077 | 1,544 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/json/json.py | null | 176 |
class Json(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = JsonConfig
def _info(self):
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
)
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
type = self.config.features.arrow_schema.field(column_name).type
pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# If the file is one json object and if we need to look at the items in one specific field
if self.config.field is not None:
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
dataset = ujson_loads(f.read())
# We keep only the field we are interested in
dataset = dataset[self.config.field]
df = pandas_read_json(io.StringIO(ujson_dumps(dataset)))
if df.columns.tolist() == [0]:
df.columns = list(self.config.features) if self.config.features else ["text"]
pa_table = pa.Table.from_pandas(df, preserve_index=False)
yield file_idx, self._cast_table(pa_table)
# If the file has one json object per line
else:
with open(file, "rb") as f:
batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
block_size = max(self.config.chunksize // 32, 16 << 10)
encoding_errors = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(f)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
try:
while True:
try:
pa_table = paj.read_json(
io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
)
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(e, pa.ArrowInvalid)
and "straddling" not in str(e)
or block_size > len(batch)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
)
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
file, encoding=self.config.encoding, errors=self.config.encoding_errors
) as f:
df = pandas_read_json(f)
except ValueError:
logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}")
raise e
if df.columns.tolist() == [0]:
df.columns = list(self.config.features) if self.config.features else ["text"]
try:
pa_table = pa.Table.from_pandas(df, preserve_index=False)
except pa.ArrowInvalid as e:
logger.error(
f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}"
)
raise ValueError(
f"Failed to convert pandas DataFrame to Arrow Table from file {file}."
) from None
yield file_idx, self._cast_table(pa_table)
break
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1 | class_definition | 1,547 | 8,697 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/json/json.py | null | 177 |
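A self-contained sketch covering both branches of `_generate_tables`: JSON Lines files are parsed in chunks with pyarrow, while `field=...` selects a nested list inside a single JSON document:

```python
import json
import os
import tempfile

from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()

# One JSON object per line (the chunked pyarrow branch).
jsonl_path = os.path.join(tmp_dir, "data.jsonl")
with open(jsonl_path, "w") as f:
    f.write('{"id": 1, "text": "a"}\n{"id": 2, "text": "b"}\n')

# A single JSON document whose "data" field holds the examples (the `field` branch).
nested_path = os.path.join(tmp_dir, "nested.json")
with open(nested_path, "w") as f:
    json.dump({"version": "0.1", "data": [{"id": 3, "text": "c"}]}, f)

ds_lines = load_dataset("json", data_files=jsonl_path, split="train")
ds_field = load_dataset("json", data_files=nested_path, field="data", split="train")
print(ds_lines[0])  # {'id': 1, 'text': 'a'}
print(ds_field[0])  # {'id': 3, 'text': 'c'}
```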
class CsvConfig(datasets.BuilderConfig):
"""BuilderConfig for CSV."""
sep: str = ","
delimiter: Optional[str] = None
header: Optional[Union[int, List[int], str]] = "infer"
names: Optional[List[str]] = None
column_names: Optional[List[str]] = None
index_col: Optional[Union[int, str, List[int], List[str]]] = None
usecols: Optional[Union[List[int], List[str]]] = None
prefix: Optional[str] = None
mangle_dupe_cols: bool = True
engine: Optional[Literal["c", "python", "pyarrow"]] = None
converters: Dict[Union[int, str], Callable[[Any], Any]] = None
true_values: Optional[list] = None
false_values: Optional[list] = None
skipinitialspace: bool = False
skiprows: Optional[Union[int, List[int]]] = None
nrows: Optional[int] = None
na_values: Optional[Union[str, List[str]]] = None
keep_default_na: bool = True
na_filter: bool = True
verbose: bool = False
skip_blank_lines: bool = True
thousands: Optional[str] = None
decimal: str = "."
lineterminator: Optional[str] = None
quotechar: str = '"'
quoting: int = 0
escapechar: Optional[str] = None
comment: Optional[str] = None
encoding: Optional[str] = None
dialect: Optional[str] = None
error_bad_lines: bool = True
warn_bad_lines: bool = True
skipfooter: int = 0
doublequote: bool = True
memory_map: bool = False
float_precision: Optional[str] = None
chunksize: int = 10_000
features: Optional[datasets.Features] = None
encoding_errors: Optional[str] = "strict"
on_bad_lines: Literal["error", "warn", "skip"] = "error"
date_format: Optional[str] = None
def __post_init__(self):
super().__post_init__()
if self.delimiter is not None:
self.sep = self.delimiter
if self.column_names is not None:
self.names = self.column_names
@property
def pd_read_csv_kwargs(self):
pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.2 deprecated arguments
if datasets.config.PANDAS_VERSION.release >= (2, 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs | class_definition | 757 | 5,745 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py | null | 178 |
class Csv(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = CsvConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self, files):
schema = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
dtype = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(csv_file_reader):
pa_table = pa.Table.from_pandas(df)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | class_definition | 5,748 | 8,579 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/csv/csv.py | null | 179 |
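A self-contained sketch of the `csv` loader; `delimiter` and `column_names` are convenience aliases that `CsvConfig.__post_init__` maps onto pandas' `sep` and `names`:

```python
import os
import tempfile

from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "data.csv")
with open(path, "w") as f:
    f.write("id;text\n1;hello\n2;world\n")

# `delimiter` ends up as pandas.read_csv(sep=";") via pd_read_csv_kwargs.
ds = load_dataset("csv", data_files={"train": path}, delimiter=";", split="train")
print(ds.column_names)  # ['id', 'text']
print(ds[0])            # {'id': 1, 'text': 'hello'}
```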
class TextConfig(datasets.BuilderConfig):
"""BuilderConfig for text files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
chunksize: int = 10 << 20 # 10MB
keep_linebreaks: bool = False
sample_by: str = "line" | class_definition | 308 | 609 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py | null | 180 |
class Text(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = TextConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"text": pa.string()}))
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
if self.config.sample_by == "line":
batch_idx = 0
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
# StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
batch = StringIO(batch).readlines()
if not self.config.keep_linebreaks:
batch = [line.rstrip("\n") for line in batch]
pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
elif self.config.sample_by == "paragraph":
batch_idx = 0
batch = ""
while True:
new_batch = f.read(self.config.chunksize)
if not new_batch:
break
batch += new_batch
batch += f.readline() # finish current line
batch = batch.split("\n\n")
pa_table = pa.Table.from_arrays(
[pa.array([example for example in batch[:-1] if example])], names=pa_table_names
)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
batch = batch[-1]
if batch:
pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
yield (file_idx, batch_idx), self._cast_table(pa_table)
elif self.config.sample_by == "document":
text = f.read()
pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table) | class_definition | 612 | 5,515 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py | null | 181 |
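A self-contained sketch of the three `sample_by` modes: `"line"` (default), `"paragraph"` (split on blank lines), and `"document"` (one row per file):

```python
import os
import tempfile

from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "data.txt")
with open(path, "w") as f:
    f.write("first line\nsecond line\n\nsecond paragraph line\n")

by_line = load_dataset("text", data_files=path, split="train")
by_paragraph = load_dataset("text", data_files=path, sample_by="paragraph", split="train")
by_document = load_dataset("text", data_files=path, sample_by="document", split="train")
# Roughly: 4 line rows (the empty line is kept), 2 paragraph rows, 1 document row.
print(len(by_line), len(by_paragraph), len(by_document))
```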
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: Optional[int] = None
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
filters: Optional[Union[ds.Expression, List[tuple], List[List[tuple]]]] = None
def __post_init__(self):
super().__post_init__() | class_definition | 295 | 648 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/parquet/parquet.py | null | 182 |
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
            raise ValueError(
                f"The columns and features argument must contain the same columns, but got "
                f"{self.config.columns} and {self.config.features}"
            )
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle strings, lists and dicts in data_files."""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
filter_expr = (
pq.filters_to_expression(self.config.filters)
if isinstance(self.config.filters, list)
else self.config.filters
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_fragment = ds.ParquetFileFormat().make_fragment(f)
if parquet_fragment.row_groups:
batch_size = self.config.batch_size or parquet_fragment.row_groups[0].num_rows
try:
for batch_idx, record_batch in enumerate(
parquet_fragment.to_batches(
batch_size=batch_size,
columns=self.config.columns,
filter=filter_expr,
batch_readahead=0,
fragment_readahead=0,
)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise | class_definition | 651 | 5,104 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/parquet/parquet.py | null | 183 |
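A self-contained sketch of the `parquet` loader, showing the `columns` projection and a `filters` expression, both of which are pushed down to the pyarrow fragment reader above:

```python
import os
import tempfile

import pyarrow as pa
import pyarrow.parquet as pq
from datasets import load_dataset

tmp_dir = tempfile.mkdtemp()
path = os.path.join(tmp_dir, "data.parquet")
pq.write_table(pa.table({"id": [1, 2, 3], "text": ["a", "b", "c"]}), path)

# Only read the "id" column, and only rows where id >= 2.
ds = load_dataset(
    "parquet",
    data_files={"train": path},
    columns=["id"],
    filters=[("id", ">=", 2)],
    split="train",
)
print(ds.column_names)  # ['id']
print(ds[:])            # {'id': [2, 3]}
```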
class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for VideoFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__() | class_definition | 155 | 390 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/videofolder/videofolder.py | null | 184 |
class VideoFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Video
BASE_COLUMN_NAME = "video"
BUILDER_CONFIG_CLASS = VideoFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script | class_definition | 393 | 630 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/videofolder/videofolder.py | null | 185 |
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__() | class_definition | 155 | 391 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py | null | 186 |
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script | class_definition | 394 | 631 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/audiofolder/audiofolder.py | null | 187 |
class SparkConfig(datasets.BuilderConfig):
"""BuilderConfig for Spark."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__() | class_definition | 702 | 891 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/spark/spark.py | null | 188 |
class SparkExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
df: "pyspark.sql.DataFrame",
partition_order=None,
):
super().__init__()
self.df = df
self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
def _init_state_dict(self) -> dict:
self._state_dict = {"partition_idx": 0, "partition_example_idx": 0}
return self._state_dict
@experimental
def load_state_dict(self, state_dict: dict) -> dict:
return super().load_state_dict(state_dict)
def __iter__(self):
yield from _generate_iterable_examples(self.df, self.partition_order, self._state_dict)
def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
partition_order = list(range(self.df.rdd.getNumPartitions()))
generator.shuffle(partition_order)
return SparkExamplesIterable(self.df, partition_order=partition_order)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SparkExamplesIterable":
partition_order = self.split_shard_indices_by_worker(num_shards=num_shards, index=index, contiguous=contiguous)
return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
def num_shards(self) -> int:
return len(self.partition_order) | class_definition | 2,454 | 3,828 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/spark/spark.py | null | 189 |
class Spark(datasets.DatasetBuilder):
BUILDER_CONFIG_CLASS = SparkConfig
def __init__(
self,
df: "pyspark.sql.DataFrame",
cache_dir: str = None,
working_dir: str = None,
**config_kwargs,
):
import pyspark
self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
self.df = df
self._working_dir = working_dir
super().__init__(
cache_dir=cache_dir,
config_name=str(self.df.semanticHash()),
**config_kwargs,
)
def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which would result in a
        # pickling error because the SparkContext cannot be pickled.
cache_dir = self._cache_dir
# Returns the path of the created file.
def create_cache_and_write_probe(context):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(cache_dir, exist_ok=True)
probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file, "a")
return [probe_file]
if self._spark.conf.get("spark.master", "").startswith("local"):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
)
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _repartition_df_if_needed(self, max_shard_size):
import pyspark
def get_arrow_batch_size(it):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
df_num_rows = self.df.count()
sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
approx_bytes_per_row = (
self.df.limit(sample_num_rows)
.repartition(1)
.mapInArrow(get_arrow_batch_size, "batch_bytes: long")
.agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
self.df = self.df.repartition(new_num_partitions)
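        # Illustrative numbers for the heuristic above (not measured): with df_num_rows = 1_000_000
        # and a sampled average of ~200 Arrow bytes per row, approx_total_size is ~200 MB; for
        # max_shard_size = 50 MB this repartitions into min(1_000_000, int(200e6 / 50e6)) = 4
        # partitions, so each partition produces roughly one shard of output.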
def _prepare_split_single(
self,
fpath: str,
file_format: str,
max_shard_size: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
embed_local_files = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id = 0
writer = writer_class(
features=features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([first_batch])
writer.write_table(table)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id += 1
writer = writer_class(
features=writer._features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([batch])
writer.write_table(table)
if writer._num_bytes > 0:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
            if working_fpath != fpath:
                # Move the shards written to the executor-local working dir to their final location.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)
stats = (
self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
.groupBy("task_id")
.agg(
pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
pyspark.sql.functions.count("num_bytes").alias("num_shards"),
pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split(
self,
split_generator: "datasets.SplitGenerator",
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(max_shard_size)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = path_join(self._output_dir, fname)
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
(
num_examples,
num_bytes,
num_shards,
shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(shard_lengths)
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id: int,
shard_id: int,
global_shard_id: int,
):
rename(
fs,
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards)):
task_id, num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace(SUFFIX, ""),
)
def _get_examples_iterable_for_split(
self,
split_generator: "datasets.SplitGenerator",
) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df) | class_definition | 3,831 | 14,674 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/spark/spark.py | null | 190 |
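A rough end-to-end sketch of how this builder is normally driven. `Dataset.from_spark` is the public wrapper in `datasets`; the cache path in the last line is hypothetical.

```python
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.range(100)  # single bigint column "id"

# Materializes the DataFrame into Arrow files in the datasets cache and returns a map-style Dataset.
ds = Dataset.from_spark(df)
print(len(ds), ds[0])

# On a multi-node cluster, cache_dir must point to a filesystem (e.g. NFS) reachable by the
# driver and every worker, otherwise _validate_cache_dir raises a ValueError.
# ds = Dataset.from_spark(df, cache_dir="/mnt/shared/datasets_cache")  # hypothetical path
```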
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__() | class_definition | 205 | 396 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/pandas/pandas.py | null | 191 |
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table) | class_definition | 399 | 2,546 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/pandas/pandas.py | null | 192 |
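A minimal sketch of what this (deprecated) builder consumes: pickled `pandas.DataFrame` files, one table per file. The file name below is made up.

```python
import pandas as pd

from datasets import load_dataset

pd.DataFrame({"id": [1, 2, 3], "text": ["a", "b", "c"]}).to_pickle("train.pkl")

# Each pickle is read back with pd.read_pickle and converted to an Arrow table.
ds = load_dataset("pandas", data_files={"train": "train.pkl"}, split="train")
print(ds.features)
print(ds[0])  # {'id': 1, 'text': 'a'}
```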
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified")
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs | class_definition | 424 | 3,183 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/sql/sql.py | null | 193 |
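A hedged sketch of the stringification step used in `create_config_id`: a SQLAlchemy `Selectable` is compiled against a dialect-only engine derived from the scheme of `con`, which gives a deterministic string to hash. The table and column names are hypothetical.

```python
import sqlalchemy

table = sqlalchemy.table("my_table", sqlalchemy.column("id"), sqlalchemy.column("text"))
selectable = sqlalchemy.select(table.c.id, table.c.text).where(table.c.id > 10)

# "sqlite://" is what `con.split("://")[0] + "://"` yields for a sqlite URI; no connection is opened.
engine = sqlalchemy.create_engine("sqlite://")
print(str(selectable.compile(dialect=engine.dialect)))
# e.g. SELECT my_table.id, my_table.text FROM my_table WHERE my_table.id > ?
```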
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table) | class_definition | 3,186 | 4,513 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/sql/sql.py | null | 194 |
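A rough usage sketch: `Dataset.from_sql` drives this builder and pulls the query through `pd.read_sql` in chunks of `chunksize` rows. The sqlite file name is hypothetical, and SQLAlchemy is assumed to be installed since `con` is given as a URI string.

```python
import sqlite3

import pandas as pd

from datasets import Dataset

conn = sqlite3.connect("example.db")
pd.DataFrame({"id": list(range(5)), "text": list("abcde")}).to_sql("my_table", conn, index=False)
conn.close()

ds = Dataset.from_sql("SELECT id, text FROM my_table", con="sqlite:///example.db", chunksize=2)
print(ds.num_rows)  # 5, assembled from three chunks of at most 2 rows each
```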
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
    Raised when trying to use the dataset viewer HTTP API to access:
    - a missing dataset,
    - a private/gated dataset while the user is not authenticated, or
    - unavailable /parquet or /info responses.
""" | class_definition | 295 | 597 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/_dataset_viewer.py | null | 195 |
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping | class_definition | 469 | 1,136 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/metadata.py | null | 196 |
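A small illustration of the duplicate-key guard, using the loader defined above (plain `yaml.SafeLoader` would silently keep the last value instead).

```python
import yaml

good = "configs:\n  - config_name: default\n"
bad = "configs: 1\nconfigs: 2\n"

print(yaml.load(good, Loader=_NoDuplicateSafeLoader))  # {'configs': [{'config_name': 'default'}]}

try:
    yaml.load(bad, Loader=_NoDuplicateSafeLoader)
except TypeError as err:
    print(err)  # Got duplicate yaml keys: ['configs']
```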
class MetadataConfigs(Dict[str, Dict[str, Any]]):
"""Should be in format {config_name: {**config_params}}."""
FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
@staticmethod
def _raise_if_data_files_field_not_valid(metadata_config: dict):
yaml_data_files = metadata_config.get("data_files")
if yaml_data_files is not None:
yaml_error_message = textwrap.dedent(
f"""
Expected data_files in YAML to be either a string or a list of strings
or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
Examples of data_files in YAML:
data_files: data.csv
data_files: data/*.png
data_files:
- part0/*
- part1/*
data_files:
- split: train
path: train/*
- split: test
path: test/*
data_files:
- split: train
path:
- train/part1/*
- train/part2/*
- split: test
path: test/*
PS: some symbols like dashes '-' are not allowed in split names
"""
)
if not isinstance(yaml_data_files, (list, str)):
raise ValueError(yaml_error_message)
if isinstance(yaml_data_files, list):
for yaml_data_files_item in yaml_data_files:
if (
not isinstance(yaml_data_files_item, (str, dict))
or isinstance(yaml_data_files_item, dict)
and not (
len(yaml_data_files_item) == 2
and "split" in yaml_data_files_item
and re.match(_split_re, yaml_data_files_item["split"])
and isinstance(yaml_data_files_item.get("path"), (str, list))
)
):
raise ValueError(yaml_error_message)
@classmethod
def _from_exported_parquet_files_and_dataset_infos(
cls,
parquet_commit_hash: str,
exported_parquet_files: List[Dict[str, Any]],
dataset_infos: DatasetInfosDict,
) -> "MetadataConfigs":
metadata_configs = {
config_name: {
"data_files": [
{
"split": split_name,
"path": [
parquet_file["url"].replace("refs%2Fconvert%2Fparquet", parquet_commit_hash)
for parquet_file in parquet_files_for_split
],
}
for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
],
"version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
}
for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
}
if dataset_infos:
# Preserve order of configs and splits
metadata_configs = {
config_name: {
"data_files": [
data_file
for split_name in dataset_info.splits
for data_file in metadata_configs[config_name]["data_files"]
if data_file["split"] == split_name
],
"version": metadata_configs[config_name]["version"],
}
for config_name, dataset_info in dataset_infos.items()
}
return cls(metadata_configs)
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
if dataset_card_data.get(cls.FIELD_NAME):
metadata_configs = dataset_card_data[cls.FIELD_NAME]
if not isinstance(metadata_configs, list):
raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
for metadata_config in metadata_configs:
if "config_name" not in metadata_config:
raise ValueError(
f"Each config must include `config_name` field with a string name of a config, "
f"but got {metadata_config}. "
)
cls._raise_if_data_files_field_not_valid(metadata_config)
return cls(
{
config.pop("config_name"): {
param: value if param != "features" else Features._from_yaml_list(value)
for param, value in config.items()
}
for metadata_config in metadata_configs
if (config := metadata_config.copy())
}
)
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
for metadata_config in self.values():
self._raise_if_data_files_field_not_valid(metadata_config)
current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
for config_name, config_metadata in total_metadata_configs.items():
config_metadata.pop("config_name", None)
dataset_card_data[self.FIELD_NAME] = [
{"config_name": config_name, **config_metadata}
for config_name, config_metadata in total_metadata_configs.items()
]
def get_default_config_name(self) -> Optional[str]:
default_config_name = None
for config_name, metadata_config in self.items():
if len(self) == 1 or config_name == "default" or metadata_config.get("default"):
if default_config_name is None:
default_config_name = config_name
else:
raise ValueError(
f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
)
return default_config_name | class_definition | 1,567 | 7,999 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/metadata.py | null | 197 |
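A hedged round-trip sketch for the class above, assuming `huggingface_hub.DatasetCardData`; config names and paths are invented.

```python
from huggingface_hub import DatasetCardData

card_data = DatasetCardData(
    configs=[
        {"config_name": "en", "data_files": [{"split": "train", "path": "en/train/*"}]},
        {"config_name": "fr", "data_files": "fr/*.csv", "default": True},
    ]
)

metadata_configs = MetadataConfigs.from_dataset_card_data(card_data)
print(list(metadata_configs))                      # ['en', 'fr']
print(metadata_configs.get_default_config_name())  # fr

# Writing back validates data_files, merges with any existing configs and sorts by config name.
metadata_configs.to_dataset_card_data(card_data)
```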
class Version:
"""Dataset version `MAJOR.MINOR.PATCH`.
Args:
version_str (`str`):
The dataset version.
description (`str`):
A description of what is new in this version.
        major (`str`):
            Major component, parsed from `version_str`.
        minor (`str`):
            Minor component, parsed from `version_str`.
        patch (`str`):
            Patch component, parsed from `version_str`.
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__(self):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str | class_definition | 928 | 2,716 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/version.py | null | 198 |
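A quick behavioral sketch; in the library this class is decorated with `@dataclass`, which the positional constructor below relies on.

```python
from datasets import Version

v = Version("2.15.0")
print(v.major, v.minor, v.patch)  # 2 15 0
print(repr(v))                    # 2.15.0
print(v == "2.15.0")              # True: strings are coerced via _validate_operand
print(v < Version("2.16.0"))      # True: tuple comparison
print(v == "not a version")       # False: the parsing error is swallowed by __eq__
```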
class tracked_str(str):
origins = {}
def set_origin(self, origin: str):
if super().__repr__() not in self.origins:
self.origins[super().__repr__()] = origin
def get_origin(self):
return self.origins.get(super().__repr__(), str(self))
def __repr__(self) -> str:
if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
return super().__repr__()
else:
return f"{str(self)} (origin={self.origins[super().__repr__()]})" | class_definition | 67 | 599 | 0 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/track.py | null | 199 |