self._state_dict["shard_example_idx"] = 0
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ArrowExamplesIterable":
    """Keep only the requested shard."""
    rng = deepcopy(self.generator)
    kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
    return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
        num_shards, index, contiguous=contiguous
    )
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class RebatchedArrowExamplesIterable(_BaseExamplesIterable):
    def __init__(self, ex_iterable: _BaseExamplesIterable, batch_size: Optional[int], drop_last_batch: bool = False):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.batch_size = batch_size
        self.drop_last_batch = drop_last_batch

    @property
    def iter_arrow(self):
        return self._iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterable.is_typed

    @property
    def features(self):
        return self.ex_iterable.features

    def _init_state_dict(self) -> dict:
        self._state_dict = {
            "ex_iterable": self.ex_iterable._init_state_dict(),
            "previous_state": None,
            "batch_idx": 0,
            "num_chunks_since_previous_state": 0,
            "cropped_chunk_length": 0,
        }
        return self._state_dict

    def __iter__(self):
        yield from self.ex_iterable
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: """Iterate over sub-tables of size `batch_size`.""" if self._state_dict and self._state_dict["previous_state"]: self.ex_iterable.load_state_dict(self._state_dict["previous_state"]) if self.ex_iterable.iter_arrow: iterator = self.ex_iterable.iter_arrow() else: iterator = _convert_to_arrow(self.ex_iterable, batch_size=1) if self.batch_size is None or self.batch_size <= 0: if self._state_dict and self._state_dict["batch_idx"] > 0: return all_pa_table = pa.concat_tables([pa_table for _, pa_table in iterator]) if self._state_dict: self._state_dict["batch_idx"] = 1 yield "all", all_pa_table return keys_buffer = [] chunks_buffer = [] chunks_buffer_size = 0 num_chunks_to_skip = self._state_dict["num_chunks_since_previous_state"] if self._state_dict else 0
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
chunk_length_to_crop = self._state_dict["cropped_chunk_length"] if self._state_dict else 0 if self._state_dict: previous_state = self.ex_iterable.state_dict() self._state_dict["previous_state"] = previous_state for key, pa_table in iterator: for num_chunks_since_previous_state, chunk in enumerate(pa_table.to_reader(max_chunksize=self.batch_size)): if num_chunks_to_skip > 1: num_chunks_to_skip -= 1 continue elif num_chunks_to_skip == 1 and chunk_length_to_crop == 0: num_chunks_to_skip -= 1 continue elif num_chunks_to_skip == 1 and chunk_length_to_crop > 0: chunk = chunk.slice(chunk_length_to_crop, len(chunk) - chunk_length_to_crop) num_chunks_to_skip = 0 chunk_length_to_crop = 0 if len(chunk) == 0: continue
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if chunks_buffer_size + len(chunk) < self.batch_size: keys_buffer.append(key) chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == self.batch_size: keys_buffer.append(key) chunks_buffer.append(chunk) new_key = "_".join(str(_key) for _key in keys_buffer) if self._state_dict: self._state_dict["batch_idx"] += 1 self._state_dict["num_chunks_since_previous_state"] += len(chunks_buffer) self._state_dict["cropped_chunk_length"] = 0 yield new_key, pa.Table.from_batches(chunks_buffer) keys_buffer = [] chunks_buffer = [] chunks_buffer_size = 0 if self._state_dict:
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
self._state_dict["previous_state"] = previous_state self._state_dict["num_chunks_since_previous_state"] = num_chunks_since_previous_state + 1 else: cropped_chunk_length = self.batch_size - chunks_buffer_size keys_buffer.append(f"{key}[:{cropped_chunk_length}]") chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) new_key = "_".join(str(_key) for _key in keys_buffer) if self._state_dict: self._state_dict["batch_idx"] += 1 self._state_dict["num_chunks_since_previous_state"] += len(chunks_buffer) self._state_dict["cropped_chunk_length"] = cropped_chunk_length yield new_key, pa.Table.from_batches(chunks_buffer) keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if self._state_dict: self._state_dict["previous_state"] = previous_state self._state_dict["num_chunks_since_previous_state"] = num_chunks_since_previous_state if self._state_dict: previous_state = self.ex_iterable.state_dict() if not self.drop_last_batch and chunks_buffer: new_key = "_".join(str(_key) for _key in keys_buffer) if self._state_dict: self._state_dict["previous_state"] = previous_state self._state_dict["batch_idx"] += 1 self._state_dict["num_chunks_since_previous_state"] = 0 self._state_dict["cropped_chunk_length"] = 0 yield new_key, pa.Table.from_batches(chunks_buffer)
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "RebatchedArrowExamplesIterable":
    return RebatchedArrowExamplesIterable(
        self.ex_iterable.shuffle_data_sources(generator), self.batch_size, self.drop_last_batch
    )

def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "RebatchedArrowExamplesIterable":
    return RebatchedArrowExamplesIterable(
        self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
        self.batch_size,
        self.drop_last_batch,
    )

@property
def num_shards(self) -> int:
    return self.ex_iterable.num_shards
17
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
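The regrouping logic in this class can be hard to follow from the flattened excerpts above, so here is a minimal, simplified sketch of the same idea: splitting and re-concatenating Arrow record batches so that each yielded table has exactly `batch_size` rows. The `rebatch` helper is hypothetical and only illustrates the technique; it is not the library class.

import pyarrow as pa

def rebatch(tables, batch_size):
    """Regroup a stream of Arrow tables into tables of exactly `batch_size` rows (the last one may be smaller)."""
    buffer, buffered = [], 0
    for table in tables:
        for chunk in table.to_reader(max_chunksize=batch_size):
            while len(chunk) > 0:
                take = min(batch_size - buffered, len(chunk))
                buffer.append(chunk.slice(0, take))
                buffered += take
                chunk = chunk.slice(take)
                if buffered == batch_size:
                    yield pa.Table.from_batches(buffer)
                    buffer, buffered = [], 0
    if buffer:  # leftover rows, analogous to drop_last_batch=False
        yield pa.Table.from_batches(buffer)

tables = [pa.table({"a": list(range(5))}), pa.table({"a": list(range(5, 12))})]
print([len(t) for t in rebatch(tables, batch_size=4)])  # [4, 4, 4]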
class SelectColumnsIterable(_BaseExamplesIterable):
    def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.column_names = column_names

    @property
    def iter_arrow(self):
        if self.ex_iterable.iter_arrow:
            return self._iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterable.is_typed

    @property
    def features(self):
        return self.ex_iterable.features

    def _init_state_dict(self) -> dict:
        self._state_dict = self.ex_iterable._init_state_dict()
        return self._state_dict

    def __iter__(self):
        for idx, row in self.ex_iterable:
            yield idx, {c: row[c] for c in self.column_names}
18
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
    for idx, pa_table in self.ex_iterable.iter_arrow():
        if len(pa_table) > 0:  # empty tables have no schema
            yield idx, pa_table.select(self.column_names)

def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
    return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)

def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SelectColumnsIterable":
    return SelectColumnsIterable(
        self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), self.column_names
    )

@property
def num_shards(self) -> int:
    return self.ex_iterable.num_shards
18
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
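This iterable backs column selection on streaming datasets. A brief usage sketch of the public API, assuming a recent `datasets` version where `IterableDataset.select_columns` is available:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]}).to_iterable_dataset()
ds = ds.select_columns(["text"])  # keeps only the requested columns, lazily
print(next(iter(ds)))  # {'text': 'a'}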
class StepExamplesIterable(_BaseExamplesIterable):
    def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.step = step
        self.offset = offset
        # TODO(QL): implement iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterable.is_typed

    @property
    def features(self):
        return self.ex_iterable.features

    def _init_state_dict(self) -> dict:
        self._state_dict = self.ex_iterable._init_state_dict()
        return self._state_dict

    def __iter__(self):
        ex_iterator = iter(self.ex_iterable)
        while True:
            batch = list(islice(ex_iterator, self.step))
            if len(batch) > self.offset:
                yield batch[self.offset]
            else:
                break
19
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable": return StepExamplesIterable( self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset ) def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "StepExamplesIterable": return StepExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), step=self.step, offset=self.offset, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
19
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
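The step/offset pattern above amounts to a strided selection over the stream: read `step` examples at a time and keep the one at position `offset`. A small standalone illustration with plain `islice`, which mirrors the batch-and-pick logic but is not the class itself:

from itertools import islice

examples = range(10)
step, offset = 4, 1  # e.g. keep every 4th example starting at position 1
print(list(islice(examples, offset, None, step)))  # [1, 5, 9]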
class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        ex_iterables: List[_BaseExamplesIterable],
        stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
    ):
        super().__init__()
        self.ex_iterables = ex_iterables
        self.stopping_strategy = stopping_strategy

        # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
        self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
        # TODO(QL): implement iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterables[0].is_typed

    @property
    def features(self):
        return self.ex_iterables[0].features
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _get_indices_iterator(self):
    # this is an infinite iterator to keep track of which iterator we want to pick examples from
    ex_iterable_idx = self._state_dict["ex_iterable_idx"] if self._state_dict else 0
    for next_ex_iterable_idx in islice(cycle(range(len(self.ex_iterables))), ex_iterable_idx + 1, None):
        if self._state_dict:
            self._state_dict["ex_iterable_idx"] = next_ex_iterable_idx
        yield ex_iterable_idx
        ex_iterable_idx = next_ex_iterable_idx

def _init_state_dict(self) -> dict:
    self._state_dict = {
        "ex_iterable_idx": 0,
        "ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables],
        "previous_states": [None] * len(self.ex_iterables),
        "is_exhausted": [False] * len(self.ex_iterables),
    }
    return self._state_dict
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self):
    # we use this to buffer one example of each iterator to know if an iterator is exhausted
    nexts = [None] * len(self.ex_iterables)
    # because of that, we need to rewind 1 example when reloading the state dict
    if self._state_dict:
        for i in range(len(self.ex_iterables)):
            if self._state_dict["previous_states"][i] is not None:
                self.ex_iterables[i].load_state_dict(self._state_dict["previous_states"][i])
    iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]

    indices_iterator = self._get_indices_iterator()
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
    is_exhausted = (
        np.array(self._state_dict["is_exhausted"]) if self._state_dict else np.full(len(self.ex_iterables), False)
    )
    for i in indices_iterator:
        # if the stopping criteria is met, break the main for loop
        if self.bool_strategy_func(is_exhausted):
            break
        # let's pick one example from the iterator at index i
        if nexts[i] is None:
            nexts[i] = next(iterators[i], False)
        result = nexts[i]
        if self._state_dict:
            self._state_dict["previous_states"][i] = deepcopy(self._state_dict["ex_iterables"][i])
        nexts[i] = next(iterators[i], False)
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
        # the iterator is exhausted
        if nexts[i] is False:
            is_exhausted[i] = True
            if self._state_dict:
                self._state_dict["is_exhausted"][i] = True
            # we reset it in case the stopping criteria isn't met yet
            nexts[i] = None
            if self._state_dict:
                self._state_dict["ex_iterables"][i] = self.ex_iterables[i]._init_state_dict()
                self._state_dict["previous_states"][i] = None
            iterators[i] = iter(self.ex_iterables[i])

        if result is not False:
            yield result

def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
    """Shuffle each underlying examples iterable."""
    ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
    return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
@property
def num_shards(self) -> int:
    return min(ex_iterable.num_shards for ex_iterable in self.ex_iterables)

def shard_data_sources(
    self, num_shards: int, index: int, contiguous=True
) -> "CyclingMultiSourcesExamplesIterable":
    """Either keep only the requested shard, or propagate the request to the underlying iterable."""
    return CyclingMultiSourcesExamplesIterable(
        [iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables],
        stopping_strategy=self.stopping_strategy,
    )
20
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
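This cycling iterable is what `interleave_datasets` builds when no sampling probabilities are given. A minimal usage sketch with local data (no streaming from the Hub):

from datasets import Dataset, interleave_datasets

ds1 = Dataset.from_dict({"a": [0, 1, 2]}).to_iterable_dataset()
ds2 = Dataset.from_dict({"a": [10, 11]}).to_iterable_dataset()
# alternate between sources; "first_exhausted" stops once the shortest source runs out,
# "all_exhausted" keeps cycling until every source has been fully visited at least once
mixed = interleave_datasets([ds1, ds2], stopping_strategy="first_exhausted")
print([x["a"] for x in mixed])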
class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
    """
    VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.

    It doesn't require the examples iterables to always yield the same columns.
    Instead, this is handled by the `IterableDataset` class or `FormattedExamplesIterable`.

    For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
    We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.

    Then for each example, `IterableDataset` and `FormattedExamplesIterable` automatically fill missing columns with None.
    This is done with `_apply_feature_types_on_example`.
    """

    def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
        super().__init__()
        self.ex_iterables = ex_iterables

    @property
    def is_typed(self):
        return self.ex_iterables[0].is_typed
21
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
@property
def features(self):
    return self.ex_iterables[0].features

@property
def iter_arrow(self):
    if all(ex_iterable.iter_arrow is not None for ex_iterable in self.ex_iterables):
        return self._iter_arrow

def _init_state_dict(self) -> dict:
    self._state_dict = {
        "ex_iterable_idx": 0,
        "ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables],
    }
    return self._state_dict

def __iter__(self):
    ex_iterable_idx_start = self._state_dict["ex_iterable_idx"] if self._state_dict else 0
    for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None):
        yield from ex_iterable
        if self._state_dict:
            self._state_dict["ex_iterable_idx"] += 1
21
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self): ex_iterable_idx_start = self._state_dict["ex_iterable_idx"] if self._state_dict else 0 for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None): yield from ex_iterable.iter_arrow() if self._state_dict: self._state_dict["ex_iterable_idx"] += 1 def shuffle_data_sources( self, generator: np.random.Generator ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable": """Shuffle the list of examples iterable, as well as each underlying examples iterable.""" rng = deepcopy(generator) ex_iterables = list(self.ex_iterables) rng.shuffle(ex_iterables) ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables] return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) @property def num_shards(self) -> int: return min(ex_iterable.num_shards for ex_iterable in self.ex_iterables)
21
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources( self, num_shards: int, index: int, contiguous=True ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable": """Either keep only the requested shard, or propagate the request to the underlying iterable.""" return VerticallyConcatenatedMultiSourcesExamplesIterable( [iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables] )
21
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
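This is the iterable behind `concatenate_datasets` for streaming datasets (default axis=0). A brief sketch:

from datasets import Dataset, concatenate_datasets

ds1 = Dataset.from_dict({"a": [0, 1]}).to_iterable_dataset()
ds2 = Dataset.from_dict({"a": [2, 3]}).to_iterable_dataset()
ds = concatenate_datasets([ds1, ds2])  # chained: all of ds1 first, then ds2
print([x["a"] for x in ds])  # [0, 1, 2, 3]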
class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
    """
    HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
    It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
    This check is done once when yielding the first example.

    However it doesn't fill missing columns with None.
    Instead, this is handled by the `IterableDataset` class or `FormattedExamplesIterable`.

    For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
    We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.

    Then for each example, `IterableDataset` and `FormattedExamplesIterable` automatically fill missing columns with None.
    This is done with `_apply_feature_types_on_example`.
    """
22
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
    super().__init__()
    self.ex_iterables = ex_iterables
    # TODO(QL): implement iter_arrow

@property
def is_typed(self):
    return self.ex_iterables[0].is_typed

@property
def features(self):
    return self.ex_iterables[0].features

def _init_state_dict(self) -> dict:
    self._state_dict = {"ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables]}
    return self._state_dict
22
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self):
    ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
    for i in itertools.count():
        keys = []
        examples = []
        for ex_iterator in list(ex_iterators):
            try:
                key, example = next(ex_iterator)
                keys.append(key)
                examples.append(example)
            except StopIteration:
                ex_iterators.remove(ex_iterator)
        if ex_iterators:
            if i == 0:
                _check_column_names([column_name for example in examples for column_name in example])
            new_example = {}
            for example in examples:
                new_example.update(example)
            new_key = "_".join(str(key) for key in keys)
            yield new_key, new_example
        else:
            break
22
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(
    self, generator: np.random.Generator
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
    """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
    return self

@property
def num_shards(self) -> int:
    return 1

def shard_data_sources(
    self, num_shards: int, index: int, contiguous=True
) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
    """Either keep only the requested shard, or propagate the request to the underlying iterable."""
    return HorizontallyConcatenatedMultiSourcesExamplesIterable(
        [iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables]
    )
22
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
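A sketch of the column-wise merge this class implements, through the public `concatenate_datasets(..., axis=1)` call. This assumes the installed version supports axis=1 for iterable datasets (which is what this class exists for); the two sources must be aligned row by row and have disjoint column names:

from datasets import Dataset, concatenate_datasets

texts = Dataset.from_dict({"text": ["a", "b"]}).to_iterable_dataset()
labels = Dataset.from_dict({"label": [0, 1]}).to_iterable_dataset()
ds = concatenate_datasets([texts, labels], axis=1)  # merge columns, row by row
print(next(iter(ds)))  # {'text': 'a', 'label': 0}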
class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
    def __init__(
        self,
        ex_iterables: List[_BaseExamplesIterable],
        generator: np.random.Generator,
        probabilities: Optional[List[float]] = None,
        stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
    ):
        super().__init__(ex_iterables, stopping_strategy)
        self.generator = deepcopy(generator)
        self.probabilities = probabilities
        # TODO(QL): implement iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterables[0].is_typed

    @property
    def features(self):
        return self.ex_iterables[0].features
23
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _get_indices_iterator(self):
    rng = deepcopy(self.generator)
    num_sources = len(self.ex_iterables)
    random_batch_size = 1000
    # this is an infinite iterator that randomly samples the index of the source to pick examples from
    index_offset = self._state_dict["bit_generator_index_offset"] if self._state_dict else 0
    if self._state_dict:
        rng.bit_generator.state = self._state_dict["bit_generator_state"]
    if self.probabilities is None:
        while True:
            for i in islice(rng.integers(0, num_sources, size=random_batch_size), index_offset, None):
                index_offset = (index_offset + 1) % random_batch_size
                if self._state_dict:
                    self._state_dict["bit_generator_index_offset"] = index_offset
                    if index_offset == 0:
                        self._state_dict["bit_generator_state"] = rng.bit_generator.state
                yield int(i)
    else:
23
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
        while True:
            for i in islice(
                rng.choice(num_sources, size=random_batch_size, p=self.probabilities), index_offset, None
            ):
                index_offset = (index_offset + 1) % random_batch_size
                if self._state_dict:
                    self._state_dict["bit_generator_index_offset"] = index_offset
                    if index_offset == 0:
                        self._state_dict["bit_generator_state"] = rng.bit_generator.state
                yield int(i)
23
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _init_state_dict(self) -> dict: self._state_dict = { "bit_generator_state": self.generator.bit_generator.state, "bit_generator_index_offset": 0, "ex_iterables": [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables], "previous_states": [None] * len(self.ex_iterables), "is_exhausted": [False] * len(self.ex_iterables), } return self._state_dict def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable": """Shuffle the data sources of each wrapped examples iterable.""" ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables] return RandomlyCyclingMultiSourcesExamplesIterable( ex_iterables, generator=generator, probabilities=self.probabilities, stopping_strategy=self.stopping_strategy, )
23
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources( self, num_shards: int, index: int, contiguous=True ) -> "RandomlyCyclingMultiSourcesExamplesIterable": """Either keep only the requested shard, or propagate the request to the underlying iterable.""" return RandomlyCyclingMultiSourcesExamplesIterable( [iterable.shard_data_sources(num_shards, index, contiguous=contiguous) for iterable in self.ex_iterables], self.generator, self.probabilities, self.stopping_strategy, )
23
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
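This random-cycling iterable backs probability-weighted interleaving. A small usage sketch; the 0.8/0.2 split and the seed are arbitrary example values:

from datasets import Dataset, interleave_datasets

ds1 = Dataset.from_dict({"a": [0] * 10}).to_iterable_dataset()
ds2 = Dataset.from_dict({"a": [1] * 10}).to_iterable_dataset()
# draw from ds1 about 80% of the time; the seed makes the sampling deterministic and resumable
mixed = interleave_datasets([ds1, ds2], probabilities=[0.8, 0.2], seed=42)
print([x["a"] for x in mixed][:10])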
class MappedExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        ex_iterable: _BaseExamplesIterable,
        function: Callable,
        with_indices: bool = False,
        input_columns: Optional[List[str]] = None,
        batched: bool = False,
        batch_size: Optional[int] = 1000,
        drop_last_batch: bool = False,
        remove_columns: Optional[List[str]] = None,
        fn_kwargs: Optional[dict] = None,
        formatting: Optional["FormattingConfig"] = None,
        features: Optional[Features] = None,
    ):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.function = function
        self.batched = batched
        self.batch_size = batch_size
        self.drop_last_batch = drop_last_batch
        self.remove_columns = remove_columns
        self.with_indices = with_indices
        self.input_columns = input_columns
        self.fn_kwargs = fn_kwargs or {}
        self.formatting = formatting  # required for iter_arrow
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
        self._features = features
        # sanity checks
        if formatting and formatting.format_type == "arrow":
            # batch_size should match for iter_arrow
            if not isinstance(ex_iterable, RebatchedArrowExamplesIterable):
                raise ValueError(
                    "The Arrow-formatted MappedExamplesIterable has an underlying iterable "
                    f"that is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable."
                )
            elif ex_iterable.batch_size != (batch_size if batched else 1):
                raise ValueError(
                    f"The Arrow-formatted MappedExamplesIterable has batch_size={batch_size if batched else 1}, which is "
                    f"different from {ex_iterable.batch_size=} from its underlying iterable."
                )
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
@property def iter_arrow(self): if self.formatting and self.formatting.format_type == "arrow": return self._iter_arrow @property def is_typed(self): return self.features is not None # user has extracted features @property def features(self): return self._features def _init_state_dict(self) -> dict: self._state_dict = { "ex_iterable": self.ex_iterable._init_state_dict(), "previous_state": None, "num_examples_since_previous_state": 0, "previous_state_example_idx": 0, } return self._state_dict def __iter__(self): if self.formatting and self.formatting.format_type == "arrow": formatter = PythonFormatter() for key, pa_table in self._iter_arrow(max_chunksize=1): yield key, formatter.format_row(pa_table) else: yield from self._iter()
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter(self): current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0 if self._state_dict and self._state_dict["previous_state"]: self.ex_iterable.load_state_dict(self._state_dict["previous_state"]) num_examples_to_skip = self._state_dict["num_examples_since_previous_state"] else: num_examples_to_skip = 0 iterator = iter(self.ex_iterable) if self.formatting: formatter = get_formatter(self.formatting.format_type) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self.batched: if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0 self._state_dict["previous_state_example_idx"] = current_idx for key, example in iterator: # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset iterator_batch = ( iterator if self.batch_size is None or self.batch_size <= 0 else islice(iterator, self.batch_size - 1) ) key_examples_list = [(key, example)] + list(iterator_batch) keys, examples = zip(*key_examples_list) if ( self.drop_last_batch and self.batch_size is not None and self.batch_size > 0 and len(examples) < self.batch_size
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
): # ignore last batch return batch = _examples_to_batch(examples) batch = format_dict(batch) if format_dict else batch # then apply the transform inputs = batch function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] if self.with_indices: function_args.append([current_idx + i for i in range(len(key_examples_list))]) inputs_to_merge = dict(batch) processed_inputs = self.function(*function_args, **self.fn_kwargs) # this logic mimics the one in Dataset.map if self.remove_columns: for c in self.remove_columns: if c in inputs_to_merge: del inputs_to_merge[c] if processed_inputs is inputs and c in processed_inputs:
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
del processed_inputs[c] transformed_batch = {**inputs_to_merge, **processed_inputs} if transformed_batch: first_col = next(iter(transformed_batch)) bad_cols = [ col for col in transformed_batch if len(transformed_batch[col]) != len(transformed_batch[first_col]) ] if bad_cols: raise ValueError( f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} " f"while {first_col} has length {len(transformed_batch[first_col])}." ) if self.features: for c in self.features.keys(): if c not in transformed_batch:
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
transformed_batch[c] = [None] * len(transformed_batch[first_col]) transformed_batch = self.features.decode_batch(transformed_batch) # the new key is the concatenation of the examples keys from the batch new_key = "_".join(str(key) for key in keys) # yield one example at a time from the transformed batch for example in _batch_to_examples(transformed_batch): current_idx += 1 if self._state_dict: self._state_dict["num_examples_since_previous_state"] += 1 if num_examples_to_skip > 0: num_examples_to_skip -= 1 continue yield new_key, example if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
self._state_dict["previous_state_example_idx"] = current_idx else: for key, example in iterator: # If not batched, we can apply the transform and yield the example directly # first copy the example, since we might drop some keys example = dict(example) example = format_dict(example) if format_dict else example # then apply the transform inputs = example function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] if self.with_indices: function_args.append(current_idx) processed_inputs = self.function(*function_args, **self.fn_kwargs) inputs_to_merge = dict(example) # this logic mimics the one in Dataset.map if self.remove_columns: for c in self.remove_columns:
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if c in inputs_to_merge: del inputs_to_merge[c] if processed_inputs is inputs and c in processed_inputs: del processed_inputs[c] transformed_example = {**inputs_to_merge, **processed_inputs} if self.features: for c in self.features.keys(): if c not in transformed_example: transformed_example[c] = None transformed_example = self.features.decode_example(transformed_example) current_idx += 1 if self._state_dict: self._state_dict["previous_state_example_idx"] += 1 yield key, transformed_example
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self, max_chunksize: Optional[int] = None) -> Iterator[Tuple[Key, pa.Table]]: if self.ex_iterable.iter_arrow: iterator = self.ex_iterable.iter_arrow() else: iterator = _convert_to_arrow( self.ex_iterable, batch_size=self.batch_size if self.batched else 1, drop_last_batch=self.drop_last_batch, ) if self._state_dict and self._state_dict["previous_state"]: self.ex_iterable.load_state_dict(self._state_dict["previous_state"]) num_examples_to_skip = self._state_dict["num_examples_since_previous_state"] else: num_examples_to_skip = 0 if self._state_dict and max_chunksize is not None: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0 current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
for key, pa_table in iterator: if ( self.batched and self.batch_size is not None and len(pa_table) < self.batch_size and self.drop_last_batch ): return # first build the batch function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns] if self.with_indices: if self.batched: function_args.append([current_idx + i for i in range(len(pa_table))]) else: function_args.append(current_idx) # then apply the transform output_table = self.function(*function_args, **self.fn_kwargs) if not isinstance(output_table, pa.Table): raise TypeError( f"Provided `function` which is applied to pyarrow tables returns a variable of type "
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
f"{type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset." ) # we don't need to merge results for consistency with Dataset.map which merges iif both input and output are dicts # then remove the unwanted columns if self.remove_columns: for column in self.remove_columns: if column in output_table.column_names: output_table = output_table.remove_column(output_table.column_names.index(column)) # return output if max_chunksize is None: current_idx += len(pa_table) if self._state_dict: self._state_dict["previous_state_example_idx"] += len(pa_table) yield key, output_table else: for i, pa_subtable in enumerate(output_table.to_reader(max_chunksize=max_chunksize)): current_idx += 1
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self._state_dict: self._state_dict["num_examples_since_previous_state"] += 1 if num_examples_to_skip > 0: num_examples_to_skip -= 1 continue yield f"{key}_{i}", pa_subtable if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0 self._state_dict["previous_state_example_idx"] += len(pa_table)
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable": """Shuffle the wrapped examples iterable.""" return MappedExamplesIterable( self.ex_iterable.shuffle_data_sources(generator), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, drop_last_batch=self.drop_last_batch, remove_columns=self.remove_columns, fn_kwargs=self.fn_kwargs, formatting=self.formatting, features=self.features, )
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "MappedExamplesIterable": """Keep only the requested shard.""" return MappedExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, drop_last_batch=self.drop_last_batch, remove_columns=self.remove_columns, fn_kwargs=self.fn_kwargs, formatting=self.formatting, features=self.features, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
24
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
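MappedExamplesIterable is what `IterableDataset.map` builds under the hood. A minimal sketch of the user-facing call; the `add_length` function is just an example transform:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "hi"]}).to_iterable_dataset()

def add_length(batch):
    return {"length": [len(t) for t in batch["text"]]}

# the function is applied lazily, one batch at a time, while iterating
ds = ds.map(add_length, batched=True, batch_size=2)
print(next(iter(ds)))  # {'text': 'hello', 'length': 5}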
class FilteredExamplesIterable(_BaseExamplesIterable): def __init__( self, ex_iterable: _BaseExamplesIterable, function: Callable, with_indices: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, fn_kwargs: Optional[dict] = None, formatting: Optional["FormattingConfig"] = None, ): super().__init__() self.ex_iterable = ex_iterable self.function = function self.batched = batched self.batch_size = batch_size self.with_indices = with_indices self.input_columns = input_columns self.fn_kwargs = fn_kwargs or {} self.formatting = formatting # required for iter_arrow # sanity checks if formatting and formatting.format_type == "arrow": # batch_size should match for iter_arrow if not isinstance(ex_iterable, RebatchedArrowExamplesIterable):
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
    raise ValueError(
        "The Arrow-formatted FilteredExamplesIterable has an underlying iterable "
        f"that is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable."
    )
elif ex_iterable.batch_size != (batch_size if batched else 1):
    raise ValueError(
        f"The Arrow-formatted FilteredExamplesIterable has batch_size={batch_size if batched else 1}, which is "
        f"different from {ex_iterable.batch_size=} from its underlying iterable."
    )
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
@property def iter_arrow(self): if self.formatting and self.formatting.format_type == "arrow": return self._iter_arrow @property def is_typed(self): return self.ex_iterable.is_typed @property def features(self): return self.ex_iterable.features def _init_state_dict(self) -> dict: self._state_dict = { "ex_iterable": self.ex_iterable._init_state_dict(), "previous_state": None, "num_examples_since_previous_state": 0, "previous_state_example_idx": 0, } return self._state_dict def __iter__(self): if self.formatting and self.formatting.format_type == "arrow": formatter = PythonFormatter() for key, pa_table in self._iter_arrow(max_chunksize=1): yield key, formatter.format_row(pa_table) else: yield from self._iter()
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter(self): current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0 if self._state_dict and self._state_dict["previous_state"]: self.ex_iterable.load_state_dict(self._state_dict["previous_state"]) num_examples_to_skip = self._state_dict["num_examples_since_previous_state"] else: num_examples_to_skip = 0 iterator = iter(self.ex_iterable) if self.formatting: formatter = get_formatter(self.formatting.format_type) format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects ) else: format_dict = None
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self.batched: if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0 self._state_dict["previous_state_example_idx"] = current_idx for key, example in iterator: # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset iterator_batch = ( iterator if self.batch_size is None or self.batch_size <= 0 else islice(iterator, self.batch_size - 1) ) key_examples_list = [(key, example)] + list(iterator_batch) keys, examples = zip(*key_examples_list) batch = _examples_to_batch(examples) batch = format_dict(batch) if format_dict else batch # then compute the mask for the batch inputs = batch
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] if self.with_indices: function_args.append([current_idx + i for i in range(len(key_examples_list))]) mask = self.function(*function_args, **self.fn_kwargs) # yield one example at a time from the batch for key_example, to_keep in zip(key_examples_list, mask): current_idx += 1 if self._state_dict: self._state_dict["num_examples_since_previous_state"] += 1 if num_examples_to_skip > 0: num_examples_to_skip -= 1 continue if to_keep: yield key_example if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict()
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
self._state_dict["num_examples_since_previous_state"] = 0 self._state_dict["previous_state_example_idx"] = current_idx else: for key, example in iterator: # If not batched, we can apply the filtering function direcly example = dict(example) inputs = format_dict(example) if format_dict else example function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] if self.with_indices: function_args.append(current_idx) to_keep = self.function(*function_args, **self.fn_kwargs) current_idx += 1 if self._state_dict: self._state_dict["previous_state_example_idx"] += 1 if to_keep: yield key, example
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self, max_chunksize: Optional[int] = None): if self.ex_iterable.iter_arrow: iterator = self.ex_iterable.iter_arrow() else: iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if self._state_dict and self._state_dict["previous_state"]: self.ex_iterable.load_state_dict(self._state_dict["previous_state"]) num_examples_to_skip = self._state_dict["num_examples_since_previous_state"] else: num_examples_to_skip = 0 if self._state_dict and max_chunksize is not None: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0 current_idx = self._state_dict["previous_state_example_idx"] if self._state_dict else 0 for key, pa_table in iterator: if ( self.batched and self.batch_size is not None and len(pa_table) < self.batch_size and self.drop_last_batch ): return
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns] if self.with_indices: if self.batched: function_args.append([current_idx + i for i in range(len(pa_table))]) else: function_args.append(current_idx) # then apply the transform mask = self.function(*function_args, **self.fn_kwargs) # return output if self.batched: output_table = pa_table.filter(mask) elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask: output_table = pa_table else: output_table = pa_table.slice(0, 0)
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
if max_chunksize is None: current_idx += len(pa_table) if self._state_dict: self._state_dict["previous_state_example_idx"] += len(pa_table) if len(output_table) > 0: yield key, output_table else: for i, pa_subtable in enumerate(output_table.to_reader(max_chunksize=max_chunksize)): current_idx += 1 if self._state_dict: self._state_dict["num_examples_since_previous_state"] += 1 if num_examples_to_skip > 0: num_examples_to_skip -= 1 continue yield f"{key}_{i}", pa_subtable if self._state_dict: self._state_dict["previous_state"] = self.ex_iterable.state_dict() self._state_dict["num_examples_since_previous_state"] = 0
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
self._state_dict["previous_state_example_idx"] += len(pa_table)
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, seed: Optional[int]) -> "FilteredExamplesIterable": """Shuffle the wrapped examples iterable.""" return FilteredExamplesIterable( self.ex_iterable.shuffle_data_sources(seed), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, formatting=self.formatting, )
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "FilteredExamplesIterable": """Keep only the requested shard.""" return FilteredExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, formatting=self.formatting, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
25
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
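Correspondingly, `IterableDataset.filter` wraps the stream in a FilteredExamplesIterable. A short sketch:

from datasets import Dataset

ds = Dataset.from_dict({"a": list(range(6))}).to_iterable_dataset()
ds = ds.filter(lambda x: x["a"] % 2 == 0)  # the predicate is evaluated on the fly while streaming
print([x["a"] for x in ds])  # [0, 2, 4]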
class BufferShuffledExamplesIterable(_BaseExamplesIterable):
    def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.buffer_size = buffer_size
        self.generator = generator
        # TODO(QL): implement iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterable.is_typed

    @property
    def features(self):
        return self.ex_iterable.features

    def _init_state_dict(self) -> dict:
        self._state_dict = self.ex_iterable._init_state_dict()
        self._original_state_dict = self.state_dict()
        return self._state_dict
26
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def load_state_dict(self, state_dict: dict) -> dict:
    if self._state_dict:
        if state_dict != self._original_state_dict:
            logger.warning(
                "Loading a state dict of a shuffle buffer of a dataset without the buffer content. "
                "The shuffle buffer will be refilled before starting to yield new examples."
            )
    return super().load_state_dict(state_dict)

@staticmethod
def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
    while True:
        yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
26
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self):
    buffer_size = self.buffer_size
    rng = deepcopy(self.generator)
    indices_iterator = self._iter_random_indices(rng, buffer_size)
    # this is the shuffle buffer that we keep in memory
    mem_buffer = []
    for x in self.ex_iterable:
        if len(mem_buffer) == buffer_size:  # if the buffer is full, pick an example from it
            i = next(indices_iterator)
            yield mem_buffer[i]
            mem_buffer[i] = x  # replace the picked example by a new one
        else:  # otherwise, keep filling the buffer
            mem_buffer.append(x)
    # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
    rng.shuffle(mem_buffer)
    yield from mem_buffer
26
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable": """Shuffle the wrapped examples iterable as well as the shuffling buffer.""" return BufferShuffledExamplesIterable( self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator ) def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "BufferShuffledExamplesIterable": """Keep only the requested shard.""" return BufferShuffledExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), buffer_size=self.buffer_size, generator=self.generator, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
26
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
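The buffer-based shuffling above is approximate: only `buffer_size` examples are held in memory, and each yielded example is drawn at random from that buffer. A usage sketch; buffer_size=10 is deliberately tiny to make the effect visible:

from datasets import Dataset

ds = Dataset.from_dict({"a": list(range(100))}).to_iterable_dataset()
ds = ds.shuffle(seed=42, buffer_size=10)  # approximate shuffle from a 10-example buffer
print([x["a"] for x in ds][:5])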
class SkipExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        ex_iterable: _BaseExamplesIterable,
        n: int,
        block_sources_order_when_shuffling: bool = True,
        split_when_sharding: bool = True,
    ):
        super().__init__()
        self.ex_iterable = ex_iterable
        self.n = n
        self.block_sources_order_when_shuffling = block_sources_order_when_shuffling
        self.split_when_sharding = split_when_sharding
        # TODO(QL): implement iter_arrow

    @property
    def is_typed(self):
        return self.ex_iterable.is_typed

    @property
    def features(self):
        return self.ex_iterable.features

    def _init_state_dict(self) -> dict:
        self._state_dict = {"skipped": False, "ex_iterable": self.ex_iterable._init_state_dict()}
        return self._state_dict
27
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self):
    ex_iterable_idx_start = 0 if self._state_dict and self._state_dict["skipped"] else self.n
    if self._state_dict:
        self._state_dict["skipped"] = True
    yield from islice(self.ex_iterable, ex_iterable_idx_start, None)

@staticmethod
def split_number(num, n):
    quotient = num // n
    remainder = num % n
    result = [quotient] * n
    for i in range(remainder):
        result[i] += 1
    return result
27
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
    """May not shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
    if self.block_sources_order_when_shuffling:
        return self
    else:
        return SkipExamplesIterable(
            self.ex_iterable.shuffle_data_sources(generator),
            n=self.n,
            block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
            split_when_sharding=self.split_when_sharding,
        )
27
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SkipExamplesIterable":
    """Keep only the requested shard."""
    if self.split_when_sharding:
        return SkipExamplesIterable(
            self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous),
            n=self.split_number(self.n, num_shards)[index],
            block_sources_order_when_shuffling=self.block_sources_order_when_shuffling,
            split_when_sharding=self.split_when_sharding,
        )
    else:
        return self

@property
def num_shards(self) -> int:
    return self.ex_iterable.num_shards
27
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
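When a skip is sharded with split_when_sharding=True, `split_number` spreads the n skipped examples as evenly as possible across shards. A worked example of the same arithmetic, as a standalone copy of the static helper above:

def split_number(num, n):
    quotient, remainder = divmod(num, n)
    result = [quotient] * n
    for i in range(remainder):
        result[i] += 1
    return result

print(split_number(10, 4))  # [3, 3, 2, 2] -> shards 0 and 1 each skip 3 examples, shards 2 and 3 skip 2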
class TakeExamplesIterable(_BaseExamplesIterable): def __init__( self, ex_iterable: _BaseExamplesIterable, n: int, block_sources_order_when_shuffling: bool = True, split_when_sharding: bool = True, ): super().__init__() self.ex_iterable = ex_iterable self.n = n self.block_sources_order_when_shuffling = block_sources_order_when_shuffling self.split_when_sharding = split_when_sharding # TODO(QL): implement iter_arrow @property def is_typed(self): return self.ex_iterable.is_typed @property def features(self): return self.ex_iterable.features def _init_state_dict(self) -> dict: self._state_dict = {"num_taken": 0, "ex_iterable": self.ex_iterable._init_state_dict()} return self._state_dict
28
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): ex_iterable_num_taken = self._state_dict["num_taken"] if self._state_dict else 0 for key_example in islice(self.ex_iterable, self.n - ex_iterable_num_taken): if self._state_dict: self._state_dict["num_taken"] += 1 yield key_example @staticmethod def split_number(num, n): quotient = num // n remainder = num % n result = [quotient] * n for i in range(remainder): result[i] += 1 return result
28
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable": """May not shuffle the wrapped examples iterable since it would take examples from other shards instead.""" if self.block_sources_order_when_shuffling: return self else: return TakeExamplesIterable( self.ex_iterable.shuffle_data_sources(generator), n=self.n, block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding, )
28
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "TakeExamplesIterable": """Keep only the requested shard.""" if self.split_when_sharding: return TakeExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), n=self.split_number(self.n, num_shards)[index], block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding, ) else: return TakeExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), n=self.n, block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
28
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
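SkipExamplesIterable and TakeExamplesIterable back the `.skip()` and `.take()` methods of streaming datasets. A brief sketch:

from datasets import Dataset

ds = Dataset.from_dict({"a": list(range(10))}).to_iterable_dataset()
print([x["a"] for x in ds.skip(7)])  # [7, 8, 9]
print([x["a"] for x in ds.take(3)])  # [0, 1, 2]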
class FormattingConfig: format_type: Optional[str] def __post_init__(self): if self.format_type == "pandas": raise NotImplementedError( "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead." )
29
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class FormattedExamplesIterable(_BaseExamplesIterable): def __init__( self, ex_iterable: _BaseExamplesIterable, formatting: Optional[FormattingConfig], features: Optional[Features], token_per_repo_id: Dict[str, Union[str, bool, None]], ): super().__init__() self.ex_iterable = ex_iterable self._features = features self.formatting = formatting self.token_per_repo_id = token_per_repo_id @property def iter_arrow(self): if self.ex_iterable.iter_arrow and (not self.formatting or self.formatting.format_type == "arrow"): return self._iter_arrow @property def is_typed(self): return self.ex_iterable.is_typed or self._features is not None @property def features(self): return self._features def _init_state_dict(self) -> dict: self._state_dict = self.ex_iterable._init_state_dict() return self._state_dict
30
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): if not self.formatting or self.formatting.format_type == "arrow": formatter = PythonFormatter() else: formatter = get_formatter( self.formatting.format_type, features=self._features if not self.ex_iterable.is_typed else None, token_per_repo_id=self.token_per_repo_id, ) if self.ex_iterable.iter_arrow: # feature casting (inc column addition) handled within self._iter_arrow() for key, pa_table in self._iter_arrow(): batch = formatter.format_batch(pa_table) for example in _batch_to_examples(batch): yield key, example else: format_dict = ( formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects # cast in case features is None ) for key, example in self.ex_iterable:
30
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
# don't apply feature types if already applied by ex_iterable (e.g. in case of chained with_format) if self.features and not self.ex_iterable.is_typed: example = _apply_feature_types_on_example( example, self.features, token_per_repo_id=self.token_per_repo_id ) yield key, format_dict(example)
30
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: if not self.features: yield from self.ex_iterable._iter_arrow() for key, pa_table in self.ex_iterable._iter_arrow(): columns = set(pa_table.column_names) schema = self.features.arrow_schema # add missing columns for column_name in self.features: if column_name not in columns: col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None]) pa_table = pa_table.append_column(column_name, col) if pa_table.schema != schema: pa_table = cast_table_to_features(pa_table, self.features) yield key, pa_table
30
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "FormattedExamplesIterable": """Shuffle the wrapped examples iterable.""" return FormattedExamplesIterable( self.ex_iterable.shuffle_data_sources(generator), features=self.features, token_per_repo_id=self.token_per_repo_id, formatting=self.formatting, ) def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "FormattedExamplesIterable": """Keep only the requested shard.""" return FormattedExamplesIterable( self.ex_iterable.shard_data_sources(num_shards, index, contiguous=contiguous), features=self.features, token_per_repo_id=self.token_per_repo_id, formatting=self.formatting, ) @property def num_shards(self) -> int: return self.ex_iterable.num_shards
30
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
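FormattedExamplesIterable is created by `IterableDataset.with_format`. A hedged sketch, assuming the installed version supports the "numpy" format for streaming datasets and the `iter(batch_size=...)` helper:

from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]}).to_iterable_dataset().with_format("numpy")
for batch in ds.iter(batch_size=2):
    print(type(batch["a"]))  # numpy.ndarray instead of a Python list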
class ShufflingConfig:
    generator: np.random.Generator
    _original_seed: Optional[int] = None
31
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class DistributedConfig:
    rank: int
    world_size: int
32
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class IterableDataset(DatasetInfoMixin):
    """A Dataset backed by an iterable."""

    def __init__(
        self,
        ex_iterable: _BaseExamplesIterable,
        info: Optional[DatasetInfo] = None,
        split: Optional[NamedSplit] = None,
        formatting: Optional[FormattingConfig] = None,
        shuffling: Optional[ShufflingConfig] = None,
        distributed: Optional[DistributedConfig] = None,
        token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
    ):
        if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
            raise RuntimeError(
                "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
                "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
            )
        info = info.copy() if info is not None else DatasetInfo()
        DatasetInfoMixin.__init__(self, info=info, split=split)
33
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
        self._ex_iterable = copy.copy(ex_iterable)
        self._formatting = formatting
        self._shuffling = shuffling
        self._distributed = distributed
        self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
        self._epoch: Union[int, "torch.Tensor"] = _maybe_share_with_torch_persistent_workers(0)
        self._starting_state_dict: Optional[dict] = None
        self._prepared_ex_iterable = self._prepare_ex_iterable_for_iteration()
        self._state_dict = self._prepared_ex_iterable._init_state_dict()
        _maybe_add_torch_iterable_dataset_parent_class(self.__class__)

    def state_dict(self) -> dict:
        """Get the current state_dict of the dataset.
        It corresponds to the state at the latest example it yielded.

        Resuming returns exactly where the checkpoint was saved except in two cases:
        1. examples from shuffle buffers are lost when resuming and the buffers are refilled with new data
        2. combinations of `.with_format(arrow)` and batched `.map()` may skip one batch.

        Returns:
            `dict`

        Example:

        ```py
        >>> from datasets import Dataset, concatenate_datasets
        >>> ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
        >>> for idx, example in enumerate(ds):
        ...     print(example)
        ...     if idx == 2:
        ...         state_dict = ds.state_dict()
        ...         print("checkpoint")
        ...         break
        >>> ds.load_state_dict(state_dict)
        >>> print(f"restart from checkpoint")
        >>> for example in ds:
        ...     print(example)
        ```

        which returns:
        ```
        {'a': 0}
        {'a': 1}
        {'a': 2}
        checkpoint
        restart from checkpoint
        {'a': 3}
        {'a': 4}
        {'a': 5}
        ```
        ```py
        >>> from torchdata.stateful_dataloader import StatefulDataLoader
        >>> ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
        >>> dataloader = StatefulDataLoader(ds, batch_size=32, num_workers=4)
        >>> # checkpoint
        >>> state_dict = dataloader.state_dict()  # uses ds.state_dict() under the hood
        >>> # resume from checkpoint
        >>> dataloader.load_state_dict(state_dict)  # uses ds.load_state_dict() under the hood
        ```
        """
        return copy.deepcopy(self._state_dict)

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the state_dict of the dataset.
        The iteration will restart at the next example from when the state was saved.

        Resuming returns exactly where the checkpoint was saved except in two cases:
        1. examples from shuffle buffers are lost when resuming and the buffers are refilled with new data
        2. combinations of `.with_format(arrow)` and batched `.map()` may skip one batch.

        Example:

        ```py
        >>> from datasets import Dataset, concatenate_datasets
        >>> ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3)
        >>> for idx, example in enumerate(ds):
        ...     print(example)
        ...     if idx == 2:
        ...         state_dict = ds.state_dict()
        ...         print("checkpoint")
        ...         break
        >>> ds.load_state_dict(state_dict)
        >>> print(f"restart from checkpoint")
        >>> for example in ds:
        ...     print(example)
        ```

        which returns:
        ```
        {'a': 0}
        {'a': 1}
        {'a': 2}
        checkpoint
        restart from checkpoint
        {'a': 3}
        {'a': 4}
        {'a': 5}
        ```
        ```py
        >>> from torchdata.stateful_dataloader import StatefulDataLoader
        >>> ds = load_dataset("deepmind/code_contests", streaming=True, split="train")
        >>> dataloader = StatefulDataLoader(ds, batch_size=32, num_workers=4)
        >>> # checkpoint
        >>> state_dict = dataloader.state_dict()  # uses ds.state_dict() under the hood
        >>> # resume from checkpoint
        >>> dataloader.load_state_dict(state_dict)  # uses ds.load_state_dict() under the hood
        ```
        """
        self._prepared_ex_iterable.load_state_dict(state_dict)
        self._starting_state_dict = state_dict

    def __repr__(self):
        return f"IterableDataset({{\n    features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n    num_shards: {self.num_shards}\n}})"

    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, d):
        self.__dict__ = d
        # Re-add torch shared memory, since shared memory is not always kept when pickling
        self._epoch = _maybe_share_with_torch_persistent_workers(self._epoch)
        # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
        _maybe_add_torch_iterable_dataset_parent_class(self.__class__)

    def _head(self, n=5):
        return _examples_to_batch(list(self.take(n)))

    @property
    def epoch(self) -> int:
        return int(self._epoch)
    def _effective_generator(self):
        if self._shuffling and self.epoch == 0:
            return self._shuffling.generator
        elif self._shuffling:
            # Create effective seed using self.epoch (we subtract in order to avoid overflow in long_scalars)
            effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self.epoch
            effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
            return np.random.default_rng(effective_seed)
        else:
            raise ValueError("This dataset is not shuffled")

    @property
    def num_shards(self) -> int:
        if self._distributed and self._ex_iterable.num_shards % self._distributed.world_size == 0:
            return self._ex_iterable.num_shards // self._distributed.world_size
        return self._ex_iterable.num_shards

    @property
    def n_shards(self) -> int:  # backward compatibility
        return self.num_shards
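A minimal sketch of how the epoch feeds into re-shuffling, assuming `datasets` is installed and using illustrative toy data: calling `set_epoch` before each epoch changes the effective generator derived above, so the shard order and shuffle buffer differ per epoch.

```py
# Sketch: per-epoch reshuffling via set_epoch (assumption: `datasets` is installed; toy data is illustrative).
from datasets import Dataset

ds = Dataset.from_dict({"a": range(6)}).to_iterable_dataset(num_shards=3).shuffle(seed=42, buffer_size=10)
for epoch in range(2):
    ds.set_epoch(epoch)  # epoch 0 uses the base generator; later epochs derive a new effective seed
    print(list(ds))      # order differs between the two epochs
```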
    def _iter_pytorch(self):
        ex_iterable = self._prepare_ex_iterable_for_iteration()
        # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0)
        # See https://github.com/fsspec/gcsfs/issues/379
        fsspec.asyn.reset_lock()
        # check if there aren't too many workers
        import torch.utils.data
        worker_info = torch.utils.data.get_worker_info()
        if self._is_main_process() and ex_iterable.num_shards < worker_info.num_workers:
            logger.warning(
                f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.num_shards={ex_iterable.num_shards}). "
                f"Stopping {worker_info.num_workers - ex_iterable.num_shards} dataloader workers."
            )
            logger.info(
                f"To parallelize data loading, we give each process some shards (or data sources) to process. "
                f"Therefore it's unnecessary to have a number of workers greater than dataset.num_shards={ex_iterable.num_shards}. "
                f"To enable more parallelism, please split the dataset in more files than {ex_iterable.num_shards}."
            )
        # split workload
        _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
        shards_indices = ex_iterable.split_shard_indices_by_worker(
            num_shards=worker_info.num_workers, index=worker_info.id, contiguous=False
        )
        if shards_indices:
            logger.debug(
                f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.num_shards} shards."
            )
            ex_iterable = ex_iterable.shard_data_sources(
                num_shards=worker_info.num_workers, index=worker_info.id, contiguous=False
            )
            self._state_dict = ex_iterable._init_state_dict()
            if self._starting_state_dict:
                ex_iterable.load_state_dict(self._starting_state_dict)
            if self._formatting:
                formatter = get_formatter(self._formatting.format_type, features=self.features)
                format_dict = (
                    formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
                )
            else:
                format_dict = None
            if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
                if ex_iterable.iter_arrow:
                    iterator = ex_iterable.iter_arrow()
                else:
                    iterator = _convert_to_arrow(ex_iterable, batch_size=1)
                for key, pa_table in iterator:
                    yield formatter.format_row(pa_table)
                return
            else:
                for key, example in ex_iterable:
                    if self.features and not ex_iterable.is_typed:
                        # `IterableDataset` automatically fills missing columns with None.
                        # This is done with `_apply_feature_types_on_example`.
                        example = _apply_feature_types_on_example(
                            example, self.features, token_per_repo_id=self._token_per_repo_id
                        )
                    yield format_dict(example) if format_dict else example
            logger.debug(
f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.num_shards} shards." ) else: logger.debug( f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.num_shards}<{worker_info.num_workers})." )
    def _is_main_process(self):
        if self._distributed and self._distributed.rank > 0:
            return False
        if "torch" in sys.modules:
            import torch.utils.data

            worker_info = torch.utils.data.get_worker_info()
            if worker_info is not None and worker_info.id > 0:
                return False
        return True

    def _prepare_ex_iterable_for_iteration(
        self, batch_size: int = 1, drop_last_batch: bool = False
    ) -> _BaseExamplesIterable:
        ex_iterable = self._ex_iterable
        if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
            ex_iterable = RebatchedArrowExamplesIterable(
                ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch
            )
        if self._shuffling:
            ex_iterable = ex_iterable.shuffle_data_sources(self._effective_generator())
        else:
            ex_iterable = ex_iterable
        if self._distributed:
            rank = self._distributed.rank
            world_size = self._distributed.world_size
            if ex_iterable.num_shards % world_size == 0:
                if self._is_main_process():
                    num_shards_per_node = ex_iterable.num_shards // world_size
                    plural = "s" if num_shards_per_node > 1 else ""
                    logger.info(
                        f"Assigning {num_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
                    )
                ex_iterable = ex_iterable.shard_data_sources(num_shards=world_size, index=rank, contiguous=False)
            else:
                if self._is_main_process():
                    logger.info(
                        f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
                    )
                    logger.info(
f"It is more optimized to distribute the dataset shards (or data sources) across nodes. " f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. " f"The current dataset has {ex_iterable.num_shards} which is not a factor of {world_size}" ) ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
        self._state_dict = ex_iterable._init_state_dict()
        if self._starting_state_dict:
            ex_iterable.load_state_dict(self._starting_state_dict)
        return ex_iterable

    def __iter__(self):
        if "torch" in sys.modules:
            import torch.utils.data

            worker_info = torch.utils.data.get_worker_info()
            if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
                # We're a torch.utils.data.IterableDataset in a PyTorch worker process
                yield from self._iter_pytorch()
                return

        ex_iterable = self._prepare_ex_iterable_for_iteration()
        if self._formatting:
            formatter = get_formatter(self._formatting.format_type, features=self.features)
            format_dict = (
                formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
            )
        else:
            format_dict = None
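To recap the two distribution modes handled in `_prepare_ex_iterable_for_iteration` above, a minimal sketch assuming `datasets` is installed; the shard counts and `world_size=2` are illustrative.

```py
# Sketch of the two distribution modes above (assumption: `datasets` installed; shard counts illustrative).
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

# 4 shards, world_size=2: each node receives 2 whole shards (fast path)
ds = Dataset.from_dict({"a": range(8)}).to_iterable_dataset(num_shards=4)
print(split_dataset_by_node(ds, rank=0, world_size=2).num_shards)  # 2

# 3 shards, world_size=2: shards don't divide evenly, so every node reads all shards
# and keeps one example out of world_size (StepExamplesIterable path)
ds = Dataset.from_dict({"a": range(9)}).to_iterable_dataset(num_shards=3)
print(split_dataset_by_node(ds, rank=0, world_size=2).num_shards)  # 3
```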