def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
if self.config.sample_by == "line":
batch_idx = 0
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
# StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
batch = StringIO(batch).readlines()
if not self.config.keep_linebreaks:
batch = [line.rstrip("\n") for line in batch]
pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
elif self.config.sample_by == "paragraph":
batch_idx = 0
batch = ""
while True:
new_batch = f.read(self.config.chunksize)
if not new_batch:
break
batch += new_batch
batch += f.readline() # finish current line
batch = batch.split("\n\n")
pa_table = pa.Table.from_arrays(
[pa.array([example for example in batch[:-1] if example])], names=pa_table_names
)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
batch = batch[-1]
if batch:
pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
yield (file_idx, batch_idx), self._cast_table(pa_table)
elif self.config.sample_by == "document": | 181 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/text/text.py |
text = f.read()
pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
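# Usage sketch (not part of text.py): the `sample_by` config above decides whether an
# example is a line, a blank-line-separated paragraph, or a whole file. The file name
# below is hypothetical.
from datasets import load_dataset

lines_ds = load_dataset("text", data_files="corpus.txt", split="train")  # one example per line
paragraphs_ds = load_dataset("text", data_files="corpus.txt", split="train", sample_by="paragraph")  # split on "\n\n"
documents_ds = load_dataset("text", data_files="corpus.txt", split="train", sample_by="document")  # one example per file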
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: Optional[int] = None
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
filters: Optional[Union[ds.Expression, List[tuple], List[List[tuple]]]] = None
def __post_init__(self):
super().__post_init__()
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
raise ValueError(
"The columns and features argument must contain the same columns, but got ",
f"{self.config.columns} and {self.config.features}",
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f: | 183 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/parquet/parquet.py |
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
filter_expr = (
pq.filters_to_expression(self.config.filters)
if isinstance(self.config.filters, list)
else self.config.filters
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_fragment = ds.ParquetFileFormat().make_fragment(f)
if parquet_fragment.row_groups:
batch_size = self.config.batch_size or parquet_fragment.row_groups[0].num_rows
try:
for batch_idx, record_batch in enumerate(
parquet_fragment.to_batches(
batch_size=batch_size,
columns=self.config.columns,
filter=filter_expr,
batch_readahead=0,
fragment_readahead=0,
)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
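# Usage sketch (not part of parquet.py): `columns` restricts the schema that is read and
# `filters` (a list of tuples here) is turned into a pyarrow expression via
# `pq.filters_to_expression` as shown above. File pattern and column names are hypothetical.
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files="data/*.parquet",
    columns=["id", "label"],
    filters=[("label", "!=", -1)],
    split="train",
)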
class VideoFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class VideoFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Video
BASE_COLUMN_NAME = "video"
BUILDER_CONFIG_CLASS = VideoFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
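# Usage sketch (not part of audiofolder.py/videofolder.py): both folder-based builders are
# typically loaded by pointing at a directory; labels can be dropped with `drop_labels`.
# Directory paths are hypothetical.
from datasets import load_dataset

audio_ds = load_dataset("audiofolder", data_dir="data/audio", split="train")
video_ds = load_dataset("videofolder", data_dir="data/videos", split="train", drop_labels=True)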
class SparkConfig(datasets.BuilderConfig):
"""BuilderConfig for Spark."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class SparkExamplesIterable(_BaseExamplesIterable):
def __init__(
self,
df: "pyspark.sql.DataFrame",
partition_order=None,
):
super().__init__()
self.df = df
self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
def _init_state_dict(self) -> dict:
self._state_dict = {"partition_idx": 0, "partition_example_idx": 0}
return self._state_dict
@experimental
def load_state_dict(self, state_dict: dict) -> dict:
return super().load_state_dict(state_dict)
def __iter__(self):
yield from _generate_iterable_examples(self.df, self.partition_order, self._state_dict)
def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
partition_order = list(range(self.df.rdd.getNumPartitions()))
generator.shuffle(partition_order)
return SparkExamplesIterable(self.df, partition_order=partition_order)
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "SparkExamplesIterable":
partition_order = self.split_shard_indices_by_worker(num_shards=num_shards, index=index, contiguous=contiguous)
return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
def num_shards(self) -> int:
return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
BUILDER_CONFIG_CLASS = SparkConfig
def __init__(
self,
df: "pyspark.sql.DataFrame",
cache_dir: str = None,
working_dir: str = None,
**config_kwargs,
):
import pyspark
self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
self.df = df
self._working_dir = working_dir
super().__init__(
cache_dir=cache_dir,
config_name=str(self.df.semanticHash()),
**config_kwargs,
)
def _validate_cache_dir(self):
# Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
# error due to pickling the SparkContext.
cache_dir = self._cache_dir
# Returns the path of the created file.
def create_cache_and_write_probe(context):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(cache_dir, exist_ok=True)
probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file, "a")
return [probe_file]
if self._spark.conf.get("spark.master", "").startswith("local"):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
)
if os.path.isfile(probe[0]):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
)
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _repartition_df_if_needed(self, max_shard_size):
import pyspark
def get_arrow_batch_size(it):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
df_num_rows = self.df.count()
sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
approx_bytes_per_row = (
self.df.limit(sample_num_rows)
.repartition(1)
.mapInArrow(get_arrow_batch_size, "batch_bytes: long")
.agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
.collect()[0]
.sample_bytes
/ sample_num_rows
)
approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
self.df = self.df.repartition(new_num_partitions)
def _prepare_split_single(
self,
fpath: str,
file_format: str,
max_shard_size: int,
) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
embed_local_files = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id = 0
writer = writer_class(
features=features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([first_batch])
writer.write_table(table)
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
shard_id += 1
writer = writer_class(
features=writer._features,
path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
writer_batch_size=writer_batch_size,
storage_options=storage_options,
embed_local_files=embed_local_files,
)
table = pa.Table.from_batches([batch])
writer.write_table(table)
if writer._num_bytes > 0:
num_examples, num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]],
names=["task_id", "num_examples", "num_bytes"],
)
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath)):
dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
shutil.move(file, dest)
stats = (
self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
.groupBy("task_id")
.agg(
pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
pyspark.sql.functions.count("num_bytes").alias("num_shards"),
pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
)
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split(
self,
split_generator: "datasets.SplitGenerator",
file_format: str = "arrow",
max_shard_size: Optional[Union[str, int]] = None,
num_proc: Optional[int] = None,
**kwargs,
):
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
self._repartition_df_if_needed(max_shard_size)
is_local = not is_remote_filesystem(self._fs)
path_join = os.path.join if is_local else posixpath.join
SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
fpath = path_join(self._output_dir, fname)
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
(
num_examples,
num_bytes,
num_shards,
shard_lengths,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards))
all_shard_lengths.extend(shard_lengths)
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards.")
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id: int,
shard_id: int,
global_shard_id: int,
):
rename(
fs,
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
)
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards)):
task_id, num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards):
args.append([task_id, shard_id, global_shard_id])
global_shard_id += 1
self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
fpath.replace(SUFFIX, ""),
)
def _get_examples_iterable_for_split(
self,
split_generator: "datasets.SplitGenerator",
) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df)
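# Usage sketch (not part of spark.py): the Spark builder is normally reached through
# `Dataset.from_spark`, which writes one group of Arrow/Parquet shards per Spark task as in
# `_prepare_split_single` above. The DataFrame below is hypothetical.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
ds = Dataset.from_spark(df)  # on a non-local master, pass a cache_dir reachable by all workers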
class PandasConfig(datasets.BuilderConfig):
"""BuilderConfig for Pandas."""
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
class Pandas(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = PandasConfig
def _info(self):
warnings.warn(
"The Pandas builder is deprecated and will be removed in the next major version of datasets.",
FutureWarning,
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
for i, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f))
yield i, self._cast_table(pa_table)
class SqlConfig(datasets.BuilderConfig):
"""BuilderConfig for SQL."""
sql: Union[str, "sqlalchemy.sql.Selectable"] = None
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
index_col: Optional[Union[str, List[str]]] = None
coerce_float: bool = True
params: Optional[Union[List, Tuple, Dict]] = None
parse_dates: Optional[Union[List, Dict]] = None
columns: Optional[List[str]] = None
chunksize: Optional[int] = 10_000
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.sql is None:
raise ValueError("sql must be specified")
if self.con is None:
raise ValueError("con must be specified") | 193 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/packaged_modules/sql/sql.py |
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[datasets.Features] = None,
) -> str:
config_kwargs = config_kwargs.copy()
# We need to stringify the Selectable object to make its hash deterministic
# The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
sql = config_kwargs["sql"]
if not isinstance(sql, str):
if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
import sqlalchemy
if isinstance(sql, sqlalchemy.sql.Selectable):
engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
sql_str = str(sql.compile(dialect=engine.dialect))
config_kwargs["sql"] = sql_str
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
else:
raise TypeError(
f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
)
con = config_kwargs["con"]
if not isinstance(con, str):
config_kwargs["con"] = id(con)
logger.info(
f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
)
return super().create_config_id(config_kwargs, custom_features=custom_features)
@property
def pd_read_sql_kwargs(self):
pd_read_sql_kwargs = {
"index_col": self.index_col,
"columns": self.columns,
"params": self.params,
"coerce_float": self.coerce_float,
"parse_dates": self.parse_dates,
}
return pd_read_sql_kwargs
class Sql(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = SqlConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
def _generate_tables(self):
chunksize = self.config.chunksize
sql_reader = pd.read_sql(
self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
)
sql_reader = [sql_reader] if chunksize is None else sql_reader
for chunk_idx, df in enumerate(sql_reader):
pa_table = pa.Table.from_pandas(df)
yield chunk_idx, self._cast_table(pa_table)
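# Usage sketch (not part of sql.py): the Sql builder is normally reached through
# `Dataset.from_sql`; passing `con` as a URI string keeps the config hashable, as noted in
# `create_config_id` above. The query and database below are hypothetical.
from datasets import Dataset

ds = Dataset.from_sql("SELECT id, text FROM articles", "sqlite:///articles.db")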
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
Raised when using the dataset viewer HTTP API and:
- the dataset is missing, or
- the dataset is private/gated and the user is not authenticated, or
- the /parquet or /info responses are unavailable.
"""
class _NoDuplicateSafeLoader(yaml.SafeLoader):
def _check_no_duplicates_on_constructed_node(self, node):
keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
keys = [tuple(key) if isinstance(key, list) else key for key in keys]
counter = Counter(keys)
duplicate_keys = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
def construct_mapping(self, node, deep=False):
mapping = super().construct_mapping(node, deep=deep)
self._check_no_duplicates_on_constructed_node(node)
return mapping
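# Illustrative sketch (not part of metadata.py): this loader rejects duplicate YAML keys
# instead of silently keeping the last value.
import yaml

yaml.load("a: 1\nb: 2", Loader=_NoDuplicateSafeLoader)  # {'a': 1, 'b': 2}
yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)  # raises TypeError: Got duplicate yaml keys: ['a']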
class MetadataConfigs(Dict[str, Dict[str, Any]]):
"""Should be in format {config_name: {**config_params}}."""
FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
@staticmethod
def _raise_if_data_files_field_not_valid(metadata_config: dict):
yaml_data_files = metadata_config.get("data_files")
if yaml_data_files is not None:
yaml_error_message = textwrap.dedent(
f"""
Expected data_files in YAML to be either a string or a list of strings
or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
Examples of data_files in YAML:
data_files: data.csv
data_files: data/*.png
data_files:
- part0/*
- part1/*
data_files:
- split: train
path: train/*
- split: test
path: test/*
data_files:
- split: train
path:
- train/part1/*
- train/part2/*
- split: test
path: test/*
PS: some symbols like dashes '-' are not allowed in split names
"""
)
if not isinstance(yaml_data_files, (list, str)):
raise ValueError(yaml_error_message)
if isinstance(yaml_data_files, list):
for yaml_data_files_item in yaml_data_files:
if (
not isinstance(yaml_data_files_item, (str, dict))
or isinstance(yaml_data_files_item, dict)
and not (
len(yaml_data_files_item) == 2
and "split" in yaml_data_files_item
and re.match(_split_re, yaml_data_files_item["split"])
and isinstance(yaml_data_files_item.get("path"), (str, list))
)
):
raise ValueError(yaml_error_message)
@classmethod
def _from_exported_parquet_files_and_dataset_infos(
cls,
parquet_commit_hash: str,
exported_parquet_files: List[Dict[str, Any]],
dataset_infos: DatasetInfosDict,
) -> "MetadataConfigs":
metadata_configs = {
config_name: {
"data_files": [
{
"split": split_name,
"path": [
parquet_file["url"].replace("refs%2Fconvert%2Fparquet", parquet_commit_hash)
for parquet_file in parquet_files_for_split
],
}
for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
],
"version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
}
for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
}
if dataset_infos:
# Preserve order of configs and splits
metadata_configs = {
config_name: {
"data_files": [
data_file
for split_name in dataset_info.splits
for data_file in metadata_configs[config_name]["data_files"]
if data_file["split"] == split_name
],
"version": metadata_configs[config_name]["version"],
}
for config_name, dataset_info in dataset_infos.items()
}
return cls(metadata_configs)
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
if dataset_card_data.get(cls.FIELD_NAME):
metadata_configs = dataset_card_data[cls.FIELD_NAME]
if not isinstance(metadata_configs, list):
raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
for metadata_config in metadata_configs:
if "config_name" not in metadata_config:
raise ValueError(
f"Each config must include `config_name` field with a string name of a config, "
f"but got {metadata_config}. "
)
cls._raise_if_data_files_field_not_valid(metadata_config)
return cls(
{
config.pop("config_name"): {
param: value if param != "features" else Features._from_yaml_list(value) | 197 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/metadata.py |
for param, value in config.items()
}
for metadata_config in metadata_configs
if (config := metadata_config.copy())
}
)
return cls()
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
for metadata_config in self.values():
self._raise_if_data_files_field_not_valid(metadata_config)
current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
for config_name, config_metadata in total_metadata_configs.items():
config_metadata.pop("config_name", None)
dataset_card_data[self.FIELD_NAME] = [
{"config_name": config_name, **config_metadata}
for config_name, config_metadata in total_metadata_configs.items()
]
def get_default_config_name(self) -> Optional[str]:
default_config_name = None
for config_name, metadata_config in self.items():
if len(self) == 1 or config_name == "default" or metadata_config.get("default"):
if default_config_name is None:
default_config_name = config_name
else:
raise ValueError(
f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
)
return default_config_name
class Version:
"""Dataset version `MAJOR.MINOR.PATCH`.
Args:
version_str (`str`):
The dataset version.
description (`str`):
A description of what is new in this version.
major (`str`):
minor (`str`):
patch (`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __hash__(self):
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
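# Illustrative sketch (not part of version.py): comparisons accept another Version or a plain
# "MAJOR.MINOR.PATCH" string via `_validate_operand` (assuming `_str_to_version_tuple` yields ints).
v = Version("1.2.0")
assert v == "1.2.0"
assert v < "2.0.0"
assert repr(v) == "1.2.0"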
class tracked_str(str):
origins = {}
def set_origin(self, origin: str):
if super().__repr__() not in self.origins:
self.origins[super().__repr__()] = origin
def get_origin(self):
return self.origins.get(super().__repr__(), str(self))
def __repr__(self) -> str:
if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
return super().__repr__()
else:
return f"{str(self)} (origin={self.origins[super().__repr__()]})" | 199 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/track.py |
class tracked_list(list):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.last_item = None
def __iter__(self) -> Iterator:
for x in super().__iter__():
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})" | 200 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/track.py |
class TrackedIterableFromGenerator(Iterable):
"""Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
def __init__(self, generator, *args):
super().__init__()
self.generator = generator
self.args = args
self.last_item = None
def __iter__(self):
for x in self.generator(*self.args):
self.last_item = x
yield x
self.last_item = None
def __repr__(self) -> str:
if self.last_item is None:
return super().__repr__()
else:
return f"{self.__class__.__name__}(current={self.last_item})"
def __reduce__(self):
return (self.__class__, (self.generator, *self.args))
class NonMutableDict(dict):
"""Dict where keys can only be added but not modified.
Will raise an error if the user tries to overwrite an existing key. The error message
can be customized during construction. It will be formatted using {key} for
the overwritten key.
"""
def __init__(self, *args, **kwargs):
self._error_msg = kwargs.pop(
"error_msg",
"Try to overwrite existing key: {key}",
)
if kwargs:
raise ValueError("NonMutableDict cannot be initialized with kwargs.")
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key in self:
raise ValueError(self._error_msg.format(key=key))
return super().__setitem__(key, value)
def update(self, other):
if any(k in self for k in other):
raise ValueError(self._error_msg.format(key=set(self) & set(other)))
return super().update(other)
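# Illustrative sketch (not part of py_utils.py): keys can be added once but never overwritten.
splits = NonMutableDict(error_msg="Split {key} is already defined")
splits["train"] = 100
splits["train"] = 200  # raises ValueError: Split train is already defined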
class classproperty(property): # pylint: disable=invalid-name
"""Descriptor to be used as decorator for @classmethods."""
def __get__(self, obj, objtype=None):
return self.fget.__get__(None, objtype)()
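# Illustrative sketch (not part of py_utils.py): stacked on top of `classmethod`, the property
# becomes readable directly on the class. `Config` here is a hypothetical example class.
class Config:
    _name = "default"

    @classproperty
    @classmethod
    def name(cls):
        return cls._name

assert Config.name == "default"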
class NestedDataStructure:
def __init__(self, data=None):
self.data = data if data is not None else []
def flatten(self, data=None):
data = data if data is not None else self.data
if isinstance(data, dict):
return self.flatten(list(data.values()))
elif isinstance(data, (list, tuple)):
return [flattened for item in data for flattened in self.flatten(item)]
else:
return [data]
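# Illustrative sketch (not part of py_utils.py): `flatten` walks nested dicts/lists/tuples and
# returns the leaf values in order.
nested = NestedDataStructure({"train": ["a.csv", "b.csv"], "test": ("c.csv",)})
assert nested.flatten() == ["a.csv", "b.csv", "c.csv"]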
class tqdm(old_tqdm):
"""
Class to override `disable` argument in case progress bars are globally disabled.
Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
"""
def __init__(self, *args, **kwargs):
if are_progress_bars_disabled():
kwargs["disable"] = True
super().__init__(*args, **kwargs)
def __delattr__(self, attr: str) -> None:
"""Fix for https://github.com/huggingface/datasets/issues/6066"""
try:
super().__delattr__(attr)
except AttributeError:
if attr != "_lock":
raise
class _PatchedModuleObj:
"""Set all the modules components as attributes of the _PatchedModuleObj object."""
def __init__(self, module, attrs=None):
attrs = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__"):
setattr(self, key, getattr(module, key))
self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
"""
Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.
Example::
>>> import importlib
>>> from datasets.load import dataset_module_factory
>>> from datasets.streaming import patch_submodule, xjoin
>>>
>>> dataset_module = dataset_module_factory("snli")
>>> snli_module = importlib.import_module(dataset_module.module_path)
>>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
>>> patcher.start()
>>> assert snli_module.os.path.join is xjoin
"""
_active_patches = []
def __init__(self, obj, target: str, new, attrs=None):
self.obj = obj
self.target = target
self.new = new
self.key = target.split(".")[0]
self.original = {}
self.attrs = attrs or []
def __enter__(self):
*submodules, target_attr = self.target.split(".") | 207 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/patching.py |
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(submodules)):
try:
submodule = import_module(".".join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
obj_attr = getattr(self.obj, attr)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
):
self.original[attr] = obj_attr
# patch at top level
setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
patched = getattr(self.obj, attr)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
patched = getattr(patched, key)
# finally set the target attribute
setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
attr_value = getattr(import_module(".".join(submodules)), target_attr)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, attr) is attr_value:
self.original[attr] = getattr(self.obj, attr)
setattr(self.obj, attr, self.new)
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
self.original[target_attr] = globals()["__builtins__"][target_attr]
setattr(self.obj, target_attr, self.new)
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
def __exit__(self, *exc_info):
for attr in list(self.original):
setattr(self.obj, attr, self.original.pop(attr))
def start(self):
"""Activate a patch."""
self.__enter__()
self._active_patches.append(self)
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
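# Usage sketch (not part of patching.py): besides start()/stop(), the patcher works as a context
# manager. `my_module` (assumed to do `import os` at top level) and `fake_join` are hypothetical.
def fake_join(*parts):
    return "/".join(parts)

with patch_submodule(my_module, "os.path.join", fake_join):
    assert my_module.os.path.join is fake_join
# on exit, the patched globals of `my_module` are restored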
class FileLock(FileLock_):
"""
A `filelock.FileLock` initializer that handles long paths.
It also uses the current umask for lock files.
"""
MAX_FILENAME_LENGTH = 255
def __init__(self, lock_file, *args, **kwargs):
# The "mode" argument is required if we want to use the current umask in filelock >= 3.10
# In previous versions it was already using the current umask.
if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
umask = os.umask(0o666)
os.umask(umask)
kwargs["mode"] = 0o666 & ~umask
lock_file = self.hash_filename_if_too_long(lock_file)
super().__init__(lock_file, *args, **kwargs)
@classmethod
def hash_filename_if_too_long(cls, path: str) -> str:
path = os.path.abspath(os.path.expanduser(path))
filename = os.path.basename(path)
max_filename_length = cls.MAX_FILENAME_LENGTH
if issubclass(cls, UnixFileLock):
max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
if len(filename) > max_filename_length:
dirname = os.path.dirname(path)
hashed_filename = str(hash(filename))
new_filename = (
filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
)
return os.path.join(dirname, new_filename)
else:
return path
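# Usage sketch (not part of _filelock.py): lock-file names longer than the filesystem limit are
# hashed down before locking. The path below is hypothetical.
lock_path = "/tmp/" + "x" * 300 + ".lock"
with FileLock(lock_path):
    pass  # critical section; the lock file on disk gets a shortened, hashed name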
class TqdmCallback(fsspec.callbacks.TqdmCallback):
def __init__(self, tqdm_kwargs=None, *args, **kwargs):
if config.FSSPEC_VERSION < version.parse("2024.2.0"):
super().__init__(tqdm_kwargs, *args, **kwargs)
self._tqdm = _tqdm # replace tqdm module by datasets.utils.tqdm module
else:
kwargs["tqdm_cls"] = _tqdm.tqdm
super().__init__(tqdm_kwargs, *args, **kwargs)
class NonStreamableDatasetError(Exception):
pass
class xPath(type(Path())):
"""Extension of `pathlib.Path` to support both local paths and remote URLs."""
def __str__(self):
path_str = super().__str__()
main_hop, *rest_hops = path_str.split("::")
if is_local_path(main_hop):
return main_hop
path_as_posix = path_str.replace("\\", "/")
path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
return path_as_posix
def exists(self, download_config: Optional[DownloadConfig] = None):
"""Extend `pathlib.Path.exists` method to support both local and remote files.
Args:
download_config : mainly use token or storage_options to support different platforms and auth types.
Returns:
`bool`
"""
return xexists(str(self), download_config=download_config)
def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
"""Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
download_config : mainly use token or storage_options to support different platforms and auth types.
Yields:
[`xPath`]
"""
posix_path = self.as_posix()
main_hop, *rest_hops = posix_path.split("::")
if is_local_path(main_hop):
yield from Path(main_hop).glob(pattern)
else:
# globbing inside a zip in a private repo requires authentication
if rest_hops:
urlpath = rest_hops[0]
urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
storage_options = {urlpath.split("://")[0]: storage_options}
posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
else:
storage_options = None
fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {}))
globbed_paths = fs.glob(xjoin(main_hop, pattern))
for globbed_path in globbed_paths:
yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops)) | 211 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py |
def rglob(self, pattern, **kwargs):
"""Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Args:
pattern (`str`): Pattern that resulting paths must match.
Yields:
[`xPath`]
"""
return self.glob("**/" + pattern, **kwargs)
@property
def parent(self) -> "xPath":
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
[`xPath`]
"""
return type(self)(xdirname(self.as_posix()))
@property
def name(self) -> str:
"""Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).name | 211 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py |
@property
def stem(self) -> str:
"""Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).stem
@property
def suffix(self) -> str:
"""Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
Returns:
`str`
"""
return PurePosixPath(self.as_posix().split("::")[0]).suffix
def open(self, *args, **kwargs):
"""Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
Args:
*args: Arguments passed to :func:`fsspec.open`.
**kwargs: Keyword arguments passed to :func:`fsspec.open`.
Returns:
`io.FileIO`: File-like object.
"""
return xopen(str(self), *args, **kwargs)
def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
"""Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
Args:
*p (`tuple` of `str`): Other path components.
Returns:
[`xPath`]
"""
return type(self)(xjoin(self.as_posix(), *p))
def __truediv__(self, p: str) -> "xPath":
return self.joinpath(p)
def with_suffix(self, suffix):
main_hop, *rest_hops = str(self).split("::")
if is_local_path(main_hop):
return type(self)(str(super().with_suffix(suffix)))
return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops)) | 211 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py |
class ArchiveIterable(TrackedIterableFromGenerator):
"""An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
@staticmethod
def _iter_tar(f):
stream = tarfile.open(fileobj=f, mode="r|*")
for tarinfo in stream:
file_path = tarinfo.name
if not tarinfo.isreg():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = stream.extractfile(tarinfo)
yield file_path, file_obj
stream.members = []
del stream
@staticmethod
def _iter_zip(f):
zipf = zipfile.ZipFile(f)
for member in zipf.infolist():
file_path = member.filename
if member.is_dir():
continue
if file_path is None:
continue
if os.path.basename(file_path).startswith((".", "__")):
# skipping hidden files
continue
file_obj = zipf.open(member)
yield file_path, file_obj
@classmethod
def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol_with_magic_number(f)
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def _iter_from_urlpath(
cls, urlpath: str, download_config: Optional[DownloadConfig] = None
) -> Generator[Tuple, None, None]:
compression = _get_extraction_protocol(urlpath, download_config=download_config)
# Set block_size=0 to get faster streaming
# (e.g. for hf:// and https:// it uses streaming Requests file-like instances)
with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f:
if compression == "zip":
yield from cls._iter_zip(f)
else:
yield from cls._iter_tar(f)
@classmethod
def from_buf(cls, fileobj) -> "ArchiveIterable":
return cls(cls._iter_from_fileobj, fileobj)
@classmethod
def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
class FilesIterable(TrackedIterableFromGenerator):
"""An iterable of paths from a list of directories or files"""
@classmethod
def _iter_from_urlpaths(
cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
) -> Generator[str, None, None]:
if not isinstance(urlpaths, list):
urlpaths = [urlpaths]
for urlpath in urlpaths:
if xisfile(urlpath, download_config=download_config):
yield urlpath
elif xisdir(urlpath, download_config=download_config):
for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
# in-place modification to prune the search
dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
if xbasename(dirpath).startswith((".", "__")):
# skipping hidden directories
continue
for filename in sorted(filenames):
if filename.startswith((".", "__")): | 213 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/utils/file_utils.py |
# skipping hidden files
continue
yield xjoin(dirpath, filename)
else:
raise FileNotFoundError(urlpath)
@classmethod
def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
return cls(cls._iter_from_urlpaths, urlpaths, download_config)
class Pickler(dill.Pickler):
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
_legacy_no_dict_keys_sorting = False
def save(self, obj, save_persistent_id=True):
obj_type = type(obj)
if obj_type not in self.dispatch:
if "regex" in sys.modules:
import regex # type: ignore
if obj_type is regex.Pattern:
pklregister(obj_type)(_save_regexPattern)
if "spacy" in sys.modules:
import spacy # type: ignore
if issubclass(obj_type, spacy.Language):
pklregister(obj_type)(_save_spacyLanguage)
if "tiktoken" in sys.modules:
import tiktoken # type: ignore
if obj_type is tiktoken.Encoding:
pklregister(obj_type)(_save_tiktokenEncoding)
if "torch" in sys.modules:
import torch # type: ignore
if issubclass(obj_type, torch.Tensor):
pklregister(obj_type)(_save_torchTensor)
if obj_type is torch.Generator:
pklregister(obj_type)(_save_torchGenerator)
# Unwrap `torch.compile`-ed modules
if issubclass(obj_type, torch.nn.Module):
obj = getattr(obj, "_orig_mod", obj)
if "transformers" in sys.modules:
import transformers # type: ignore
if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
# Unwrap `torch.compile`-ed functions
if obj_type is FunctionType:
obj = getattr(obj, "_torchdynamo_orig_callable", obj)
dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
def _batch_setitems(self, items):
if self._legacy_no_dict_keys_sorting:
return super()._batch_setitems(items)
# Ignore the order of keys in a dict
try:
# Faster, but fails for unorderable elements
items = sorted(items)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
items = sorted(items, key=lambda x: Hasher.hash(x[0]))
dill.Pickler._batch_setitems(self, items)
def memoize(self, obj):
# Don't memoize strings since two identical strings can have different Python ids
if type(obj) is not str: # noqa: E721
dill.Pickler.memoize(self, obj)
class ExtractManager:
def __init__(self, cache_dir: Optional[str] = None):
self.extract_dir = (
os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def _get_output_path(self, path: str) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
abs_path = os.path.abspath(path)
return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
def _do_extract(self, output_path: str, force_extract: bool) -> bool:
return force_extract or (
not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
)
def extract(self, input_path: str, force_extract: bool = False) -> str:
extractor_format = self.extractor.infer_extractor_format(input_path)
if not extractor_format:
return input_path
output_path = self._get_output_path(input_path)
if self._do_extract(output_path, force_extract):
self.extractor.extract(input_path, output_path, extractor_format)
return output_path
class BaseExtractor(ABC):
@classmethod
@abstractmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ...
@staticmethod
@abstractmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
magic_numbers: List[bytes] = []
@staticmethod
def read_magic_number(path: Union[Path, str], magic_number_length: int):
with open(path, "rb") as f:
return f.read(magic_number_length)
@classmethod
def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
if not magic_number:
magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
try:
magic_number = cls.read_magic_number(path, magic_number_length)
except OSError:
return False
return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
@classmethod
def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
return tarfile.is_tarfile(path)
@staticmethod
def safemembers(members, output_path):
"""
Fix for CVE-2007-4559
Desc:
Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
From: https://stackoverflow.com/a/10077309
"""
def resolved(path: str) -> str:
return os.path.realpath(os.path.abspath(path))
def badpath(path: str, base: str) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(base, path)).startswith(base)
def badlink(info, base: str) -> bool:
# Links are interpreted relative to the directory containing the link
tip = resolved(os.path.join(base, os.path.dirname(info.name)))
return badpath(info.linkname, base=tip)
base = resolved(output_path)
for finfo in members:
if badpath(finfo.name, base):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
elif finfo.issym() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
elif finfo.islnk() and badlink(finfo, base):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
else:
yield finfo
@staticmethod
def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
os.makedirs(output_path, exist_ok=True)
tar_file = tarfile.open(input_path)
tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
tar_file.close()
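# Usage sketch (not part of extract.py): ExtractManager caches extractions and skips work when the
# output already exists; non-archives are returned unchanged. The archive path is hypothetical.
manager = ExtractManager()
extracted_dir = manager.extract("downloads/archive.tar.gz")
extracted_dir = manager.extract("downloads/archive.tar.gz")  # second call reuses the cached output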