import csv
import io
import os
import tarfile
from typing import Dict, List

import datasets
import pandas as pd
from tqdm import tqdm

_DESCRIPTION = """
This dataset consists of various Youtube videos in Persian language.
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
1. Perform their own quality assessment
2. Create their own train/validation/test splits based on their specific needs
3. Validate a subset of the data if needed for their use case
"""

_CITATION = """
Use this repo info/link for citation.
"""

_LICENSE = "CC0"

# Remote layout of the dataset repository on the Hugging Face Hub.
_BASE_URL = "https://huggingface.co./datasets/PerSets/asr2/resolve/main/clips"
_METADATA_URL = f"{_BASE_URL}/metadata.csv"
# Number of clips_XXX.tar shards currently published.
# TODO: raise to 21 (range stop 22) once all archives are uploaded —
# the original code carried a "#22" marker for this.
_N_ARCHIVES = 7


class FarsiYoutubeDataset(datasets.GeneratorBasedBuilder):
    """Persian-language YouTube ASR dataset with audio files stored in tar archives."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset schema: raw audio, transcription text, file name."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            license=_LICENSE,
            features=datasets.Features({
                # NOTE(review): 44_000 Hz is unusual (44_100 is the CD-audio
                # standard) — kept as-is to preserve behavior; confirm upstream.
                "audio": datasets.Audio(sampling_rate=44_000),
                "text": datasets.Value("string"),
                "file_name": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Return the split generators (a single TRAIN split).

        Fix: the metadata CSV was previously passed to ``_generate_examples``
        as a raw URL and fed straight to ``open()``, which cannot read URLs.
        It is now downloaded (and cached) via ``dl_manager.download`` so the
        generator always receives a readable local path.
        """
        archive_paths = [
            f"{_BASE_URL}/clips_{i:03d}.tar" for i in range(1, _N_ARCHIVES + 1)
        ]
        # In streaming mode the tars are iterated on the fly; otherwise extract
        # them locally so each example can point at a real file path.
        local_extracted_archive_paths = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        meta_path = dl_manager.download(_METADATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    "meta_path": meta_path,
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yield ``(key, example)`` pairs, joining tar'd audio with CSV metadata.

        Args:
            local_extracted_archive_paths: per-archive extraction dirs
                (empty/falsy when streaming).
            archives: one ``iter_archive`` iterator per tar shard, yielding
                ``(path_inside_tar, file_object)`` pairs.
            meta_path: local path to the downloaded ``metadata.csv``.
        """
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            # QUOTE_NONE: the CSV is assumed unquoted; commas inside
            # transcriptions would break rows — presumably sanitized upstream.
            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                # Normalize names so they match the paths inside the tars.
                if not row["file_name"].endswith(".mp3"):
                    row["file_name"] += ".mp3"
                # Some metadata variants use "sentence" instead of "text".
                if "sentence" in row:
                    row['text'] = row['sentence']
                    del row['sentence']
                # Guarantee every declared feature key exists on the row.
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["file_name"]] = row
        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename not in metadata:
                    # Audio without a transcription row is skipped silently.
                    continue
                result = dict(metadata[filename])
                # Point at the extracted file when available (non-streaming);
                # when streaming, keep the in-archive relative path.
                path = (
                    os.path.join(local_extracted_archive_paths[i], path)
                    if local_extracted_archive_paths
                    else path
                )
                result["audio"] = {"path": path, "bytes": file.read()}
                result["file_name"] = path
                yield path, result