import csv
import os

import datasets
from tqdm import tqdm

_DESCRIPTION = """
This dataset consists of over 385 hours of audio extracted from various YouTube videos in the Persian language.
Note: This dataset contains raw, unvalidated transcriptions. Users are advised to:
1. Perform their own quality assessment
2. Create their own train/validation/test splits based on their specific needs
3. Validate a subset of the data if needed for their use case
"""
_CITATION = """
Use this repo info/link for citation.
"""
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
_HOMEPAGE = "https://huggingface.co./datasets/PerSets/fytasr"
_BASE_URL = "D:/persets/ytDataset/" #"https://huggingface.co./datasets/PerSets/fytasr/resolve/main/"
_AUDIO_URL = _BASE_URL + "clips/unvalidated_{shard_idx}.tar"
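
# Expected layout under _BASE_URL (inferred from the URLs and the reader below;
# the shard count is restored from the original comment in _split_generators):
#   clips/unvalidated_1.tar ... clips/unvalidated_21.tar   tar shards of .mp3 clips
#   clips/unvalidated.csv                                   metadata with "file_name" and "sentence" columns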
class FarsiYoutubeDataset(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 1000
    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                "audio": datasets.Audio(sampling_rate=44_100),  # 44.1 kHz is typical for YouTube audio; adjust if the clips differ
                "text": datasets.Value("string"),
                "file_name": datasets.Value("string"),
            }),
            supervised_keys=None,
            license=_LICENSE,
            citation=_CITATION,
            version=self.VERSION,
            description=_DESCRIPTION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Shards unvalidated_1.tar .. unvalidated_21.tar
        archive_paths = [_AUDIO_URL.format(shard_idx=i) for i in range(1, 22)]
        # Streaming mode iterates the tar archives directly; otherwise extract them to disk first
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        return [
            datasets.SplitGenerator(
                name="unvalidated",  # single raw split; adjust as needed
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    # download() resolves the CSV to a local cache path (or a streamable URL when streaming)
                    "meta_path": dl_manager.download(_BASE_URL + "clips/unvalidated.csv"),
                },
            ),
        ]
    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yields (key, example) pairs keyed by the clip path inside its shard."""
        # Load CSV metadata keyed by file name
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                # Normalize file names so they match the member paths inside the tar shards
                if not row["file_name"].endswith(".mp3"):
                    row["file_name"] += ".mp3"
                # The CSV stores the transcription under "sentence"; expose it as "text"
                if "sentence" in row:
                    row["text"] = row["sentence"]
                    del row["sentence"]
                # Ensure every declared feature is present, even if empty
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["file_name"]] = row
        # Walk each shard and attach the matching metadata row to every clip
        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename in metadata:
                    result = dict(metadata[filename])
                    # Set the audio feature and the path to the extracted file
                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
                    result["audio"] = {"path": path, "bytes": file.read()}
                    result["file_name"] = path
                    yield path, result
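
# Usage sketch (assumes this script is the loading script of the PerSets/fytasr repo;
# `trust_remote_code=True` is required for script-based datasets in recent `datasets` releases):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("PerSets/fytasr", split="unvalidated",
#                       streaming=True, trust_remote_code=True)
#     sample = next(iter(ds))
#     print(sample["file_name"], sample["text"])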