import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from multiprocessing import Pool
from typing import Optional

import pandas as pd
from tqdm import tqdm

# dataset config
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)

# processor config
n_pool = int(os.getenv("N_POOL", 8))
wget_max_retry = os.getenv("MAX_RETRY", "1")
wget_timeout = os.getenv("TIMEOUT", "20")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 100000))


def wget(url: str, output_file: Optional[str] = None) -> bool:
    """Download a file with wget and unpack it if it is a tar/gz/zip archive."""
    # fall back to the URL basename when no output path is given
    output_file = output_file if output_file else os.path.basename(url)
    if os.path.dirname(output_file):
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        if output_file.endswith('.tar'):
            tar = tarfile.open(output_file)
        else:
            tar = tarfile.open(output_file, "r:gz")
        tar.extractall(os.path.dirname(output_file))
        tar.close()
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall()
        os.remove(output_file)
    return True


def get_metadata() -> pd.DataFrame:
    """Fetch the metadata table for the configured direction, sorted by line_no and side."""
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata).replace(".gz", "")
    meta_data_path = p_join("download", "meta", meta_data_filename)
    if not os.path.exists(meta_data_path):
        assert wget(url_metadata, output_file=meta_data_path)
    # regex separator requires the python engine
    df = pd.read_csv(meta_data_path, sep=r'[\t\s]', header=None, engine="python")[[0, 2, 6, 9, 10, 11, 12]]
    df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])


def get_audio(dataframe: pd.DataFrame) -> bool:
    """Download both audio sides of one aligned line and dump its features as JSON."""
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        # cast NumPy scalars to plain Python types so the dict stays JSON-serializable
        features.update({f"{side}.{k}": v.item() if hasattr(v, "item") else v for k, v in df.iloc[0].to_dict().items()})
        features[f"{side}.path"] = p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"]))
        if not os.path.exists(features[f"{side}.path"]):
            if not wget(features[f"{side}.url"], output_file=features[f"{side}.path"]):
                return False
    # one JSON file per aligned line inside the feature cache directory
    with open(p_join(cache_dir_feature, f"{features['line_no']}.json"), "w") as f:
        json.dump(features, f)
    return True


def process_dataset():
    df_metadata = get_metadata()
    print(f"metadata: {len(df_metadata)}")
    inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
    print(f"filtered unique lines: {len(inputs)}")
    inputs = [g for g in inputs if len(g) == 2]
    print(f"removed lines with != 2 entries: {len(inputs)}")
    inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
    print(f"removed lines without both sides: {len(inputs)}")
    if n_pool == 1:
        for g in tqdm(inputs, total=len(inputs)):
            if not get_audio(g):
                print(f"failed:\n{g['url']}")
    else:
        with Pool(n_pool) as pool:
            pool.map(get_audio, tqdm(inputs, total=len(inputs)))


if __name__ == '__main__':
    process_dataset()
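# Usage sketch (assumptions: a POSIX shell with `wget` on PATH, and this script saved
# as e.g. download_audio.py -- the file name is illustrative, not part of the source):
#
#   DIRECTION=enA-jaA N_POOL=8 LINE_NO_START=0 LINE_NO_END=1000 python download_audio.py
#
# Audio files land under download/audio/<DIRECTION>/ and one feature JSON per aligned
# line under download/feature/<DIRECTION>/.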