asahi417 committed on
Commit
cc01e7a
·
1 Parent(s): fb589be
Files changed (1) hide show
  1. download_audio.py +16 -15
download_audio.py CHANGED
@@ -18,18 +18,21 @@ url_metadata_dict = {
18
  }
19
  direction = os.getenv("DIRECTION", "enA-jaA")
20
  sides = set(direction.split("-"))
 
 
 
 
21
  # processor config
22
  n_pool = int(os.getenv("N_POOL", 8))
23
  wget_max_retry = os.getenv("MAX_RETRY", "1")
24
  wget_timeout = os.getenv("TIMEOUT", "20")
25
  line_no_start = int(os.getenv("LINE_NO_START", 0))
26
- line_no_end = int(os.getenv("LINE_NO_END", 500000))
27
 
28
 
29
- def wget(url: str, cache_dir: str, filename: Optional[str] = None):
30
- os.makedirs(cache_dir, exist_ok=True)
31
- filename = os.path.basename(url) if not filename else filename
32
- output_file = p_join(cache_dir, filename)
33
  subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
34
  if not os.path.exists(output_file):
35
  return False
@@ -38,7 +41,7 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):
38
  tar = tarfile.open(output_file)
39
  else:
40
  tar = tarfile.open(output_file, "r:gz")
41
- tar.extractall(cache_dir)
42
  tar.close()
43
  os.remove(output_file)
44
  elif output_file.endswith('.gz'):
@@ -48,7 +51,7 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):
48
  os.remove(output_file)
49
  elif output_file.endswith('.zip'):
50
  with zipfile.ZipFile(output_file, 'r') as zip_ref:
51
- zip_ref.extractall(cache_dir)
52
  os.remove(output_file)
53
  return True
54
 
@@ -56,10 +59,10 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):
56
  def get_metadata():
57
  url_metadata = url_metadata_dict[direction]
58
  meta_data_filename = os.path.basename(url_metadata).replace(".gz", "")
59
- cache_dir_metadata = p_join("download", "meta")
60
- if not os.path.exists(p_join(cache_dir_metadata, meta_data_filename)):
61
- assert wget(url_metadata, cache_dir=cache_dir_metadata)
62
- df = pd.read_csv(p_join(cache_dir_metadata, meta_data_filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
63
  df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
64
  assert len(df["direction"].unique()) == 1
65
  df.pop("direction")
@@ -67,15 +70,13 @@ def get_metadata():
67
 
68
 
69
  def get_audio(dataframe: pd.DataFrame):
70
- cache_dir_audio = p_join("download", "audio", direction)
71
- cache_dir_feature = p_join("download", "feature", direction)
72
  features = {"line_no": dataframe.pop('line_no').values[0]}
73
  for side, df in dataframe.groupby("side"):
74
  df.pop("side")
75
  features.update({f"{side}.{k}": v for k, v in df.iloc[0].to_dict().items()})
76
  features[f"{side}.path"] = p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"]))
77
  if not os.path.exists(features[f"{side}.path"]):
78
- if not wget(features[f"{side}.url"], filename=features[f"{side}.path"], cache_dir=cache_dir_audio):
79
  return False
80
  with open(cache_dir_feature, "w") as f:
81
  json.dump(features, f)
@@ -97,7 +98,7 @@ def process_dataset():
97
  print(f"failed:\n{g['url']}")
98
  else:
99
  with Pool(n_pool) as pool:
100
- pool.starmap(get_audio, tqdm(inputs, total=len(inputs)))
101
 
102
 
103
  if __name__ == '__main__':
 
18
  }
19
  direction = os.getenv("DIRECTION", "enA-jaA")
20
  sides = set(direction.split("-"))
21
+ cache_dir_audio = p_join("download", "audio", direction)
22
+ cache_dir_feature = p_join("download", "feature", direction)
23
+ os.makedirs(cache_dir_audio, exist_ok=True)
24
+ os.makedirs(cache_dir_feature, exist_ok=True)
25
  # processor config
26
  n_pool = int(os.getenv("N_POOL", 8))
27
  wget_max_retry = os.getenv("MAX_RETRY", "1")
28
  wget_timeout = os.getenv("TIMEOUT", "20")
29
  line_no_start = int(os.getenv("LINE_NO_START", 0))
30
+ line_no_end = int(os.getenv("LINE_NO_END", 100000))
31
 
32
 
33
+ def wget(url: str, output_file: Optional[str] = None):
34
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
35
+ filename = os.path.basename(url) if not output_file else output_file
 
36
  subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
37
  if not os.path.exists(output_file):
38
  return False
 
41
  tar = tarfile.open(output_file)
42
  else:
43
  tar = tarfile.open(output_file, "r:gz")
44
+ tar.extractall(os.path.dirname(filename))
45
  tar.close()
46
  os.remove(output_file)
47
  elif output_file.endswith('.gz'):
 
51
  os.remove(output_file)
52
  elif output_file.endswith('.zip'):
53
  with zipfile.ZipFile(output_file, 'r') as zip_ref:
54
+ zip_ref.extractall()
55
  os.remove(output_file)
56
  return True
57
 
 
59
  def get_metadata():
60
  url_metadata = url_metadata_dict[direction]
61
  meta_data_filename = os.path.basename(url_metadata).replace(".gz", "")
62
+ meta_data_path = p_join("download", "meta", meta_data_filename)
63
+ if not os.path.exists(meta_data_path):
64
+ assert wget(url_metadata, output_file=meta_data_path)
65
+ df = pd.read_csv(meta_data_path, sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
66
  df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
67
  assert len(df["direction"].unique()) == 1
68
  df.pop("direction")
 
70
 
71
 
72
  def get_audio(dataframe: pd.DataFrame):
 
 
73
  features = {"line_no": dataframe.pop('line_no').values[0]}
74
  for side, df in dataframe.groupby("side"):
75
  df.pop("side")
76
  features.update({f"{side}.{k}": v for k, v in df.iloc[0].to_dict().items()})
77
  features[f"{side}.path"] = p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"]))
78
  if not os.path.exists(features[f"{side}.path"]):
79
+ if not wget(features[f"{side}.url"], output_file=features[f"{side}.path"]):
80
  return False
81
  with open(cache_dir_feature, "w") as f:
82
  json.dump(features, f)
 
98
  print(f"failed:\n{g['url']}")
99
  else:
100
  with Pool(n_pool) as pool:
101
+ pool.map(get_audio, tqdm(inputs, total=len(inputs)))
102
 
103
 
104
  if __name__ == '__main__':