"""Basic audio reconstruction experiment."""
import os
from os.path import join as p_join
import subprocess

from datasets import load_dataset
import torch
from audiocraft.data.audio import audio_read, audio_write
from multibanddiffusion import MultiBandDiffusion
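# Root directory under which downloaded samples and reconstructions are written.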
cache_dir = "audio"
os.makedirs(cache_dir, exist_ok=True)
def test_files(mbd_model, num_codebooks: int = 8, skip_enhancer: bool = False):
    """Test with audio files."""
    output_audio_dir = p_join(cache_dir, "sample_audio", "original")
    os.makedirs(output_audio_dir, exist_ok=True)
    sample_audio_urls = {
        "common_voice_8_0": "https://huggingface.co./datasets/japanese-asr/ja_asr.common_voice_8_0/resolve/main/sample.flac",
        "jsut_basic5000": "https://huggingface.co./datasets/japanese-asr/ja_asr.jsut_basic5000/resolve/main/sample.flac",
        "reazonspeech_test": "https://huggingface.co./datasets/japanese-asr/ja_asr.reazonspeech_test/resolve/main/sample.flac"
    }
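    # Download each sample file with wget into the "original" directory (check=True so a failed download raises).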
    for file, url in sample_audio_urls.items():
        subprocess.run(["wget", url, "-O", p_join(output_audio_dir, f"{file}.sample.flac")], check=True)
    output_reconstructed_dir = p_join(cache_dir, "sample_audio", f"reconstructed_{num_codebooks}codes")
    os.makedirs(output_reconstructed_dir, exist_ok=True)
    for file in sample_audio_urls.keys():
        single_file = p_join(output_audio_dir, f"{file}.sample.flac")
        wav, sr = audio_read(single_file)
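        # Add a batch dimension: [channels, time] -> [1, channels, time].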
        wav = wav.unsqueeze(0)
        # Encode the waveform to discrete tokens, then decode them back to audio.
        tokens = mbd_model.wav_to_tokens(wav, sr)
        re_wav, sr = mbd_model.tokens_to_wav(tokens, skip_enhancer=skip_enhancer)

        # Write the reconstruction; the ".enhancer" suffix marks outputs produced with the enhancer.
        if skip_enhancer:
            output = p_join(output_reconstructed_dir, f"{file}.sample")
        else:
            output = p_join(f"{output_reconstructed_dir}.enhancer", f"{file}.sample")
        audio_write(output, re_wav[0], sr, strategy="loudness", loudness_compressor=True)
def test_hf(mbd_model, hf_dataset: str, num_codebooks: int = 8, sample_size: int = 128, batch_size: int = 32, skip_enhancer: bool = False):
    """Test with a Hugging Face audio dataset."""
    output_dir = p_join(cache_dir, os.path.basename(hf_dataset))
    os.makedirs(output_dir, exist_ok=True)
    dataset = load_dataset(hf_dataset, split="test")
    dataset = dataset.select(range(sample_size))
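    # Pack every `batch_size` rows into a single example so each loop iteration below handles one batch.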
    dataset = dataset.map(
        lambda batch: {k: [v] for k, v in batch.items()},
        batched=True,
        batch_size=batch_size
    )
    for data in dataset:
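        # All samples in the batch must share one sampling rate.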
        sr_list = [d["sampling_rate"] for d in data["audio"]]
        assert len(set(sr_list)) == 1, sr_list
        sr = sr_list[0]
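        # Zero-pad each waveform to the longest in the batch and stack into a [batch, 1, time] tensor.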
        array = [d["array"] for d in data["audio"]]
        max_length = max(len(a) for a in array)
        # list(a) keeps the concatenation valid whether the arrays decode as Python lists or NumPy arrays.
        array = [list(a) + [0.0] * (max_length - len(a)) for a in array]
        wav = torch.as_tensor(array, dtype=torch.float32).unsqueeze_(1)
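        # Save the (padded) originals so they can be compared with the reconstructions.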
        for idx, one_wav in enumerate(wav):
            output = p_join(output_dir, "original", str(idx))
            audio_write(output, one_wav, sr, strategy="loudness", loudness_compressor=True)
        # Encode the batch to tokens and decode it back to waveforms.
        tokens = mbd_model.wav_to_tokens(wav, sr)
        re_wav, sr = mbd_model.tokens_to_wav(tokens, skip_enhancer=skip_enhancer)
        for idx, one_wav in enumerate(re_wav):
            if skip_enhancer:
                output = p_join(output_dir, f"reconstructed_{num_codebooks}codes", str(idx))
            else:
                output = p_join(output_dir, f"reconstructed_{num_codebooks}codes.enhancer", str(idx))
            audio_write(output, one_wav, sr, strategy="loudness", loudness_compressor=True)
if __name__ == '__main__':
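    # Sweep the number of codebooks with the enhancer skipped.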
    for n_code in [2, 3, 4, 5, 6]:
        model = MultiBandDiffusion.from_pretrained(num_codebooks_decoder=n_code, num_codebooks_encoder=n_code)
        test_files(model, n_code, skip_enhancer=True)
        test_hf(model, "japanese-asr/ja_asr.reazonspeech_test", num_codebooks=n_code, sample_size=64, batch_size=16, skip_enhancer=True)
        test_hf(model, "japanese-asr/ja_asr.jsut_basic5000", num_codebooks=n_code, sample_size=64, batch_size=16, skip_enhancer=True)
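    # Re-run the 3-codebook setup with the enhancer enabled (skip_enhancer defaults to False).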
    n_code = 3
    model = MultiBandDiffusion.from_pretrained(num_codebooks_decoder=n_code, num_codebooks_encoder=n_code)
    test_files(model, n_code)
    test_hf(model, "japanese-asr/ja_asr.reazonspeech_test", num_codebooks=n_code, sample_size=64, batch_size=16)
    test_hf(model, "japanese-asr/ja_asr.jsut_basic5000", num_codebooks=n_code, sample_size=64, batch_size=16)