experiment-process-seamless-align / attach_speaker_embedding_s2s.py
import os
import shutil
import subprocess
from os.path import expanduser, join as p_join
from typing import Optional

import librosa
import numpy as np
import torch
from datasets import Audio, DatasetDict, load_dataset
from librosa import feature
from soundfile import LibsndfileError
from torch import nn
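
# Configuration via environment variables: translation direction, dataset shard,
# number of workers, and the Hugging Face organization hosting the data.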
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
dataset_id = int(os.getenv("DATASET_ID", 0))
num_proc = int(os.getenv("NUM_PROC", 1))
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")
audio_loader = Audio()
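# Pretrained MetaVoice speaker-encoder checkpoint, downloaded to the local cache on first use.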
checkpoint_url = "https://huggingface.co./datasets/asahi417/experiment-speaker-embedding/resolve/main/meta_voice_speaker_encoder.pt"
model_weight = p_join(os.path.expanduser('~'), ".cache", "experiment_speaker_embedding", "meta_voice_speaker_encoder.pt")
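

# Minimal wget wrapper used to fetch the checkpoint weights.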
def wget(url: str, output_file: Optional[str] = None):
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file])
    if not os.path.exists(output_file):
        raise ValueError(f"failed to download {url}")
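

# MetaVoice speaker encoder: a 3-layer LSTM over 40-channel mel spectrograms, followed by a
# linear projection and ReLU, producing an L2-normalised 256-dimensional speaker embedding.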
class MetaVoiceSE(nn.Module):
    mel_window_length = 25
    mel_window_step = 10
    mel_n_channels = 40
    sampling_rate = 16000
    partials_n_frames = 160
    model_hidden_size = 256
    model_embedding_size = 256
    model_num_layers = 3

    def __init__(self):
        super().__init__()
        if not os.path.exists(model_weight):
            wget(checkpoint_url, model_weight)
        # Define the network
        self.lstm = nn.LSTM(self.mel_n_channels, self.model_hidden_size, self.model_num_layers, batch_first=True)
        self.linear = nn.Linear(self.model_hidden_size, self.model_embedding_size)
        self.relu = nn.ReLU()
        # Load weight
        self.load_state_dict(torch.load(model_weight, map_location="cpu")["model_state"], strict=False)
        # Get the target device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(self.device)
        self.eval()
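
    # Split an utterance of n_samples into overlapping partials of partials_n_frames
    # mel frames each, returning aligned waveform slices and mel-frame slices.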
    def compute_partial_slices(self, n_samples: int, rate, min_coverage):
        # Compute how many frames separate two partial utterances
        samples_per_frame = int((self.sampling_rate * self.mel_window_step / 1000))
        n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
        frame_step = int(np.round((self.sampling_rate / rate) / samples_per_frame))
        # Compute the slices
        wav_slices, mel_slices = [], []
        steps = max(1, n_frames - self.partials_n_frames + frame_step + 1)
        for i in range(0, steps, frame_step):
            mel_range = np.array([i, i + self.partials_n_frames])
            wav_range = mel_range * samples_per_frame
            mel_slices.append(slice(*mel_range))
            wav_slices.append(slice(*wav_range))
        # Evaluate whether extra padding is warranted or not
        last_wav_range = wav_slices[-1]
        coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
        if coverage < min_coverage and len(mel_slices) > 1:
            return wav_slices[:-1], mel_slices[:-1]
        return wav_slices, mel_slices
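
    # Resample/trim the waveform, compute mel spectrograms for each partial slice, run them
    # through the LSTM, and average the per-partial embeddings into one unit-norm vector.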
    def get_speaker_embedding(self,
                              wav: np.ndarray,
                              sampling_rate: Optional[int] = None,
                              rate: float = 1.3,
                              min_coverage: float = 0.75) -> np.ndarray:
        if sampling_rate is not None and sampling_rate != self.sampling_rate:
            wav = librosa.resample(wav, orig_sr=sampling_rate, target_sr=self.sampling_rate)
        wav, _ = librosa.effects.trim(wav, top_db=20)
        wav_slices, mel_slices = self.compute_partial_slices(len(wav), rate, min_coverage)
        max_wave_length = wav_slices[-1].stop
        if max_wave_length >= len(wav):
            wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
        # Wav -> Mel spectrogram
        frames = feature.melspectrogram(
            y=wav,
            sr=self.sampling_rate,
            n_fft=int(self.sampling_rate * self.mel_window_length / 1000),
            hop_length=int(self.sampling_rate * self.mel_window_step / 1000),
            n_mels=self.mel_n_channels,
        )
        mel = frames.astype(np.float32).T
        mel = np.array([mel[s] for s in mel_slices])
        # Inference
        with torch.no_grad():
            mel = torch.from_numpy(mel).to(self.device)
            _, (hidden, _) = self.lstm(mel)
            embeds_raw = self.relu(self.linear(hidden[-1]))
            partial_embeds = embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
        partial_embeds = partial_embeds.cpu().numpy()
        raw_embed = np.mean(partial_embeds, axis=0)
        return raw_embed / np.linalg.norm(raw_embed, 2)

speaker_embedder = MetaVoiceSE()
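

# Return False for rows whose audio bytes cannot be decoded by libsndfile, so they can be dropped.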
def error_file(example):
    for side in sides:
        try:
            audio_loader.decode_example(example[f"{side}.audio"])
        except LibsndfileError:
            return False
    return True
print(f"Num examples: {len(dataset)}")
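# Cast the audio columns to non-decoding mode for the filter, then restore automatic decoding.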
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio(decode=False))
dataset = dataset.filter(error_file, num_proc=num_proc, desc="drop broken audio")
for s in sides:
    dataset = dataset.cast_column(f"{s}.audio", Audio())
print(f"Num examples (after filtering): {len(dataset)}")
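

# Compute one speaker embedding per side of each translation pair.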
def speaker_embedding(example):
    for side in sides:
        example[f"{side}.audio.speaker_embedding"] = speaker_embedder.get_speaker_embedding(
            example[f"{side}.audio"]["array"], example[f"{side}.audio"]["sampling_rate"]
        )
    return example
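
# Attach the embeddings and drop the raw audio and segment metadata columns that are no longer needed.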
dataset = dataset.map(
    function=speaker_embedding,
    remove_columns=[f"{s}.audio" for s in sides] + [f"{s}.url" for s in sides] + [f"{s}.duration_start" for s in sides] + [f"{s}.duration_end" for s in sides],
    num_proc=num_proc,
    desc="attach speaker embedding dataset"
)
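
# Upload the processed shard as its own dataset config, then clear the local HF datasets cache.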
DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.speaker-embedding.metavoice", config_name=f"subset_{dataset_id}")
cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
if os.path.exists(cache_dir):
    shutil.rmtree(cache_dir)