import gradio as gr
import numpy as np
import soundfile as sf
import spaces
import torch
import torchaudio
import librosa
import yaml
import tempfile
import os

from huggingface_hub import hf_hub_download
from transformers import AutoFeatureExtractor, WhisperModel
from torch.nn.utils import parametrizations
from modules.commons import build_model, load_checkpoint, recursive_munch
from modules.campplus.DTDNN import CAMPPlus
from modules.bigvgan import bigvgan
from modules.rmvpe import RMVPE
from modules.audio import mel_spectrogram
# ----------------------------
# Optimization Settings
# ----------------------------

# Set the number of threads to the number of CPU cores
torch.set_num_threads(os.cpu_count())

# Enable optimized backends
torch.backends.openmp.enabled = True
torch.backends.mkldnn.enabled = True
torch.backends.cudnn.enabled = False
torch.backends.cuda.enabled = False
torch.set_grad_enabled(False)

# Force CPU usage
device = torch.device("cpu")
print(f"[DEVICE] | Using device: {device}")
# ----------------------------
# Load Models and Configuration
# ----------------------------

def load_custom_model_from_hf(repo_id, model_filename="pytorch_model.bin", config_filename="config.yml"):
    os.makedirs("./checkpoints", exist_ok=True)
    model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, cache_dir="./checkpoints")
    if config_filename is None:
        return model_path
    config_path = hf_hub_download(repo_id=repo_id, filename=config_filename, cache_dir="./checkpoints")
    return model_path, config_path
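# Example usage (placeholder repo/file names): with a config file the helper returns a
# (model_path, config_path) tuple; with config_filename=None it returns only the model path,
# as the CAMPPlus and RMVPE downloads below rely on.
#   model_path, config_path = load_custom_model_from_hf("some/repo", "model.pth", "config.yml")
#   model_path = load_custom_model_from_hf("some/repo", "model.pth", config_filename=None)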
# Load DiT model
dit_checkpoint_path, dit_config_path = load_custom_model_from_hf(
    "Plachta/Seed-VC",
    "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth",
    "config_dit_mel_seed_uvit_whisper_small_wavenet.yml"
)
config = yaml.safe_load(open(dit_config_path, 'r'))
model_params = recursive_munch(config['model_params'])
model = build_model(model_params, stage='DiT')

# Debug: Print model keys to identify correct key
print(f"[INFO] | Model keys: {model.keys()}")

hop_length = config['preprocess_params']['spect_params']['hop_length']
sr = config['preprocess_params']['sr']

# Load DiT checkpoints
model, _, _, _ = load_checkpoint(model, None, dit_checkpoint_path, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model:
    model[key].eval()
    model[key].to(device)
print("[INFO] | DiT model loaded and set to eval mode.")
model.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
# Ensure 'CAMPPlus' is correctly imported and defined
try:
    campplus_model = CAMPPlus(feat_dim=80, embedding_size=192)
    print("[INFO] | CAMPPlus model instantiated.")
except NameError:
    print("[ERROR] | CAMPPlus is not defined. Please check the import path and ensure CAMPPlus is correctly defined.")
    raise

# Set weights_only=True for security
campplus_ckpt_path = load_custom_model_from_hf("funasr/campplus", "campplus_cn_common.bin", config_filename=None)
campplus_state = torch.load(campplus_ckpt_path, map_location="cpu", weights_only=True)
campplus_model.load_state_dict(campplus_state)
campplus_model.eval()
campplus_model.to(device)
print("[INFO] | CAMPPlus model loaded, set to eval mode, and moved to CPU.")
# Load BigVGAN model
bigvgan_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_22khz_80band_256x', use_cuda_kernel=False)
bigvgan_model.remove_weight_norm()
bigvgan_model = bigvgan_model.eval().to(device)
print("[INFO] | BigVGAN model loaded, weight norm removed, set to eval mode, and moved to CPU.")

# Load FAcodec model
ckpt_path, config_path = load_custom_model_from_hf("Plachta/FAcodec", 'pytorch_model.bin', 'config.yml')
codec_config = yaml.safe_load(open(config_path))
codec_model_params = recursive_munch(codec_config['model_params'])
codec_encoder = build_model(codec_model_params, stage="codec")
ckpt_params = torch.load(ckpt_path, map_location="cpu", weights_only=True)
for key in codec_encoder:
    codec_encoder[key].load_state_dict(ckpt_params[key], strict=False)
codec_encoder = {k: v.eval().to(device) for k, v in codec_encoder.items()}
print("[INFO] | FAcodec model loaded, set to eval mode, and moved to CPU.")
# Load Whisper model with float32 and compatible size
whisper_name = model_params.speech_tokenizer.whisper_name if hasattr(model_params.speech_tokenizer, 'whisper_name') else "openai/whisper-small"
whisper_model = WhisperModel.from_pretrained(whisper_name, torch_dtype=torch.float32).to(device)
del whisper_model.decoder  # Remove decoder as it's not used
whisper_feature_extractor = AutoFeatureExtractor.from_pretrained(whisper_name)
print(f"[INFO] | Whisper model '{whisper_name}' loaded with dtype {whisper_model.dtype} and moved to CPU.")

# Generate mel spectrograms with optimized parameters
mel_fn_args = {
    "n_fft": 1024,
    "win_size": 1024,
    "hop_size": 256,
    "num_mels": 80,
    "sampling_rate": sr,
    "fmin": 0,
    "fmax": None,
    "center": False
}
to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
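# Rough frame-rate arithmetic for these settings: at sr = 22050 Hz with hop_size = 256,
# one mel frame covers 256 / 22050 ≈ 11.6 ms, i.e. roughly 86 frames per second, so the
# 30-second context window used later corresponds to about 2580 mel frames.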
# Load F0 conditioned model
dit_checkpoint_path_f0, dit_config_path_f0 = load_custom_model_from_hf(
    "Plachta/Seed-VC",
    "DiT_seed_v2_uvit_whisper_base_f0_44k_bigvgan_pruned_ft_ema.pth",
    "config_dit_mel_seed_uvit_whisper_base_f0_44k.yml"
)
config_f0 = yaml.safe_load(open(dit_config_path_f0, 'r'))
model_params_f0 = recursive_munch(config_f0['model_params'])
model_f0 = build_model(model_params_f0, stage='DiT')
hop_length_f0 = config_f0['preprocess_params']['spect_params']['hop_length']
sr_f0 = config_f0['preprocess_params']['sr']

# Load F0 model checkpoints
model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path_f0, load_only_params=True, ignore_modules=[], is_distributed=False)
for key in model_f0:
    model_f0[key].eval()
    model_f0[key].to(device)
print("[INFO] | F0 conditioned DiT model loaded and set to eval mode.")
model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)

# Load F0 extractor
model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
rmvpe = RMVPE(model_path, is_half=False, device=device)
print("[INFO] | RMVPE model loaded and moved to CPU.")
mel_fn_args_f0 = {
    "n_fft": config_f0['preprocess_params']['spect_params']['n_fft'],
    "win_size": config_f0['preprocess_params']['spect_params']['win_length'],
    "hop_size": config_f0['preprocess_params']['spect_params']['hop_length'],
    "num_mels": 80,  # Ensure this matches the primary model
    "sampling_rate": sr_f0,
    "fmin": 0,
    "fmax": None,
    "center": False
}
to_mel_f0 = lambda x: mel_spectrogram(x, **mel_fn_args_f0)

# Load BigVGAN 44kHz model
bigvgan_44k_model = bigvgan.BigVGAN.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', use_cuda_kernel=False)
bigvgan_44k_model.remove_weight_norm()
bigvgan_44k_model = bigvgan_44k_model.eval().to(device)
print("[INFO] | BigVGAN 44kHz model loaded, weight norm removed, set to eval mode, and moved to CPU.")
# ----------------------------
# Helper Functions
# ----------------------------

def adjust_f0_semitones(f0_sequence, n_semitones):
    factor = 2 ** (n_semitones / 12)
    return f0_sequence * factor
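# Worked example: a shift of +12 semitones gives factor 2 ** (12 / 12) = 2, doubling every
# F0 value (one octave up); -12 semitones halves it (one octave down).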
def crossfade(chunk1, chunk2, overlap):
    fade_out = np.cos(np.linspace(0, np.pi / 2, overlap)) ** 2
    fade_in = np.cos(np.linspace(np.pi / 2, 0, overlap)) ** 2
    chunk2[:overlap] = chunk2[:overlap] * fade_in + chunk1[-overlap:] * fade_out
    return chunk2
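# The two gains sum to 1 at every sample (cos^2 + sin^2 = 1), so the overlapped region is
# blended without a level jump. In this version of the script the generated chunks are
# concatenated without overlap, so this helper is defined but currently not called.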
# ----------------------------
# Voice Conversion Function
# ----------------------------

def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift):
    print("[INFO] | Voice conversion started.")
    inference_module = model if not f0_condition else model_f0
    mel_fn = to_mel if not f0_condition else to_mel_f0
    bigvgan_fn = bigvgan_model if not f0_condition else bigvgan_44k_model
    sr_current = 22050 if not f0_condition else 44100
    hop_length_current = 256 if not f0_condition else 512
    max_context_window = sr_current // hop_length_current * 30
    overlap_wave_len = 16 * hop_length_current
    bitrate = "320k"
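    # For context: max_context_window is roughly 30 s of mel frames
    # (sr_current // hop_length_current ≈ 86 frames per second for both sample rates, so ≈ 2580 frames),
    # and overlap_wave_len is 16 hops of raw audio samples. The bitrate value is defined here
    # but is not used when the result is written as a WAV file below.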
    # Load audio using librosa
    print("[INFO] | Loading source and reference audio.")
    source_audio, _ = librosa.load(source, sr=sr_current)
    ref_audio, _ = librosa.load(target, sr=sr_current)

    # Clip reference audio to 25 seconds
    ref_audio = ref_audio[:sr_current * 25]
    print(f"[INFO] | Source audio length: {len(source_audio)/sr_current:.2f}s, Reference audio length: {len(ref_audio)/sr_current:.2f}s")

    # Convert audio to tensors
    source_audio_tensor = torch.tensor(source_audio).unsqueeze(0).float().to(device)
    ref_audio_tensor = torch.tensor(ref_audio).unsqueeze(0).float().to(device)

    # Resample to 16kHz
    ref_waves_16k = torchaudio.functional.resample(ref_audio_tensor, sr_current, 16000)
    converted_waves_16k = torchaudio.functional.resample(source_audio_tensor, sr_current, 16000)

    # Generate Whisper features
    print("[INFO] | Generating Whisper features for source audio.")
    if converted_waves_16k.size(-1) <= 16000 * 30:
        alt_inputs = whisper_feature_extractor(
            [converted_waves_16k.squeeze(0).cpu().numpy()],
            return_tensors="pt",
            return_attention_mask=True,
            sampling_rate=16000
        )
        alt_input_features = whisper_model._mask_input_features(
            alt_inputs.input_features, attention_mask=alt_inputs.attention_mask
        ).to(device)
        alt_outputs = whisper_model.encoder(
            alt_input_features.to(torch.float32),
            head_mask=None,
            output_attentions=False,
            output_hidden_states=False,
            return_dict=True
        )
        S_alt = alt_outputs.last_hidden_state.to(torch.float32)
        S_alt = S_alt[:, :converted_waves_16k.size(-1) // 320 + 1]
        print(f"[INFO] | S_alt shape: {S_alt.shape}")
    else:
        # Process in chunks
        print("[INFO] | Processing source audio in chunks.")
        overlapping_time = 5  # seconds
        chunk_size = 16000 * 30  # 30 seconds
        overlap_size = 16000 * overlapping_time
        S_alt_list = []
        buffer = None
        traversed_time = 0
        total_length = converted_waves_16k.size(-1)
        while traversed_time < total_length:
            if buffer is None:
                chunk = converted_waves_16k[:, traversed_time:traversed_time + chunk_size]
            else:
                chunk = torch.cat([
                    buffer,
                    converted_waves_16k[:, traversed_time:traversed_time + chunk_size - overlap_size]
                ], dim=-1)
            alt_inputs = whisper_feature_extractor(
                [chunk.squeeze(0).cpu().numpy()],
                return_tensors="pt",
                return_attention_mask=True,
                sampling_rate=16000
            )
            alt_input_features = whisper_model._mask_input_features(
                alt_inputs.input_features, attention_mask=alt_inputs.attention_mask
            ).to(device)
            alt_outputs = whisper_model.encoder(
                alt_input_features.to(torch.float32),
                head_mask=None,
                output_attentions=False,
                output_hidden_states=False,
                return_dict=True
            )
            S_chunk = alt_outputs.last_hidden_state.to(torch.float32)
            S_chunk = S_chunk[:, :chunk.size(-1) // 320 + 1]
            print(f"[INFO] | Processed chunk with S_chunk shape: {S_chunk.shape}")
            if traversed_time == 0:
                S_alt_list.append(S_chunk)
            else:
                skip_frames = 50 * overlapping_time
                S_alt_list.append(S_chunk[:, skip_frames:])
            buffer = chunk[:, -overlap_size:]
            traversed_time += chunk_size - overlap_size
        S_alt = torch.cat(S_alt_list, dim=1)
        print(f"[INFO] | Final S_alt shape after chunk processing: {S_alt.shape}")
    # Original Whisper features
    print("[INFO] | Generating Whisper features for reference audio.")
    ori_waves_16k = ref_waves_16k  # reference audio was already resampled to 16 kHz above
    ori_inputs = whisper_feature_extractor(
        [ori_waves_16k.squeeze(0).cpu().numpy()],
        return_tensors="pt",
        return_attention_mask=True,
        sampling_rate=16000
    )
    ori_input_features = whisper_model._mask_input_features(
        ori_inputs.input_features, attention_mask=ori_inputs.attention_mask
    ).to(device)
    ori_outputs = whisper_model.encoder(
        ori_input_features.to(torch.float32),
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True
    )
    S_ori = ori_outputs.last_hidden_state.to(torch.float32)
    S_ori = S_ori[:, :ori_waves_16k.size(-1) // 320 + 1]
    print(f"[INFO] | S_ori shape: {S_ori.shape}")
    # Generate mel spectrograms
    print("[INFO] | Generating mel spectrograms.")
    mel = mel_fn(source_audio_tensor.float())
    mel2 = mel_fn(ref_audio_tensor.float())
    print(f"[INFO] | Mel spectrogram shapes: mel={mel.shape}, mel2={mel2.shape}")

    # Length adjustment
    target_lengths = torch.LongTensor([int(mel.size(2) * length_adjust)]).to(mel.device)
    target2_lengths = torch.LongTensor([mel2.size(2)]).to(mel2.device)
    print(f"[INFO] | Target lengths: {target_lengths.item()}, {target2_lengths.item()}")

    # Extract style features
    print("[INFO] | Extracting style features from reference audio.")
    feat2 = torchaudio.compliance.kaldi.fbank(
        ref_waves_16k,
        num_mel_bins=80,
        dither=0,
        sample_frequency=16000
    )
    feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
    style2 = campplus_model(feat2.unsqueeze(0))
    print(f"[INFO] | Style2 shape: {style2.shape}")
    # F0 Conditioning
    if f0_condition:
        print("[INFO] | Performing F0 conditioning.")
        F0_ori = rmvpe.infer_from_audio(ref_waves_16k[0], thred=0.5)
        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.5)
        F0_ori = torch.from_numpy(F0_ori).to(device)[None].float()
        F0_alt = torch.from_numpy(F0_alt).to(device)[None].float()
        voiced_F0_ori = F0_ori[F0_ori > 1]
        voiced_F0_alt = F0_alt[F0_alt > 1]
        log_f0_alt = torch.log(F0_alt + 1e-5)
        voiced_log_f0_ori = torch.log(voiced_F0_ori + 1e-5)
        voiced_log_f0_alt = torch.log(voiced_F0_alt + 1e-5)
        median_log_f0_ori = torch.median(voiced_log_f0_ori)
        median_log_f0_alt = torch.median(voiced_log_f0_alt)

        # Shift F0 levels
        shifted_log_f0_alt = log_f0_alt.clone()
        if auto_f0_adjust:
            shifted_log_f0_alt[F0_alt > 1] = (
                log_f0_alt[F0_alt > 1] - median_log_f0_alt + median_log_f0_ori
            )
        shifted_f0_alt = torch.exp(shifted_log_f0_alt)
        if pitch_shift != 0:
            shifted_f0_alt[F0_alt > 1] = adjust_f0_semitones(shifted_f0_alt[F0_alt > 1], pitch_shift)
        print("[INFO] | F0 conditioning completed.")
    else:
        F0_ori = None
        F0_alt = None
        shifted_f0_alt = None
        print("[INFO] | F0 conditioning not applied.")
    # Length Regulation
    print("[INFO] | Applying length regulation.")
    cond, _, _, _, _ = inference_module.length_regulator(S_alt, ylens=target_lengths, n_quantizers=3, f0=shifted_f0_alt)
    prompt_condition, _, _, _, _ = inference_module.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=3, f0=F0_ori)
    print(f"[INFO] | Cond shape: {cond.shape}, Prompt condition shape: {prompt_condition.shape}")

    # Initialize variables for audio generation
    max_source_window = max_context_window - mel2.size(2)
    processed_frames = 0
    generated_wave_chunks = []

    print("[INFO] | Starting inference and audio generation.")
    while processed_frames < cond.size(1):
        chunk_cond = cond[:, processed_frames:processed_frames + max_source_window]
        is_last_chunk = processed_frames + max_source_window >= cond.size(1)
        cat_condition = torch.cat([prompt_condition, chunk_cond], dim=1)

        # Perform inference
        vc_target = inference_module.cfm.inference(
            cat_condition,
            torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
            mel2,
            style2,
            None,
            diffusion_steps,
            inference_cfg_rate=inference_cfg_rate
        )
        vc_target = vc_target[:, :, mel2.size(2):]
        print(f"[INFO] | vc_target shape: {vc_target.shape}")

        # Generate waveform using BigVGAN
        vc_wave = bigvgan_fn(vc_target.float())[0]
        print(f"[INFO] | vc_wave shape: {vc_wave.shape}")

        # Handle the generated waveform
        output_wave = vc_wave[0].cpu().numpy()
        generated_wave_chunks.append(output_wave)
        # Advance by the full chunk length (no overlap between chunks) so processed_frames
        # always increases and the loop is guaranteed to terminate
        processed_frames += vc_target.size(2)
        print(f"[INFO] | Processed frames updated to: {processed_frames}")
    # Concatenate all generated wave chunks
    final_audio = np.concatenate(generated_wave_chunks).astype(np.float32)

    # Normalize the audio to ensure it's within [-1.0, 1.0]
    max_val = np.max(np.abs(final_audio))
    if max_val > 1.0:
        final_audio = final_audio / max_val
        print("[INFO] | Final audio normalized.")

    # Save the audio to a temporary WAV file
    print("[INFO] | Saving final audio to a temporary WAV file.")
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp_file:
        sf.write(tmp_file.name, final_audio, sr_current, format='WAV')
        temp_file_path = tmp_file.name
        print(f"[INFO] | Final audio saved to {temp_file_path}")

    return temp_file_path
# ----------------------------
# Bypass GPU Initialization Error
# ----------------------------
# Spaces running on ZeroGPU expect at least one function decorated with @spaces.GPU;
# keep this stub (and its decorator) even though all inference above runs on the CPU.
@spaces.GPU
def gpu():
    return
# ----------------------------
# Gradio Interface
# ----------------------------

description = (
    "🪄 **Voice Conversion Tool**\n\n"
    "Upload your **Source Audio** and **Reference Audio** files to perform voice conversion. "
    "Adjust the sliders and checkboxes to customize the conversion process."
)

inputs = [
    gr.Audio(type="filepath", label="Source Audio"),
    gr.Audio(type="filepath", label="Reference Audio"),
    gr.Slider(minimum=1, maximum=100, value=25, step=1, label="Diffusion Steps", info="Default is 25. Use 50-100 for best quality."),
    gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Length Adjustment", info="<1.0 to speed up speech, >1.0 to slow down speech."),
    gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Inference CFG Rate", info="Has a subtle influence."),
    gr.Checkbox(label="Use F0 Conditioned Model", value=False, info="Must be enabled for singing voice conversion."),
    gr.Checkbox(label="Auto F0 Adjustment", value=True, info="Roughly adjusts F0 to match target voice. Only works when 'Use F0 Conditioned Model' is enabled."),
    gr.Slider(label='Pitch Shift (semitones)', minimum=-12, maximum=12, step=1, value=0, info="Pitch shift in semitones. Only works when 'Use F0 Conditioned Model' is enabled."),
]
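# Note: gr.Interface passes these components to the function positionally, so their order
# must match the signature of voice_conversion(source, target, diffusion_steps,
# length_adjust, inference_cfg_rate, f0_condition, auto_f0_adjust, pitch_shift).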
# Set outputs to a single gr.Audio component with type="filepath"
outputs = gr.Audio(label="Full Output Audio", type="filepath")

gr.Interface(
    fn=voice_conversion,
    description=description,
    inputs=inputs,
    outputs=outputs,
    title="Seed Voice Conversion",
    cache_examples=False,
    allow_flagging="never"
).launch(share=True)