Eempostor committed
Commit 66ffd51
1 Parent(s): 41a7bbb

Upload 4 files

Files changed (4)
  1. lib/infer.py +211 -0
  2. lib/modules.py +578 -0
  3. lib/pipeline.py +766 -0
  4. lib/split_audio.py +91 -0
lib/infer.py ADDED
@@ -0,0 +1,211 @@
+ import os
+ import gc
+ import shutil
+ import torch
+ from multiprocessing import cpu_count
+ from lib.modules import VC
+ from lib.split_audio import split_silence_nonsilent, adjust_audio_lengths, combine_silence_nonsilent
+
+ main_dir = os.getcwd()
+ models_dir = os.path.join(main_dir, "models")  # assumed layout: one folder per voice model under ./models
+
+ class Configs:
+     def __init__(self, device, is_half):
+         self.device = device
+         self.is_half = is_half
+         self.n_cpu = 0
+         self.gpu_name = None
+         self.gpu_mem = None
+         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+     def device_config(self) -> tuple:
+         if torch.cuda.is_available():
+             i_device = int(self.device.split(":")[-1])
+             self.gpu_name = torch.cuda.get_device_name(i_device)
+             #if (
+             #    ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+             #    or "P40" in self.gpu_name.upper()
+             #    or "1060" in self.gpu_name
+             #    or "1070" in self.gpu_name
+             #    or "1080" in self.gpu_name
+             #):
+             #    print("16/10 series and P40 GPUs forced to single precision")
+             #    self.is_half = False
+             #    for config_file in ["32k.json", "40k.json", "48k.json"]:
+             #        with open(BASE_DIR / "src" / "configs" / config_file, "r") as f:
+             #            strr = f.read().replace("true", "false")
+             #        with open(BASE_DIR / "src" / "configs" / config_file, "w") as f:
+             #            f.write(strr)
+             #    with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
+             #        strr = f.read().replace("3.7", "3.0")
+             #    with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
+             #        f.write(strr)
+             #else:
+             #    self.gpu_name = None
+             #self.gpu_mem = int(
+             #    torch.cuda.get_device_properties(i_device).total_memory
+             #    / 1024
+             #    / 1024
+             #    / 1024
+             #    + 0.4
+             #)
+             #if self.gpu_mem <= 4:
+             #    with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "r") as f:
+             #        strr = f.read().replace("3.7", "3.0")
+             #    with open(BASE_DIR / "src" / "trainset_preprocess_pipeline_print.py", "w") as f:
+             #        f.write(strr)
+         elif torch.backends.mps.is_available():
+             print("No supported NVIDIA GPU found, using MPS for inference")
+             self.device = "mps"
+         else:
+             print("No supported NVIDIA GPU found, using CPU for inference")
+             self.device = "cpu"
+             self.is_half = False  # half precision is not reliable on CPU
+
+         if self.n_cpu == 0:
+             self.n_cpu = cpu_count()
+
+         if self.is_half:
+             # 6G VRAM config
+             x_pad = 3
+             x_query = 10
+             x_center = 60
+             x_max = 65
+         else:
+             # 5G VRAM config
+             x_pad = 1
+             x_query = 6
+             x_center = 38
+             x_max = 41
+
+         if self.gpu_mem is not None and self.gpu_mem <= 4:
+             x_pad = 1
+             x_query = 5
+             x_center = 30
+             x_max = 32
+
+         return x_pad, x_query, x_center, x_max
+
+ def get_model(voice_model):
+     model_dir = os.path.join(models_dir, voice_model)
+     model_filename, index_filename = None, None
+     for file in os.listdir(model_dir):
+         ext = os.path.splitext(file)[1]
+         if ext == '.pth':
+             model_filename = file
+         if ext == '.index':
+             index_filename = file
+
+     if model_filename is None:
+         print(f'No model file exists in {model_dir}.')
+         return None, None
+
+     return os.path.join(model_dir, model_filename), os.path.join(model_dir, index_filename) if index_filename else ''
+
+ def infer_audio(
+     model_name,
+     audio_path,
+     f0_change=0,
+     f0_method="rmvpe+",
+     min_pitch="50",
+     max_pitch="1100",
+     crepe_hop_length=128,
+     index_rate=0.75,
+     filter_radius=3,
+     rms_mix_rate=0.25,
+     protect=0.33,
+     split_infer=False,
+     min_silence=500,
+     silence_threshold=-50,
+     seek_step=1,
+     keep_silence=100,
+     do_formant=False,
+     quefrency=0,
+     timbre=1,
+     f0_autotune=False,
+     audio_format="wav",
+     resample_sr=0
+ ):
+     configs = Configs('cuda:0', True)
+     vc = VC(configs)
+     pth_path, index_path = get_model(model_name)
+     vc_data = vc.get_vc(pth_path, protect, 0.5)
+
+     if split_infer:
+         inferred_files = []
+         temp_dir = os.path.join(main_dir, "seperate", "temp")
+         os.makedirs(temp_dir, exist_ok=True)
+         print("Splitting audio into silent and non-silent segments.")
+         silence_files, nonsilent_files = split_silence_nonsilent(audio_path, min_silence, silence_threshold, seek_step, keep_silence)
+         print(f"Total silence segments: {len(silence_files)}.\nTotal nonsilent segments: {len(nonsilent_files)}.")
+         for i, nonsilent_file in enumerate(nonsilent_files):
+             print(f"Inferring nonsilent audio {i+1}")
+             inference_info, audio_data, output_path = vc.vc_single(
+                 0,
+                 nonsilent_file,
+                 f0_change,
+                 f0_method,
+                 index_path,
+                 index_path,
+                 index_rate,
+                 filter_radius,
+                 resample_sr,
+                 rms_mix_rate,
+                 protect,
+                 audio_format,
+                 crepe_hop_length,
+                 do_formant,
+                 quefrency,
+                 timbre,
+                 min_pitch,
+                 max_pitch,
+                 f0_autotune
+             )
+             if inference_info[0] == "Success.":
+                 print("Inference ran successfully.")
+                 print(inference_info[1])
+                 print("Times:\nnpy: %.2fs f0: %.2fs infer: %.2fs\nTotal time: %.2fs" % tuple(inference_info[2]))
+             else:
+                 print(f"An error occurred while processing.\n{inference_info[0]}")
+                 return None
+             shutil.move(output_path, temp_dir)
+             inferred_files.append(os.path.join(temp_dir, os.path.basename(output_path)))
+         print("Adjusting inferred audio lengths.")
+         adjusted_inferred_files = adjust_audio_lengths(nonsilent_files, inferred_files)
+         print("Combining silence and inferred audios.")
+         output_count = 1
+         while True:
+             output_path = os.path.join(os.getcwd(), "output", f"{os.path.splitext(os.path.basename(audio_path))[0]}{model_name}{f0_method.capitalize()}_{output_count}.{audio_format}")
+             if not os.path.exists(output_path):
+                 break
+             output_count += 1
+         inferred_audio = combine_silence_nonsilent(silence_files, adjusted_inferred_files, keep_silence, output_path)
+         shutil.rmtree(os.path.join(main_dir, "seperate", "temp"))
+     else:
+         inference_info, audio_data, output_path = vc.vc_single(
+             0,
+             audio_path,
+             f0_change,
+             f0_method,
+             index_path,
+             index_path,
+             index_rate,
+             filter_radius,
+             resample_sr,
+             rms_mix_rate,
+             protect,
+             audio_format,
+             crepe_hop_length,
+             do_formant,
+             quefrency,
+             timbre,
+             min_pitch,
+             max_pitch,
+             f0_autotune
+         )
+         if inference_info[0] == "Success.":
+             print("Inference ran successfully.")
+             print(inference_info[1])
+             print("Times:\nnpy: %.2fs f0: %.2fs infer: %.2fs\nTotal time: %.2fs" % tuple(inference_info[2]))
+         else:
+             print(f"An error occurred while processing.\n{inference_info[0]}")
+             return None
+
+     del configs, vc
+     gc.collect()
+     return output_path
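
A minimal usage sketch for infer_audio, assuming the repo root is the working directory and a voice model folder exists under models/; the model name "MyVoice" and the input file "song.wav" below are placeholders, not part of this commit:

from lib.infer import infer_audio

output = infer_audio(
    model_name="MyVoice",     # folder under models/ holding a .pth and optional .index
    audio_path="song.wav",
    f0_change=2,              # transpose up two semitones
    f0_method="rmvpe+",
    split_infer=True,         # split on silence, infer each segment, recombine
)
print(output)                 # path of the rendered file under ./output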
lib/modules.py ADDED
@@ -0,0 +1,578 @@
+ import os, sys
+ import traceback
+ import logging
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+ logger = logging.getLogger(__name__)
+ import numpy as np
+ import soundfile as sf
+ import torch
+ from io import BytesIO
+ from lib.infer_libs.audio import load_audio
+ from lib.infer_libs.audio import wav2
+ from lib.infer_libs.infer_pack.models import (
+     SynthesizerTrnMs256NSFsid,
+     SynthesizerTrnMs256NSFsid_nono,
+     SynthesizerTrnMs768NSFsid,
+     SynthesizerTrnMs768NSFsid_nono,
+ )
+ from lib.pipeline import Pipeline
+ import time
+ import glob
+ from shutil import move
+ from fairseq import checkpoint_utils
+
+ sup_audioext = {
+     "wav", "mp3", "flac", "ogg", "opus", "m4a", "mp4",
+     "aac", "alac", "wma", "aiff", "webm", "ac3",
+ }
+
+ def note_to_hz(note_name):
+     try:
+         SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2}
+         pitch_class, octave = note_name[:-1], int(note_name[-1])
+         semitone = SEMITONES[pitch_class]
+         note_number = 12 * (octave - 4) + semitone  # semitone distance from A4
+         frequency = 440.0 * (2.0 ** (1.0 / 12)) ** note_number
+         return frequency
+     except Exception:
+         return None
+
+ def load_hubert(hubert_model_path, config):
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+         [hubert_model_path],
+         suffix="",
+     )
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     if config.is_half:
+         hubert_model = hubert_model.half()
+     else:
+         hubert_model = hubert_model.float()
+     return hubert_model.eval()
+
+ class VC:
+     def __init__(self, config):
+         self.n_spk = None
+         self.tgt_sr = None
+         self.net_g = None
+         self.pipeline = None
+         self.cpt = None
+         self.version = None
+         self.if_f0 = None
+         self.hubert_model = None
+
+         self.config = config
+
+     def get_vc(self, sid, *to_return_protect):
+         logger.info("Get sid: " + sid)
+
+         to_return_protect0 = {
+             "visible": self.if_f0 != 0,
+             "value": to_return_protect[0]
+             if self.if_f0 != 0 and to_return_protect
+             else 0.5,
+             "__type__": "update",
+         }
+         to_return_protect1 = {
+             "visible": self.if_f0 != 0,
+             "value": to_return_protect[1]
+             if self.if_f0 != 0 and to_return_protect
+             else 0.33,
+             "__type__": "update",
+         }
+
+         if sid == "" or sid == []:
+             if self.hubert_model is not None:  # polling can switch sid from a loaded model to none, so check before cleaning
+                 logger.info("Clean model cache")
+                 del (
+                     self.net_g,
+                     self.n_spk,
+                     self.vc,
+                     self.hubert_model,
+                     self.tgt_sr,
+                 )  # ,cpt
+                 self.hubert_model = (
+                     self.net_g
+                 ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
+                 if torch.cuda.is_available():
+                     torch.cuda.empty_cache()
+                 ### without re-instantiating net_g below, the cache is not fully released
+                 self.if_f0 = self.cpt.get("f0", 1)
+                 self.version = self.cpt.get("version", "v1")
+                 if self.version == "v1":
+                     if self.if_f0 == 1:
+                         self.net_g = SynthesizerTrnMs256NSFsid(
+                             *self.cpt["config"], is_half=self.config.is_half
+                         )
+                     else:
+                         self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
+                 elif self.version == "v2":
+                     if self.if_f0 == 1:
+                         self.net_g = SynthesizerTrnMs768NSFsid(
+                             *self.cpt["config"], is_half=self.config.is_half
+                         )
+                     else:
+                         self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
+                 del self.net_g, self.cpt
+                 if torch.cuda.is_available():
+                     torch.cuda.empty_cache()
+             return (
+                 {"visible": False, "__type__": "update"},
+                 {
+                     "visible": True,
+                     "value": to_return_protect0,
+                     "__type__": "update",
+                 },
+                 {
+                     "visible": True,
+                     "value": to_return_protect1,
+                     "__type__": "update",
+                 },
+                 "",
+                 "",
+             )
+         #person = f'{os.getenv("weight_root")}/{sid}'
+         person = f'{sid}'
+         #logger.info(f"Loading: {person}")
+         logger.info("Loading...")
+         self.cpt = torch.load(person, map_location="cpu")
+         self.tgt_sr = self.cpt["config"][-1]
+         self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+         self.if_f0 = self.cpt.get("f0", 1)
+         self.version = self.cpt.get("version", "v1")
+
+         synthesizer_class = {
+             ("v1", 1): SynthesizerTrnMs256NSFsid,
+             ("v1", 0): SynthesizerTrnMs256NSFsid_nono,
+             ("v2", 1): SynthesizerTrnMs768NSFsid,
+             ("v2", 0): SynthesizerTrnMs768NSFsid_nono,
+         }
+
+         self.net_g = synthesizer_class.get(
+             (self.version, self.if_f0), SynthesizerTrnMs256NSFsid
+         )(*self.cpt["config"], is_half=self.config.is_half)
+
+         del self.net_g.enc_q
+
+         self.net_g.load_state_dict(self.cpt["weight"], strict=False)
+         self.net_g.eval().to(self.config.device)
+         if self.config.is_half:
+             self.net_g = self.net_g.half()
+         else:
+             self.net_g = self.net_g.float()
+
+         self.pipeline = Pipeline(self.tgt_sr, self.config)
+         n_spk = self.cpt["config"][-3]
+         #index = {"value": get_index_path_from_model(sid), "__type__": "update"}
+         #logger.info("Select index: " + index["value"])
+
+         return (
+             (
+                 {"visible": False, "maximum": n_spk, "__type__": "update"},
+                 to_return_protect0,
+                 to_return_protect1,
+             )
+             if to_return_protect
+             else {"visible": False, "maximum": n_spk, "__type__": "update"}
+         )
+
+     def vc_single_dont_save(
+         self,
+         sid,
+         input_audio_path1,
+         f0_up_key,
+         f0_method,
+         file_index,
+         file_index2,
+         index_rate,
+         filter_radius,
+         resample_sr,
+         rms_mix_rate,
+         protect,
+         crepe_hop_length,
+         do_formant,
+         quefrency,
+         timbre,
+         f0_min,
+         f0_max,
+         f0_autotune,
+         hubert_model_path="assets/hubert/hubert_base.pt"
+     ):
+         """
+         Performs inference without saving
+
+         Parameters:
+         - sid (int)
+         - input_audio_path1 (str)
+         - f0_up_key (int)
+         - f0_method (str)
+         - file_index (str)
+         - file_index2 (str)
+         - index_rate (float)
+         - filter_radius (int)
+         - resample_sr (int)
+         - rms_mix_rate (float)
+         - protect (float)
+         - crepe_hop_length (int)
+         - do_formant (bool)
+         - quefrency (float)
+         - timbre (float)
+         - f0_min (str)
+         - f0_max (str)
+         - f0_autotune (bool)
+         - hubert_model_path (str)
+
+         Returns:
+         Tuple(Tuple(status, index_info, times), Tuple(sr, data)):
+         - Tuple(status, index_info, times):
+             - status (str): either "Success." or an error
+             - index_info (str): index path if used
+             - times (list): [npy_time, f0_time, infer_time, total_time]
+         - Tuple(sr, data): Audio data results.
+         """
+         global total_time
+         total_time = 0
+         start_time = time.time()
+
+         if not input_audio_path1:
+             return "You need to upload an audio", None
+
+         if not os.path.exists(input_audio_path1):
+             return "Audio was not properly selected or doesn't exist", None
+
+         f0_up_key = int(f0_up_key)
+         if not f0_min.isdigit():  # numeric strings pass through; note names (e.g. "C5") are converted
+             f0_min = note_to_hz(f0_min)
+             if f0_min:
+                 print(f"Converted Min pitch: freq - {f0_min}")
+             else:
+                 f0_min = 50
+                 print("Invalid minimum pitch note. Defaulting to 50 Hz.")
+         else:
+             f0_min = float(f0_min)
+         if not f0_max.isdigit():
+             f0_max = note_to_hz(f0_max)
+             if f0_max:
+                 print(f"Converted Max pitch: freq - {f0_max}")
+             else:
+                 f0_max = 1100
+                 print("Invalid maximum pitch note. Defaulting to 1100 Hz.")
+         else:
+             f0_max = float(f0_max)
+
+         try:
+             print(f"Attempting to load {input_audio_path1}...")
+             audio = load_audio(file=input_audio_path1,
+                                sr=16000,
+                                DoFormant=do_formant,
+                                Quefrency=quefrency,
+                                Timbre=timbre)
+
+             audio_max = np.abs(audio).max() / 0.95
+             if audio_max > 1:
+                 audio /= audio_max
+             times = [0, 0, 0]
+
+             if self.hubert_model is None:
+                 self.hubert_model = load_hubert(hubert_model_path, self.config)
+
+             try:
+                 self.if_f0 = self.cpt.get("f0", 1)
+             except NameError:
+                 message = "Model was not properly selected"
+                 print(message)
+                 return message, None
+
+             if file_index and isinstance(file_index, str):
+                 file_index = file_index.strip(" ") \
+                     .strip('"') \
+                     .strip("\n") \
+                     .strip('"') \
+                     .strip(" ") \
+                     .replace("trained", "added")
+             elif file_index2:
+                 file_index = file_index2
+             else:
+                 file_index = ""
+
+             try:
+                 audio_opt = self.pipeline.pipeline(
+                     self.hubert_model,
+                     self.net_g,
+                     sid,
+                     audio,
+                     input_audio_path1,
+                     times,
+                     f0_up_key,
+                     f0_method,
+                     file_index,
+                     index_rate,
+                     self.if_f0,
+                     filter_radius,
+                     self.tgt_sr,
+                     resample_sr,
+                     rms_mix_rate,
+                     self.version,
+                     protect,
+                     crepe_hop_length,
+                     f0_autotune,
+                     f0_min=f0_min,
+                     f0_max=f0_max
+                 )
+             except AssertionError:
+                 message = "Mismatching index version detected (v1 with v2, or v2 with v1)."
+                 print(message)
+                 return message, None
+             except NameError:
+                 message = "RVC libraries are still loading. Please try again in a few seconds."
+                 print(message)
+                 return message, None
+
+             if self.tgt_sr != resample_sr >= 16000:  # resample only when requested and valid
+                 tgt_sr = resample_sr
+             else:
+                 tgt_sr = self.tgt_sr
+             index_info = (
+                 "Index: %s." % file_index
+                 if isinstance(file_index, str) and os.path.exists(file_index)
+                 else "Index not used."
+             )
+             end_time = time.time()
+             total_time = end_time - start_time
+             times.append(total_time)
+             return (
+                 ("Success.", index_info, times),
+                 (tgt_sr, audio_opt),
+             )
+         except:
+             info = traceback.format_exc()
+             logger.warning(info)
+             return (
+                 (info, None, [None, None, None, None]),
+                 (None, None)
+             )
+
+     def vc_single(
+         self,
+         sid,
+         input_audio_path1,
+         f0_up_key,
+         f0_method,
+         file_index,
+         file_index2,
+         index_rate,
+         filter_radius,
+         resample_sr,
+         rms_mix_rate,
+         protect,
+         format1,
+         crepe_hop_length,
+         do_formant,
+         quefrency,
+         timbre,
+         f0_min,
+         f0_max,
+         f0_autotune,
+         hubert_model_path="assets/hubert/hubert_base.pt"
+     ):
+         """
+         Performs inference with saving
+
+         Parameters:
+         - sid (int)
+         - input_audio_path1 (str)
+         - f0_up_key (int)
+         - f0_method (str)
+         - file_index (str)
+         - file_index2 (str)
+         - index_rate (float)
+         - filter_radius (int)
+         - resample_sr (int)
+         - rms_mix_rate (float)
+         - protect (float)
+         - format1 (str)
+         - crepe_hop_length (int)
+         - do_formant (bool)
+         - quefrency (float)
+         - timbre (float)
+         - f0_min (str)
+         - f0_max (str)
+         - f0_autotune (bool)
+         - hubert_model_path (str)
+
+         Returns:
+         Tuple(Tuple(status, index_info, times), Tuple(sr, data), output_path):
+         - Tuple(status, index_info, times):
+             - status (str): either "Success." or an error
+             - index_info (str): index path if used
+             - times (list): [npy_time, f0_time, infer_time, total_time]
+         - Tuple(sr, data): Audio data results.
+         - output_path (str): Audio results path
+         """
+         global total_time
+         total_time = 0
+         start_time = time.time()
+
+         if not input_audio_path1:
+             return "You need to upload an audio", None, None
+
+         if not os.path.exists(input_audio_path1):
+             return "Audio was not properly selected or doesn't exist", None, None
+
+         f0_up_key = int(f0_up_key)
+         if not f0_min.isdigit():
+             f0_min = note_to_hz(f0_min)
+             if f0_min:
+                 print(f"Converted Min pitch: freq - {f0_min}")
+             else:
+                 f0_min = 50
+                 print("Invalid minimum pitch note. Defaulting to 50 Hz.")
+         else:
+             f0_min = float(f0_min)
+         if not f0_max.isdigit():
+             f0_max = note_to_hz(f0_max)
+             if f0_max:
+                 print(f"Converted Max pitch: freq - {f0_max}")
+             else:
+                 f0_max = 1100
+                 print("Invalid maximum pitch note. Defaulting to 1100 Hz.")
+         else:
+             f0_max = float(f0_max)
+
+         try:
+             print(f"Attempting to load {input_audio_path1}...")
+             audio = load_audio(file=input_audio_path1,
+                                sr=16000,
+                                DoFormant=do_formant,
+                                Quefrency=quefrency,
+                                Timbre=timbre)
+
+             audio_max = np.abs(audio).max() / 0.95
+             if audio_max > 1:
+                 audio /= audio_max
+             times = [0, 0, 0]
+
+             if self.hubert_model is None:
+                 self.hubert_model = load_hubert(hubert_model_path, self.config)
+
+             try:
+                 self.if_f0 = self.cpt.get("f0", 1)
+             except NameError:
+                 message = "Model was not properly selected"
+                 print(message)
+                 return message, None, None
+             if file_index and isinstance(file_index, str):
+                 file_index = file_index.strip(" ") \
+                     .strip('"') \
+                     .strip("\n") \
+                     .strip('"') \
+                     .strip(" ") \
+                     .replace("trained", "added")
+             elif file_index2:
+                 file_index = file_index2
+             else:
+                 file_index = ""
+
+             try:
+                 audio_opt = self.pipeline.pipeline(
+                     self.hubert_model,
+                     self.net_g,
+                     sid,
+                     audio,
+                     input_audio_path1,
+                     times,
+                     f0_up_key,
+                     f0_method,
+                     file_index,
+                     index_rate,
+                     self.if_f0,
+                     filter_radius,
+                     self.tgt_sr,
+                     resample_sr,
+                     rms_mix_rate,
+                     self.version,
+                     protect,
+                     crepe_hop_length,
+                     f0_autotune,
+                     f0_min=f0_min,
+                     f0_max=f0_max
+                 )
+             except AssertionError:
+                 message = "Mismatching index version detected (v1 with v2, or v2 with v1)."
+                 print(message)
+                 return message, None, None
+             except NameError:
+                 message = "RVC libraries are still loading. Please try again in a few seconds."
+                 print(message)
+                 return message, None, None
+
+             if self.tgt_sr != resample_sr >= 16000:
+                 tgt_sr = resample_sr
+             else:
+                 tgt_sr = self.tgt_sr
+             index_info = (
+                 "Index: %s." % file_index
+                 if isinstance(file_index, str) and os.path.exists(file_index)
+                 else "Index not used."
+             )
+
+             opt_root = os.path.join(os.getcwd(), "output")
+             os.makedirs(opt_root, exist_ok=True)
+             output_count = 1
+
+             while True:
+                 opt_filename = f"{os.path.splitext(os.path.basename(input_audio_path1))[0]}{os.path.basename(os.path.dirname(file_index))}{f0_method.capitalize()}_{output_count}.{format1}"
+                 current_output_path = os.path.join(opt_root, opt_filename)
+                 if not os.path.exists(current_output_path):
+                     break
+                 output_count += 1
+             try:
+                 if format1 in ["wav", "flac"]:
+                     sf.write(
+                         current_output_path,
+                         audio_opt,
+                         tgt_sr,  # write at the (possibly resampled) rate
+                     )
+                 else:
+                     with BytesIO() as wavf:
+                         sf.write(
+                             wavf,
+                             audio_opt,
+                             tgt_sr,
+                             format="wav"
+                         )
+                         wavf.seek(0, 0)
+                         with open(current_output_path, "wb") as outf:
+                             wav2(wavf, outf, format1)
+             except:
+                 info = traceback.format_exc()
+                 logger.warning(info)  # don't fail silently when the file can't be written
+             end_time = time.time()
+             total_time = end_time - start_time
+             times.append(total_time)
+             return (
+                 ("Success.", index_info, times),
+                 (tgt_sr, audio_opt),
+                 current_output_path
+             )
+         except:
+             info = traceback.format_exc()
+             logger.warning(info)
+             return (
+                 (info, None, [None, None, None, None]),
+                 (None, None),
+                 None
+             )
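
For in-memory use, VC.vc_single_dont_save returns the converted audio without writing it to disk. A rough sketch, assuming Configs from lib/infer.py and a placeholder model path; all filenames here are examples:

import soundfile as sf
from lib.infer import Configs
from lib.modules import VC

configs = Configs("cuda:0", True)
vc = VC(configs)
vc.get_vc("models/MyVoice/voice.pth", 0.33, 0.33)   # placeholder .pth path
info, (sr, audio) = vc.vc_single_dont_save(
    0, "input.wav", 0, "rmvpe", "", "", 0.75, 3, 0, 0.25, 0.33,
    128, False, 0, 1, "50", "1100", False,
)
if info[0] == "Success.":
    sf.write("converted.wav", audio, sr)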
lib/pipeline.py ADDED
@@ -0,0 +1,766 @@
+ import os
+ import sys
+ import gc
+ import traceback
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ from functools import lru_cache
+ from time import time as ttime
+ from torch import Tensor
+ import faiss
+ import librosa
+ import numpy as np
+ import parselmouth
+ import pyworld
+ import torch.nn.functional as F
+ from scipy import signal
+ from tqdm import tqdm
+
+ import random
+ now_dir = os.getcwd()
+ sys.path.append(now_dir)
+ import re
+ bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
+
+ input_audio_path2wav = {}
+ import torchcrepe  # Fork Feature. Crepe algo for training and preprocess
+ import torch
+ from lib.infer_libs.rmvpe import RMVPE
+ from lib.infer_libs.fcpe import FCPEF0Predictor  # the class used below; the original imported FCPE, which is never referenced
+
+ @lru_cache
+ def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
+     audio = input_audio_path2wav[input_audio_path]
+     f0, t = pyworld.harvest(
+         audio,
+         fs=fs,
+         f0_ceil=f0max,
+         f0_floor=f0min,
+         frame_period=frame_period,
+     )
+     f0 = pyworld.stonemask(audio, f0, t, fs)
+     return f0
+
+
+ def change_rms(data1, sr1, data2, sr2, rate):  # data1 is the input audio, data2 the output audio; rate is data2's share of the mix
+     # print(data1.max(), data2.max())
+     rms1 = librosa.feature.rms(
+         y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
+     )  # one RMS point every half second
+     rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
+     rms1 = torch.from_numpy(rms1)
+     rms1 = F.interpolate(
+         rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
+     ).squeeze()
+     rms2 = torch.from_numpy(rms2)
+     rms2 = F.interpolate(
+         rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
+     ).squeeze()
+     rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
+     data2 *= (
+         torch.pow(rms1, torch.tensor(1 - rate))
+         * torch.pow(rms2, torch.tensor(rate - 1))
+     ).numpy()
+     return data2
+
+
+ class Pipeline(object):
+     def __init__(self, tgt_sr, config):
+         self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
+             config.x_pad,
+             config.x_query,
+             config.x_center,
+             config.x_max,
+             config.is_half,
+         )
+         self.sr = 16000  # hubert input sample rate
+         self.window = 160  # samples per frame
+         self.t_pad = self.sr * self.x_pad  # padding added before/after each chunk
+         self.t_pad_tgt = tgt_sr * self.x_pad
+         self.t_pad2 = self.t_pad * 2
+         self.t_query = self.sr * self.x_query  # search window around each candidate cut point
+         self.t_center = self.sr * self.x_center  # spacing between candidate cut points
+         self.t_max = self.sr * self.x_max  # below this duration, no cutting is needed
+         self.device = config.device
+         self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device)
+
+         self.note_dict = [
+             65.41, 69.30, 73.42, 77.78, 82.41, 87.31,
+             92.50, 98.00, 103.83, 110.00, 116.54, 123.47,
+             130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
+             185.00, 196.00, 207.65, 220.00, 233.08, 246.94,
+             261.63, 277.18, 293.66, 311.13, 329.63, 349.23,
+             369.99, 392.00, 415.30, 440.00, 466.16, 493.88,
+             523.25, 554.37, 587.33, 622.25, 659.25, 698.46,
+             739.99, 783.99, 830.61, 880.00, 932.33, 987.77,
+             1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91,
+             1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53,
+             2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83,
+             2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07
+         ]
+
+     # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)
+     def get_optimal_torch_device(self, index: int = 0) -> torch.device:
+         if torch.cuda.is_available():
+             return torch.device(
+                 f"cuda:{index % torch.cuda.device_count()}"
+             )  # Very fast
+         elif torch.backends.mps.is_available():
+             return torch.device("mps")
+         return torch.device("cpu")
+
+     # Fork Feature: Compute f0 with the crepe method
+     def get_f0_crepe_computation(
+         self,
+         x,
+         f0_min,
+         f0_max,
+         p_len,
+         *args,  # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths mean more pitch accuracy but longer inference time.
+         **kwargs,  # Either use crepe-tiny "tiny" or crepe "full". Default is full
+     ):
+         x = x.astype(
+             np.float32
+         )  # fixes the F.conv2D exception. We needed to convert double to float.
+         x /= np.quantile(np.abs(x), 0.999)
+         torch_device = self.get_optimal_torch_device()
+         audio = torch.from_numpy(x).to(torch_device, copy=True)
+         audio = torch.unsqueeze(audio, dim=0)
+         if audio.ndim == 2 and audio.shape[0] > 1:
+             audio = torch.mean(audio, dim=0, keepdim=True).detach()
+         audio = audio.detach()
+         hop_length = kwargs.get('crepe_hop_length', 160)
+         model = kwargs.get('model', 'full')
+         print("Initiating prediction with a crepe_hop_length of: " + str(hop_length))
+         pitch: Tensor = torchcrepe.predict(
+             audio,
+             self.sr,
+             hop_length,
+             f0_min,
+             f0_max,
+             model,
+             batch_size=hop_length * 2,
+             device=torch_device,
+             pad=True,
+         )
+         p_len = p_len or x.shape[0] // hop_length
+         # Resize the pitch for final f0
+         source = np.array(pitch.squeeze(0).cpu().float().numpy())
+         source[source < 0.001] = np.nan
+         target = np.interp(
+             np.arange(0, len(source) * p_len, len(source)) / p_len,
+             np.arange(0, len(source)),
+             source,
+         )
+         f0 = np.nan_to_num(target)
+         return f0  # Resized f0
+
+     def get_f0_official_crepe_computation(
+         self,
+         x,
+         f0_min,
+         f0_max,
+         *args,
+         **kwargs
+     ):
+         # Pick a batch size that doesn't cause memory errors on your gpu
+         batch_size = 512
+         # Compute pitch using first gpu
+         audio = torch.tensor(np.copy(x))[None].float()
+         model = kwargs.get('model', 'full')
+         f0, pd = torchcrepe.predict(
+             audio,
+             self.sr,
+             self.window,
+             f0_min,
+             f0_max,
+             model,
+             batch_size=batch_size,
+             device=self.device,
+             return_periodicity=True,
+         )
+         pd = torchcrepe.filter.median(pd, 3)
+         f0 = torchcrepe.filter.mean(f0, 3)
+         f0[pd < 0.1] = 0
+         f0 = f0[0].cpu().numpy()
+         return f0
+
+     # Fork Feature: Compute pYIN f0 method
+     def get_f0_pyin_computation(self, x, f0_min, f0_max):
+         y, sr = librosa.load(x, sr=self.sr, mono=True)
+         f0, _, _ = librosa.pyin(y, fmin=f0_min, fmax=f0_max, sr=self.sr)
+         f0 = f0[1:]  # Get rid of extra first frame
+         return f0
+
+     def get_pm(self, x, p_len, *args, **kwargs):
+         f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
+             time_step=160 / 16000,
+             voicing_threshold=0.6,
+             pitch_floor=kwargs.get('f0_min'),
+             pitch_ceiling=kwargs.get('f0_max'),
+         ).selected_array["frequency"]
+
+         return np.pad(
+             f0,
+             [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]],
+             mode="constant"
+         )
+
+     def get_harvest(self, x, *args, **kwargs):
+         f0_spectral = pyworld.harvest(
+             x.astype(np.double),
+             fs=self.sr,
+             f0_ceil=kwargs.get('f0_max'),
+             f0_floor=kwargs.get('f0_min'),
+             frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
+         )
+         return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
+
+     def get_dio(self, x, *args, **kwargs):
+         f0_spectral = pyworld.dio(
+             x.astype(np.double),
+             fs=self.sr,
+             f0_ceil=kwargs.get('f0_max'),
+             f0_floor=kwargs.get('f0_min'),
+             frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
+         )
+         return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
+
+     def get_rmvpe(self, x, *args, **kwargs):
+         if not hasattr(self, "model_rmvpe"):
+             from lib.infer_libs.rmvpe import RMVPE
+
+             logger.info(
+                 "Loading rmvpe model %s" % ("%s/rmvpe.pt" % os.environ["rmvpe_root"])
+             )
+             self.model_rmvpe = RMVPE(
+                 "%s/rmvpe.pt" % os.environ["rmvpe_root"],
+                 is_half=self.is_half,
+                 device=self.device,
+             )
+         f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
+
+         if "privateuseone" in str(self.device):  # clean onnxruntime memory
+             del self.model_rmvpe.model
+             del self.model_rmvpe
+             logger.info("Cleaning onnxruntime memory")
+
+         return f0
+
+     def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs):
+         if not hasattr(self, "model_rmvpe"):
+             from lib.infer_libs.rmvpe import RMVPE
+
+             logger.info(
+                 "Loading rmvpe model %s" % ("%s/rmvpe.pt" % os.environ["rmvpe_root"])
+             )
+             self.model_rmvpe = RMVPE(
+                 "%s/rmvpe.pt" % os.environ["rmvpe_root"],
+                 is_half=self.is_half,
+                 device=self.device,
+             )
+         f0 = self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max)
+         if "privateuseone" in str(self.device):  # clean onnxruntime memory
+             del self.model_rmvpe.model
+             del self.model_rmvpe
+             logger.info("Cleaning onnxruntime memory")
+
+         return f0
+
+     def get_fcpe(self, x, f0_min, f0_max, p_len, *args, **kwargs):
+         self.model_fcpe = FCPEF0Predictor("%s/fcpe.pt" % os.environ["fcpe_root"], f0_min=f0_min, f0_max=f0_max, dtype=torch.float32, device=self.device, sampling_rate=self.sr, threshold=0.03)
+         f0 = self.model_fcpe.compute_f0(x, p_len=p_len)
+         del self.model_fcpe
+         gc.collect()
+         return f0
+
+     def autotune_f0(self, f0):
+         autotuned_f0 = []
+         for freq in f0:
+             closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)]
+             autotuned_f0.append(random.choice(closest_notes))
+         return np.array(autotuned_f0, np.float64)
+
+     # Fork Feature: Acquire median hybrid f0 estimation calculation
+     def get_f0_hybrid_computation(
+         self,
+         methods_str,
+         input_audio_path,
+         x,
+         f0_min,
+         f0_max,
+         p_len,
+         filter_radius,
+         crepe_hop_length,
+         time_step,
+     ):
+         # Get the f0 methods named in the input string, to use in the computation stack
+         match = re.search(r'hybrid\[(.+)\]', methods_str)
+         if not match:
+             raise ValueError(f"Could not parse hybrid method string: {methods_str}")
+         methods = [method.strip() for method in match.group(1).split('+')]
+         f0_computation_stack = []
+
+         print("Calculating f0 pitch estimations for methods: %s" % str(methods))
+         x = x.astype(np.float32)
+         x /= np.quantile(np.abs(x), 0.999)
+         # Get f0 calculations for all methods specified
+         for method in methods:
+             f0 = None
+             if method == "pm":
+                 f0 = self.get_pm(x, p_len=p_len, f0_min=f0_min, f0_max=f0_max)
+             elif method == "crepe":
+                 f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="full")
+                 f0 = f0[1:]
+             elif method == "crepe-tiny":
+                 f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="tiny")
+                 f0 = f0[1:]  # Get rid of extra first frame
+             elif method == "mangio-crepe":
+                 f0 = self.get_f0_crepe_computation(
+                     x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length
+                 )
+             elif method == "mangio-crepe-tiny":
+                 f0 = self.get_f0_crepe_computation(
+                     x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length, model="tiny"
+                 )
+             elif method == "harvest":
+                 f0 = self.get_harvest(x, f0_min=f0_min, f0_max=f0_max)
+                 f0 = f0[1:]
+             elif method == "dio":
+                 f0 = self.get_dio(x, f0_min=f0_min, f0_max=f0_max)
+                 f0 = f0[1:]
+             elif method == "rmvpe":
+                 f0 = self.get_rmvpe(x)
+                 f0 = f0[1:]
+             elif method == "fcpe":
+                 f0 = self.get_fcpe(x, f0_min=f0_min, f0_max=f0_max, p_len=p_len)
+             elif method == "pyin":
+                 f0 = self.get_f0_pyin_computation(input_audio_path, f0_min, f0_max)
+             # Push method to the stack
+             f0_computation_stack.append(f0)
+
+         for fc in f0_computation_stack:
+             print(len(fc))
+
+         print("Calculating hybrid median f0 from the stack of: %s" % str(methods))
+         f0_median_hybrid = None
+         if len(f0_computation_stack) == 1:
+             f0_median_hybrid = f0_computation_stack[0]
+         else:
+             f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
+         return f0_median_hybrid
+
+     def get_f0(
+         self,
+         input_audio_path,
+         x,
+         p_len,
+         f0_up_key,
+         f0_method,
+         filter_radius,
+         crepe_hop_length,
+         f0_autotune,
+         inp_f0=None,
+         f0_min=50,
+         f0_max=1100,
+     ):
+         global input_audio_path2wav
+         time_step = self.window / self.sr * 1000
+         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
+         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
+
+         if f0_method == "pm":
+             f0 = (
+                 parselmouth.Sound(x, self.sr)
+                 .to_pitch_ac(
+                     time_step=time_step / 1000,
+                     voicing_threshold=0.6,
+                     pitch_floor=f0_min,
+                     pitch_ceiling=f0_max,
+                 )
+                 .selected_array["frequency"]
+             )
+             pad_size = (p_len - len(f0) + 1) // 2
+             if pad_size > 0 or p_len - len(f0) - pad_size > 0:
+                 f0 = np.pad(
+                     f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
+                 )
+         elif f0_method == "harvest":
+             input_audio_path2wav[input_audio_path] = x.astype(np.double)
+             f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
+             if filter_radius > 2:
+                 f0 = signal.medfilt(f0, 3)
+         elif f0_method == "dio":  # Potentially Buggy?
+             f0, t = pyworld.dio(
+                 x.astype(np.double),
+                 fs=self.sr,
+                 f0_ceil=f0_max,
+                 f0_floor=f0_min,
+                 frame_period=10,
+             )
+             f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
+             f0 = signal.medfilt(f0, 3)
+         elif f0_method == "crepe":
+             model = "full"
+             # Pick a batch size that doesn't cause memory errors on your gpu
+             batch_size = 512
+             # Compute pitch using first gpu
+             audio = torch.tensor(np.copy(x))[None].float()
+             f0, pd = torchcrepe.predict(
+                 audio,
+                 self.sr,
+                 self.window,
+                 f0_min,
+                 f0_max,
+                 model,
+                 batch_size=batch_size,
+                 device=self.device,
+                 return_periodicity=True,
+             )
+             pd = torchcrepe.filter.median(pd, 3)
+             f0 = torchcrepe.filter.mean(f0, 3)
+             f0[pd < 0.1] = 0
+             f0 = f0[0].cpu().numpy()
+         elif f0_method == "crepe-tiny":
+             f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, model="tiny")
+         elif f0_method == "mangio-crepe":
+             f0 = self.get_f0_crepe_computation(
+                 x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length
+             )
+         elif f0_method == "mangio-crepe-tiny":
+             f0 = self.get_f0_crepe_computation(
+                 x, f0_min, f0_max, p_len, crepe_hop_length=crepe_hop_length, model="tiny"
+             )
+         elif f0_method == "rmvpe":
+             if not hasattr(self, "model_rmvpe"):
+                 from lib.infer_libs.rmvpe import RMVPE
+
+                 logger.info(
+                     "Loading rmvpe model %s" % ("%s/rmvpe.pt" % os.environ["rmvpe_root"])
+                 )
+                 self.model_rmvpe = RMVPE(
+                     "%s/rmvpe.pt" % os.environ["rmvpe_root"],
+                     is_half=self.is_half,
+                     device=self.device,
+                 )
+             f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
+
+             if "privateuseone" in str(self.device):  # clean onnxruntime memory
+                 del self.model_rmvpe.model
+                 del self.model_rmvpe
+                 logger.info("Cleaning onnxruntime memory")
+         elif f0_method == "rmvpe+":
+             params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min,
+                       'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
+                       'crepe_hop_length': crepe_hop_length, 'model': "full"
+                       }
+             f0 = self.get_pitch_dependant_rmvpe(**params)
+         elif f0_method == "pyin":
+             f0 = self.get_f0_pyin_computation(input_audio_path, f0_min, f0_max)
+         elif f0_method == "fcpe":
+             f0 = self.get_fcpe(x, f0_min=f0_min, f0_max=f0_max, p_len=p_len)
+         elif "hybrid" in f0_method:
+             # Perform hybrid median pitch estimation
+             input_audio_path2wav[input_audio_path] = x.astype(np.double)
+             f0 = self.get_f0_hybrid_computation(
+                 f0_method,
+                 input_audio_path,
+                 x,
+                 f0_min,
+                 f0_max,
+                 p_len,
+                 filter_radius,
+                 crepe_hop_length,
+                 time_step,
+             )
+         if f0_autotune:
+             print("Autotune:", f0_autotune)
+             f0 = self.autotune_f0(f0)
+
+         f0 *= pow(2, f0_up_key / 12)
+         # with open("test.txt","w") as f: f.write("\n".join([str(i) for i in f0.tolist()]))
+         tf0 = self.sr // self.window  # f0 points per second
+         if inp_f0 is not None:
+             delta_t = np.round(
+                 (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
+             ).astype("int16")
+             replace_f0 = np.interp(
+                 list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+             )
+             shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
+             f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+                 :shape
+             ]
+         # with open("test_opt.txt","w") as f: f.write("\n".join([str(i) for i in f0.tolist()]))
+         f0bak = f0.copy()
+         f0_mel = 1127 * np.log(1 + f0 / 700)
+         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
+             f0_mel_max - f0_mel_min
+         ) + 1
+         f0_mel[f0_mel <= 1] = 1
+         f0_mel[f0_mel > 255] = 255
+         f0_coarse = np.rint(f0_mel).astype(np.int32)
+         return f0_coarse, f0bak  # 1-0
+
+     def vc(
+         self,
+         model,
+         net_g,
+         sid,
+         audio0,
+         pitch,
+         pitchf,
+         times,
+         index,
+         big_npy,
+         index_rate,
+         version,
+         protect,
+     ):  # ,file_index,file_big_npy
+         feats = torch.from_numpy(audio0)
+         if self.is_half:
+             feats = feats.half()
+         else:
+             feats = feats.float()
+         if feats.dim() == 2:  # double channels
+             feats = feats.mean(-1)
+         assert feats.dim() == 1, feats.dim()
+         feats = feats.view(1, -1)
+         padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
+
+         inputs = {
+             "source": feats.to(self.device),
+             "padding_mask": padding_mask,
+             "output_layer": 9 if version == "v1" else 12,
+         }
+         t0 = ttime()
+         with torch.no_grad():
+             logits = model.extract_features(**inputs)
+             feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
+         if protect < 0.5 and pitch is not None and pitchf is not None:
+             feats0 = feats.clone()
+         if (
+             not isinstance(index, type(None))
+             and not isinstance(big_npy, type(None))
+             and index_rate != 0
+         ):
+             npy = feats[0].cpu().numpy()
+             if self.is_half:
+                 npy = npy.astype("float32")
+
+             # _, I = index.search(npy, 1)
+             # npy = big_npy[I.squeeze()]
+
+             score, ix = index.search(npy, k=8)
+             weight = np.square(1 / score)
+             weight /= weight.sum(axis=1, keepdims=True)
+             npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
+
+             if self.is_half:
+                 npy = npy.astype("float16")
+             feats = (
+                 torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+                 + (1 - index_rate) * feats
+             )
+
+         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
+         if protect < 0.5 and pitch is not None and pitchf is not None:
+             feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
+                 0, 2, 1
+             )
+         t1 = ttime()
+         p_len = audio0.shape[0] // self.window
+         if feats.shape[1] < p_len:
+             p_len = feats.shape[1]
+             if pitch is not None and pitchf is not None:
+                 pitch = pitch[:, :p_len]
+                 pitchf = pitchf[:, :p_len]
+
+         if protect < 0.5 and pitch is not None and pitchf is not None:
+             pitchff = pitchf.clone()
+             pitchff[pitchf > 0] = 1
+             pitchff[pitchf < 1] = protect
+             pitchff = pitchff.unsqueeze(-1)
+             feats = feats * pitchff + feats0 * (1 - pitchff)
+             feats = feats.to(feats0.dtype)
+         p_len = torch.tensor([p_len], device=self.device).long()
+         with torch.no_grad():
+             hasp = pitch is not None and pitchf is not None
+             arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)
+             audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
+             del hasp, arg
+         del feats, p_len, padding_mask
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         t2 = ttime()
+         times[0] += t1 - t0
+         times[2] += t2 - t1
+         return audio1
+
+     def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g):
+         t = t // window * window
+         if if_f0 == 1:
+             return self.vc(
+                 model,
+                 net_g,
+                 sid,
+                 audio_pad[s : t + t_pad_tgt + window],
+                 pitch[:, s // window : (t + t_pad_tgt) // window],
+                 pitchf[:, s // window : (t + t_pad_tgt) // window],
+                 times,
+                 index,
+                 big_npy,
+                 index_rate,
+                 version,
+                 protect,
+             )[t_pad_tgt : -t_pad_tgt]
+         else:
+             return self.vc(
+                 model,
+                 net_g,
+                 sid,
+                 audio_pad[s : t + t_pad_tgt + window],
+                 None,
+                 None,
+                 times,
+                 index,
+                 big_npy,
+                 index_rate,
+                 version,
+                 protect,
+             )[t_pad_tgt : -t_pad_tgt]
+
+     def pipeline(
+         self,
+         model,
+         net_g,
+         sid,
+         audio,
+         input_audio_path,
+         times,
+         f0_up_key,
+         f0_method,
+         file_index,
+         index_rate,
+         if_f0,
+         filter_radius,
+         tgt_sr,
+         resample_sr,
+         rms_mix_rate,
+         version,
+         protect,
+         crepe_hop_length,
+         f0_autotune,
+         f0_min=50,
+         f0_max=1100
+     ):
+         if (
+             file_index != ""
+             and isinstance(file_index, str)
+             # and file_big_npy != ""
+             # and os.path.exists(file_big_npy) == True
+             and os.path.exists(file_index)
+             and index_rate != 0
+         ):
+             try:
+                 index = faiss.read_index(file_index)
+                 # big_npy = np.load(file_big_npy)
+                 big_npy = index.reconstruct_n(0, index.ntotal)
+             except:
+                 traceback.print_exc()
+                 index = big_npy = None
+         else:
+             index = big_npy = None
+         audio = signal.filtfilt(bh, ah, audio)
+         audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
+         opt_ts = []
+         if audio_pad.shape[0] > self.t_max:
+             audio_sum = np.zeros_like(audio)
+             for i in range(self.window):
+                 audio_sum += audio_pad[i : i - self.window]
+             for t in range(self.t_center, audio.shape[0], self.t_center):
+                 opt_ts.append(
+                     t
+                     - self.t_query
+                     + np.where(
+                         np.abs(audio_sum[t - self.t_query : t + self.t_query])
+                         == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
+                     )[0][0]
+                 )
+         s = 0
+         audio_opt = []
+         t = None
+         t1 = ttime()
+         audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
+         p_len = audio_pad.shape[0] // self.window
+         inp_f0 = None
+
+         sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
+         pitch, pitchf = None, None
+         if if_f0:
+             pitch, pitchf = self.get_f0(
+                 input_audio_path,
+                 audio_pad,
+                 p_len,
+                 f0_up_key,
+                 f0_method,
+                 filter_radius,
+                 crepe_hop_length,
+                 f0_autotune,
+                 inp_f0,
+                 f0_min,
+                 f0_max
+             )
+             pitch = pitch[:p_len]
+             pitchf = pitchf[:p_len]
+             pitchf = pitchf.astype(np.float32)  # float32 is required on mps/xpu and harmless elsewhere
+             pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
+             pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
+         t2 = ttime()
+         times[1] += t2 - t1
+
+         with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar:
+             for i, t in enumerate(opt_ts):
+                 t = t // self.window * self.window
+                 start = s
+                 end = t + self.t_pad2 + self.window
+                 audio_slice = audio_pad[start:end]
+                 pitch_slice = pitch[:, start // self.window : end // self.window] if if_f0 else None
+                 pitchf_slice = pitchf[:, start // self.window : end // self.window] if if_f0 else None
+                 audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
+                 s = t
+                 pbar.update(1)
+             pbar.refresh()
+
+         audio_slice = audio_pad[t:]
+         pitch_slice = pitch[:, t // self.window :] if if_f0 and t is not None else pitch
+         pitchf_slice = pitchf[:, t // self.window :] if if_f0 and t is not None else pitchf
+         audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
+
+         audio_opt = np.concatenate(audio_opt)
+         if rms_mix_rate != 1:
+             audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
+         if tgt_sr != resample_sr >= 16000:
+             audio_opt = librosa.resample(
+                 audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
+             )
+         audio_max = np.abs(audio_opt).max() / 0.99
+         max_int16 = 32768
+         if audio_max > 1:
+             max_int16 /= audio_max
+         audio_opt = (audio_opt * max_int16).astype(np.int16)
+         del pitch, pitchf, sid
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+
+         print("Returning completed audio...")
+         return audio_opt
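
The hybrid f0 path accepts method strings of the form hybrid[a+b+...]; each named estimator is run over the audio, the results are stacked, and the per-frame median (np.nanmedian) is used. A standalone sketch of just the string parsing, mirroring the regex in get_f0_hybrid_computation; the method string is an example value:

import re

methods_str = "hybrid[rmvpe+harvest]"             # example value for f0_method
match = re.search(r"hybrid\[(.+)\]", methods_str)
methods = [m.strip() for m in match.group(1).split("+")]
print(methods)  # ['rmvpe', 'harvest'] -> each is estimated, then the per-frame median is taken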
lib/split_audio.py ADDED
@@ -0,0 +1,91 @@
+ import os
+ from pydub import AudioSegment
+ from pydub.silence import detect_silence, detect_nonsilent
+
+ SEPERATE_DIR = os.path.join(os.getcwd(), "seperate")
+ TEMP_DIR = os.path.join(SEPERATE_DIR, "temp")
+ cache = {}
+
+ os.makedirs(SEPERATE_DIR, exist_ok=True)
+ os.makedirs(TEMP_DIR, exist_ok=True)
+
+ def cache_result(func):
+     def wrapper(*args, **kwargs):
+         key = (args, frozenset(kwargs.items()))
+         if key in cache:
+             return cache[key]
+         result = func(*args, **kwargs)
+         cache[key] = result
+         return result
+     return wrapper
+
+ def get_non_silent(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence):
+     """
+     Get the non-silent parts of the audio.
+     """
+     nonsilent_ranges = detect_nonsilent(audio, min_silence_len=min_silence, silence_thresh=silence_thresh, seek_step=seek_step)
+     nonsilent_files = []
+     for index, rng in enumerate(nonsilent_ranges):  # `rng` rather than `range`, to avoid shadowing the builtin
+         nonsilent_name = os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}", f"nonsilent{index}-{audio_name}.wav")
+         start, end = rng[0] - keep_silence, rng[1] + keep_silence
+         audio[start:end].export(nonsilent_name, format="wav")
+         nonsilent_files.append(nonsilent_name)
+     return nonsilent_files
+
+ def get_silence(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence):
+     """
+     Get the silent parts of the audio.
+     """
+     silence_ranges = detect_silence(audio, min_silence_len=min_silence, silence_thresh=silence_thresh, seek_step=seek_step)
+     silence_files = []
+     for index, rng in enumerate(silence_ranges):
+         silence_name = os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}", f"silence{index}-{audio_name}.wav")
+         start, end = rng[0] + keep_silence, rng[1] - keep_silence
+         audio[start:end].export(silence_name, format="wav")
+         silence_files.append(silence_name)
+     return silence_files
+
+ @cache_result
+ def split_silence_nonsilent(input_path, min_silence=500, silence_thresh=-40, seek_step=1, keep_silence=100):
+     """
+     Split the audio into silent and non-silent parts.
+     """
+     audio_name = os.path.splitext(os.path.basename(input_path))[0]
+     os.makedirs(os.path.join(SEPERATE_DIR, f"{audio_name}_min{min_silence}_t{silence_thresh}_ss{seek_step}_ks{keep_silence}"), exist_ok=True)
+     audio = AudioSegment.silent(duration=1000) + AudioSegment.from_file(input_path) + AudioSegment.silent(duration=1000)  # 1 s pads so leading/trailing silence is always detected
+     silence_files = get_silence(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence)
+     nonsilent_files = get_non_silent(audio_name, audio, min_silence, silence_thresh, seek_step, keep_silence)
+     return silence_files, nonsilent_files
+
+ def adjust_audio_lengths(original_audios, inferred_audios):
+     """
+     Adjust the lengths of the inferred audio files to match the corresponding original audio files.
+     """
+     adjusted_audios = []
+     for original_audio, inferred_audio in zip(original_audios, inferred_audios):
+         audio_1 = AudioSegment.from_file(original_audio)
+         audio_2 = AudioSegment.from_file(inferred_audio)
+
+         if len(audio_1) > len(audio_2):
+             audio_2 += AudioSegment.silent(duration=len(audio_1) - len(audio_2))
+         else:
+             audio_2 = audio_2[:len(audio_1)]
+
+         adjusted_file = os.path.join(TEMP_DIR, f"adjusted-{os.path.basename(inferred_audio)}")
+         audio_2.export(adjusted_file, format="wav")
+         adjusted_audios.append(adjusted_file)
+
+     return adjusted_audios
+
+ def combine_silence_nonsilent(silence_files, nonsilent_files, keep_silence, output):
+     """
+     Interleave the silent and non-silent parts back into one audio file.
+     """
+     combined = AudioSegment.empty()
+     for silence, nonsilent in zip(silence_files, nonsilent_files):
+         combined += AudioSegment.from_wav(silence) + AudioSegment.from_wav(nonsilent)
+     combined += AudioSegment.from_wav(silence_files[-1])  # assumes one more silence segment than nonsilent segments (leading and trailing pads)
+     combined = AudioSegment.silent(duration=keep_silence) + combined[1000:-1000] + AudioSegment.silent(duration=keep_silence)  # trim the 1 s pads added when splitting
+     combined.export(output, format="wav")
+     return output
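
Taken together, the three helpers round-trip an audio file. A small sketch of the same split/adjust/combine sequence that lib/infer.py performs; the filenames are placeholders, and the "processed" list here simply reuses the originals to show the data flow:

from lib.split_audio import split_silence_nonsilent, adjust_audio_lengths, combine_silence_nonsilent

silences, nonsilents = split_silence_nonsilent("input.wav", min_silence=500, silence_thresh=-40)
# ... run inference over each file in `nonsilents`, collecting the output paths ...
processed = nonsilents                      # stand-in for the inferred files
adjusted = adjust_audio_lengths(nonsilents, processed)
combine_silence_nonsilent(silences, adjusted, 100, "rebuilt.wav")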