Eempostor committed
Commit 41a7bbb
1 Parent(s): 0566014

Upload 3 files

lib/infer_libs/audio.py ADDED
@@ -0,0 +1,142 @@
1
+ import librosa
2
+ import numpy as np
3
+ import av
4
+ from io import BytesIO
5
+ import ffmpeg
6
+ import os
7
+ import traceback
8
+ import sys
9
+ import random
10
+ import subprocess
11
+
12
+ platform_stft_mapping = {
13
+ 'linux': os.path.join(os.getcwd(), 'stftpitchshift'),
14
+ 'darwin': os.path.join(os.getcwd(), 'stftpitchshift'),
15
+ 'win32': os.path.join(os.getcwd(), 'stftpitchshift.exe'),
16
+ }
17
+
18
+ stft = platform_stft_mapping.get(sys.platform)
19
+
20
+ def wav2(i, o, format):
21
+ inp = av.open(i, 'rb')
22
+ if format == "m4a": format = "mp4"
23
+ out = av.open(o, 'wb', format=format)
24
+ if format == "ogg": format = "libvorbis"
25
+ if format == "mp4": format = "aac"
26
+
27
+ ostream = out.add_stream(format)
28
+
29
+ for frame in inp.decode(audio=0):
30
+ for p in ostream.encode(frame): out.mux(p)
31
+
32
+ for p in ostream.encode(None): out.mux(p)
33
+
34
+ out.close()
35
+ inp.close()
36
+
37
+ def audio2(i, o, format, sr):
38
+ inp = av.open(i, 'rb')
39
+ out = av.open(o, 'wb', format=format)
40
+ if format == "ogg": format = "libvorbis"
41
+ if format == "f32le": format = "pcm_f32le"
42
+
43
+ ostream = out.add_stream(format, channels=1)
44
+ ostream.sample_rate = sr
45
+
46
+ for frame in inp.decode(audio=0):
47
+ for p in ostream.encode(frame): out.mux(p)
48
+
49
+ out.close()
50
+ inp.close()
51
+
52
+ def load_audion(file, sr):
53
+ try:
54
+ file = (
55
+ file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
56
+ ) # strip stray spaces, quotes, and newlines that users may copy along with the path
57
+ with open(file, "rb") as f:
58
+ with BytesIO() as out:
59
+ audio2(f, out, "f32le", sr)
60
+ return np.frombuffer(out.getvalue(), np.float32).flatten()
61
+
62
+ except AttributeError:
63
+ audio = file[1] / 32768.0
64
+ if len(audio.shape) == 2:
65
+ audio = np.mean(audio, -1)
66
+ return librosa.resample(audio, orig_sr=file[0], target_sr=16000)
67
+
68
+ except Exception as e:
69
+ raise RuntimeError(f"Failed to load audio: {e}")
70
+
71
+ def load_audio(file, sr, DoFormant=False, Quefrency=1.0, Timbre=1.0):
72
+ converted = False
73
+ formanted = False
74
+ file = file.strip(' \n"')
75
+ if not os.path.exists(file):
76
+ raise RuntimeError(
77
+ "Audio path does not exist."
78
+ )
79
+
80
+ try:
81
+ if not file.endswith(".wav"):
82
+ converted = True
83
+ formatted_file = f"{os.path.splitext(os.path.basename(file))[0]}.wav"
84
+ subprocess.run(
85
+ ["ffmpeg", "-nostdin", "-i", file, formatted_file],
86
+ capture_output=True,
87
+ text=True,
88
+ )
89
+ file = formatted_file
90
+ print(f"File converted to wav: {file}\n")
91
+
92
+ if DoFormant:
93
+ print("Starting formant shift. Please wait as this process takes a while.")
94
+ formanted_file = f"{os.path.splitext(os.path.basename(file))[0]}_formanted{os.path.splitext(os.path.basename(file))[1]}"
95
+ command = (
96
+ f'{stft} -i "{file}" -q "{Quefrency}" '
97
+ f'-t "{Timbre}" -o "{formanted_file}"'
98
+ )
99
+ subprocess.run(command, shell=True)
100
+ file = formanted_file
101
+ print(f"Formant shift applied: {file}\n")
102
+
103
+ with open(file, "rb") as f:
104
+ with BytesIO() as out:
105
+ audio2(f, out, "f32le", sr)
106
+ audio_data = np.frombuffer(out.getvalue(), np.float32).flatten()
107
+
108
+ if converted:
109
+ try:
110
+ os.remove(formatted_file)
111
+ except Exception as error:
112
+ print(f"Couldn't remove converted file: {error}")
113
+ error = None
114
+ converted = False
115
+
116
+ return audio_data
117
+
118
+ except AttributeError:
119
+ audio = file[1] / 32768.0
120
+ if len(audio.shape) == 2:
121
+ audio = np.mean(audio, -1)
122
+ return librosa.resample(audio, orig_sr=file[0], target_sr=16000)
123
+ except Exception:
124
+ raise RuntimeError(traceback.format_exc())
125
+
126
+ def check_audio_duration(file):
127
+ try:
128
+ file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
129
+
130
+ probe = ffmpeg.probe(file)
131
+
132
+ duration = float(probe['streams'][0]['duration'])
133
+
134
+ if duration < 0.76:
135
+ print(
136
+ f"Audio file {file.split('/')[-1]} is shorter than ~0.76 s and is too short. Aim for at least 1-2 s for best results."
137
+ )
138
+ return False
139
+
140
+ return True
141
+ except Exception as e:
142
+ raise RuntimeError(f"Failed to check audio duration: {e}")
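A minimal usage sketch for the helpers above (editorial, not part of the uploaded file); the file name is a placeholder and the 16 kHz target rate is an assumption that mirrors the module's fallback resampling:

from lib.infer_libs.audio import load_audio, check_audio_duration

wav_path = "input.wav"  # placeholder input path

# Reject clips shorter than ~0.76 s before doing any heavy processing.
if check_audio_duration(wav_path):
    # Decodes to a mono float32 numpy array at the requested rate; the
    # DoFormant/Quefrency/Timbre arguments drive the optional stftpitchshift pass.
    audio = load_audio(wav_path, 16000, DoFormant=False)
    print(audio.shape, audio.dtype)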
lib/infer_libs/fcpe.py ADDED
@@ -0,0 +1,873 @@
1
+ from typing import Union
2
+
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch.nn.utils import weight_norm
8
+ from torchaudio.transforms import Resample
9
+ import os
10
+ import librosa
11
+ import soundfile as sf
12
+ import torch.utils.data
13
+ from librosa.filters import mel as librosa_mel_fn
14
+ import math
15
+ from functools import partial
16
+
17
+ from einops import rearrange, repeat
18
+ from local_attention import LocalAttention
19
+ from torch import nn
20
+
21
+ os.environ["LRU_CACHE_CAPACITY"] = "3"
22
+
23
+ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
24
+ sampling_rate = None
25
+ try:
26
+ data, sampling_rate = sf.read(full_path, always_2d=True)  # load with soundfile
27
+ except Exception as ex:
28
+ print(f"'{full_path}' failed to load.\nException:")
29
+ print(ex)
30
+ if return_empty_on_exception:
31
+ return [], sampling_rate or target_sr or 48000
32
+ else:
33
+ raise Exception(ex)
34
+
35
+ if len(data.shape) > 1:
36
+ data = data[:, 0]
37
+ assert len(data) > 2  # audio must be longer than 2 samples (otherwise the slice above was taken along the wrong dimension)
38
+
39
+ if np.issubdtype(data.dtype, np.integer): # if audio data is type int
40
+ max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
41
+ else: # if audio data is type fp32
42
+ max_mag = max(np.amax(data), -np.amin(data))
43
+ max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
44
+
45
+ data = torch.FloatTensor(data.astype(np.float32))/max_mag
46
+
47
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
48
+ return [], sampling_rate or target_sr or 48000
49
+ if target_sr is not None and sampling_rate != target_sr:
50
+ data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
51
+ sampling_rate = target_sr
52
+
53
+ return data, sampling_rate
54
+
55
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
56
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
57
+
58
+ def dynamic_range_decompression(x, C=1):
59
+ return np.exp(x) / C
60
+
61
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
62
+ return torch.log(torch.clamp(x, min=clip_val) * C)
63
+
64
+ def dynamic_range_decompression_torch(x, C=1):
65
+ return torch.exp(x) / C
66
+
67
+ class STFT():
68
+ def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
69
+ self.target_sr = sr
70
+
71
+ self.n_mels = n_mels
72
+ self.n_fft = n_fft
73
+ self.win_size = win_size
74
+ self.hop_length = hop_length
75
+ self.fmin = fmin
76
+ self.fmax = fmax
77
+ self.clip_val = clip_val
78
+ self.mel_basis = {}
79
+ self.hann_window = {}
80
+
81
+ def get_mel(self, y, keyshift=0, speed=1, center=False, train=False):
82
+ sampling_rate = self.target_sr
83
+ n_mels = self.n_mels
84
+ n_fft = self.n_fft
85
+ win_size = self.win_size
86
+ hop_length = self.hop_length
87
+ fmin = self.fmin
88
+ fmax = self.fmax
89
+ clip_val = self.clip_val
90
+
91
+ factor = 2 ** (keyshift / 12)
92
+ n_fft_new = int(np.round(n_fft * factor))
93
+ win_size_new = int(np.round(win_size * factor))
94
+ hop_length_new = int(np.round(hop_length * speed))
95
+ if not train:
96
+ mel_basis = self.mel_basis
97
+ hann_window = self.hann_window
98
+ else:
99
+ mel_basis = {}
100
+ hann_window = {}
101
+
102
+ if torch.min(y) < -1.:
103
+ print('min value is ', torch.min(y))
104
+ if torch.max(y) > 1.:
105
+ print('max value is ', torch.max(y))
106
+
107
+ mel_basis_key = str(fmax)+'_'+str(y.device)
108
+ if mel_basis_key not in mel_basis:
109
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
110
+ mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
111
+
112
+ keyshift_key = str(keyshift)+'_'+str(y.device)
113
+ if keyshift_key not in hann_window:
114
+ hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
115
+
116
+ pad_left = (win_size_new - hop_length_new) //2
117
+ pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left)
118
+ if pad_right < y.size(-1):
119
+ mode = 'reflect'
120
+ else:
121
+ mode = 'constant'
122
+ y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode)
123
+ y = y.squeeze(1)
124
+
125
+ spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=hann_window[keyshift_key],
126
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
127
+ spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9))
128
+ if keyshift != 0:
129
+ size = n_fft // 2 + 1
130
+ resize = spec.size(1)
131
+ if resize < size:
132
+ spec = F.pad(spec, (0, 0, 0, size-resize))
133
+ spec = spec[:, :size, :] * win_size / win_size_new
134
+ spec = torch.matmul(mel_basis[mel_basis_key], spec)
135
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
136
+ return spec
137
+
138
+ def __call__(self, audiopath):
139
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
140
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
141
+ return spect
142
+
143
+ stft = STFT()
144
+
145
+ #import fast_transformers.causal_product.causal_product_cuda
146
+
147
+ def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None):
148
+ b, h, *_ = data.shape
149
+ # (batch size, head, length, model_dim)
150
+
151
+ # normalize model dim
152
+ data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
153
+
154
+ # what is ratio? projection_matrix.shape[0] --> 266
155
+
156
+ ratio = (projection_matrix.shape[0] ** -0.5)
157
+
158
+ projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
159
+ projection = projection.type_as(data)
160
+
161
+ #data_dash = w^T x
162
+ data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
163
+
164
+
165
+ # diag_data = D**2
166
+ diag_data = data ** 2
167
+ diag_data = torch.sum(diag_data, dim=-1)
168
+ diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
169
+ diag_data = diag_data.unsqueeze(dim=-1)
170
+
171
+ #print ()
172
+ if is_query:
173
+ data_dash = ratio * (
174
+ torch.exp(data_dash - diag_data -
175
+ torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
176
+ else:
177
+ data_dash = ratio * (
178
+ torch.exp(data_dash - diag_data + eps))#- torch.max(data_dash)) + eps)
179
+
180
+ return data_dash.type_as(data)
181
+
182
+ def orthogonal_matrix_chunk(cols, qr_uniform_q = False, device = None):
183
+ unstructured_block = torch.randn((cols, cols), device = device)
184
+ q, r = torch.linalg.qr(unstructured_block.cpu(), mode='reduced')
185
+ q, r = map(lambda t: t.to(device), (q, r))
186
+
187
+ # proposed by @Parskatt
188
+ # to make sure Q is uniform https://arxiv.org/pdf/math-ph/0609050.pdf
189
+ if qr_uniform_q:
190
+ d = torch.diag(r, 0)
191
+ q *= d.sign()
192
+ return q.t()
193
+ def exists(val):
194
+ return val is not None
195
+
196
+ def empty(tensor):
197
+ return tensor.numel() == 0
198
+
199
+ def default(val, d):
200
+ return val if exists(val) else d
201
+
202
+ def cast_tuple(val):
203
+ return (val,) if not isinstance(val, tuple) else val
204
+
205
+ class PCmer(nn.Module):
206
+ """The encoder that is used in the Transformer model."""
207
+
208
+ def __init__(self,
209
+ num_layers,
210
+ num_heads,
211
+ dim_model,
212
+ dim_keys,
213
+ dim_values,
214
+ residual_dropout,
215
+ attention_dropout):
216
+ super().__init__()
217
+ self.num_layers = num_layers
218
+ self.num_heads = num_heads
219
+ self.dim_model = dim_model
220
+ self.dim_values = dim_values
221
+ self.dim_keys = dim_keys
222
+ self.residual_dropout = residual_dropout
223
+ self.attention_dropout = attention_dropout
224
+
225
+ self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)])
226
+
227
+ # METHODS ########################################################################################################
228
+
229
+ def forward(self, phone, mask=None):
230
+
231
+ # apply all layers to the input
232
+ for (i, layer) in enumerate(self._layers):
233
+ phone = layer(phone, mask)
234
+ # provide the final sequence
235
+ return phone
236
+
237
+
238
+ # ==================================================================================================================== #
239
+ # CLASS _ E N C O D E R L A Y E R #
240
+ # ==================================================================================================================== #
241
+
242
+
243
+ class _EncoderLayer(nn.Module):
244
+ """One layer of the encoder.
245
+
246
+ Attributes:
247
+ attn: (:class:`mha.MultiHeadAttention`): The attention mechanism that is used to read the input sequence.
248
+ feed_forward (:class:`ffl.FeedForwardLayer`): The feed-forward layer on top of the attention mechanism.
249
+ """
250
+
251
+ def __init__(self, parent: PCmer):
252
+ """Creates a new instance of ``_EncoderLayer``.
253
+
254
+ Args:
255
+ parent (Encoder): The encoder that the layer is created for.
256
+ """
257
+ super().__init__()
258
+
259
+
260
+ self.conformer = ConformerConvModule(parent.dim_model)
261
+ self.norm = nn.LayerNorm(parent.dim_model)
262
+ self.dropout = nn.Dropout(parent.residual_dropout)
263
+
264
+ # selfatt -> fastatt: performer!
265
+ self.attn = SelfAttention(dim = parent.dim_model,
266
+ heads = parent.num_heads,
267
+ causal = False)
268
+
269
+ # METHODS ########################################################################################################
270
+
271
+ def forward(self, phone, mask=None):
272
+
273
+ # compute attention sub-layer
274
+ phone = phone + (self.attn(self.norm(phone), mask=mask))
275
+
276
+ phone = phone + (self.conformer(phone))
277
+
278
+ return phone
279
+
280
+ def calc_same_padding(kernel_size):
281
+ pad = kernel_size // 2
282
+ return (pad, pad - (kernel_size + 1) % 2)
283
+
284
+ # helper classes
285
+
286
+ class Swish(nn.Module):
287
+ def forward(self, x):
288
+ return x * x.sigmoid()
289
+
290
+ class Transpose(nn.Module):
291
+ def __init__(self, dims):
292
+ super().__init__()
293
+ assert len(dims) == 2, 'dims must be a tuple of two dimensions'
294
+ self.dims = dims
295
+
296
+ def forward(self, x):
297
+ return x.transpose(*self.dims)
298
+
299
+ class GLU(nn.Module):
300
+ def __init__(self, dim):
301
+ super().__init__()
302
+ self.dim = dim
303
+
304
+ def forward(self, x):
305
+ out, gate = x.chunk(2, dim=self.dim)
306
+ return out * gate.sigmoid()
307
+
308
+ class DepthWiseConv1d(nn.Module):
309
+ def __init__(self, chan_in, chan_out, kernel_size, padding):
310
+ super().__init__()
311
+ self.padding = padding
312
+ self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups = chan_in)
313
+
314
+ def forward(self, x):
315
+ x = F.pad(x, self.padding)
316
+ return self.conv(x)
317
+
318
+ class ConformerConvModule(nn.Module):
319
+ def __init__(
320
+ self,
321
+ dim,
322
+ causal = False,
323
+ expansion_factor = 2,
324
+ kernel_size = 31,
325
+ dropout = 0.):
326
+ super().__init__()
327
+
328
+ inner_dim = dim * expansion_factor
329
+ padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
330
+
331
+ self.net = nn.Sequential(
332
+ nn.LayerNorm(dim),
333
+ Transpose((1, 2)),
334
+ nn.Conv1d(dim, inner_dim * 2, 1),
335
+ GLU(dim=1),
336
+ DepthWiseConv1d(inner_dim, inner_dim, kernel_size = kernel_size, padding = padding),
337
+ #nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(),
338
+ Swish(),
339
+ nn.Conv1d(inner_dim, dim, 1),
340
+ Transpose((1, 2)),
341
+ nn.Dropout(dropout)
342
+ )
343
+
344
+ def forward(self, x):
345
+ return self.net(x)
346
+
347
+ def linear_attention(q, k, v):
348
+ if v is None:
349
+ #print (k.size(), q.size())
350
+ out = torch.einsum('...ed,...nd->...ne', k, q)
351
+ return out
352
+
353
+ else:
354
+ k_cumsum = k.sum(dim = -2)
355
+ #k_cumsum = k.sum(dim = -2)
356
+ D_inv = 1. / (torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) + 1e-8)
357
+
358
+ context = torch.einsum('...nd,...ne->...de', k, v)
359
+ #print ("TRUEEE: ", context.size(), q.size(), D_inv.size())
360
+ out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
361
+ return out
362
+
363
+ def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, qr_uniform_q = False, device = None):
364
+ nb_full_blocks = int(nb_rows / nb_columns)
365
+ #print (nb_full_blocks)
366
+ block_list = []
367
+
368
+ for _ in range(nb_full_blocks):
369
+ q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device)
370
+ block_list.append(q)
371
+ # block_list[n] is an orthogonal matrix ... (model_dim * model_dim)
372
+ #print (block_list[0].size(), torch.einsum('...nd,...nd->...n', block_list[0], torch.roll(block_list[0],1,1)))
373
+ #print (nb_rows, nb_full_blocks, nb_columns)
374
+ remaining_rows = nb_rows - nb_full_blocks * nb_columns
375
+ #print (remaining_rows)
376
+ if remaining_rows > 0:
377
+ q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q = qr_uniform_q, device = device)
378
+ #print (q[:remaining_rows].size())
379
+ block_list.append(q[:remaining_rows])
380
+
381
+ final_matrix = torch.cat(block_list)
382
+
383
+ if scaling == 0:
384
+ multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1)
385
+ elif scaling == 1:
386
+ multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device)
387
+ else:
388
+ raise ValueError(f'Invalid scaling {scaling}')
389
+
390
+ return torch.diag(multiplier) @ final_matrix
391
+
392
+ class FastAttention(nn.Module):
393
+ def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), qr_uniform_q = False, no_projection = False):
394
+ super().__init__()
395
+ nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
396
+
397
+ self.dim_heads = dim_heads
398
+ self.nb_features = nb_features
399
+ self.ortho_scaling = ortho_scaling
400
+
401
+ self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling, qr_uniform_q = qr_uniform_q)
402
+ projection_matrix = self.create_projection()
403
+ self.register_buffer('projection_matrix', projection_matrix)
404
+
405
+ self.generalized_attention = generalized_attention
406
+ self.kernel_fn = kernel_fn
407
+
408
+ # if this is turned on, no projection will be used
409
+ # queries and keys will be softmax-ed as in the original efficient attention paper
410
+ self.no_projection = no_projection
411
+
412
+ self.causal = causal
413
+
414
+ @torch.no_grad()
415
+ def redraw_projection_matrix(self):
416
+ projections = self.create_projection()
417
+ self.projection_matrix.copy_(projections)
418
+ del projections
419
+
420
+ def forward(self, q, k, v):
421
+ device = q.device
422
+
423
+ if self.no_projection:
424
+ q = q.softmax(dim = -1)
425
+ k = torch.exp(k) if self.causal else k.softmax(dim = -2)
426
+ else:
427
+ create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device)
428
+
429
+ q = create_kernel(q, is_query = True)
430
+ k = create_kernel(k, is_query = False)
431
+
432
+ attn_fn = linear_attention if not self.causal else self.causal_linear_fn
433
+ if v is None:
434
+ out = attn_fn(q, k, None)
435
+ return out
436
+ else:
437
+ out = attn_fn(q, k, v)
438
+ return out
439
+ class SelfAttention(nn.Module):
440
+ def __init__(self, dim, causal = False, heads = 8, dim_head = 64, local_heads = 0, local_window_size = 256, nb_features = None, feature_redraw_interval = 1000, generalized_attention = False, kernel_fn = nn.ReLU(), qr_uniform_q = False, dropout = 0., no_projection = False):
441
+ super().__init__()
442
+ assert dim % heads == 0, 'dimension must be divisible by number of heads'
443
+ dim_head = default(dim_head, dim // heads)
444
+ inner_dim = dim_head * heads
445
+ self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, qr_uniform_q = qr_uniform_q, no_projection = no_projection)
446
+
447
+ self.heads = heads
448
+ self.global_heads = heads - local_heads
449
+ self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None
450
+
451
+ #print (heads, nb_features, dim_head)
452
+ #name_embedding = torch.zeros(110, heads, dim_head, dim_head)
453
+ #self.name_embedding = nn.Parameter(name_embedding, requires_grad=True)
454
+
455
+
456
+ self.to_q = nn.Linear(dim, inner_dim)
457
+ self.to_k = nn.Linear(dim, inner_dim)
458
+ self.to_v = nn.Linear(dim, inner_dim)
459
+ self.to_out = nn.Linear(inner_dim, dim)
460
+ self.dropout = nn.Dropout(dropout)
461
+
462
+ @torch.no_grad()
463
+ def redraw_projection_matrix(self):
464
+ self.fast_attention.redraw_projection_matrix()
465
+ #torch.nn.init.zeros_(self.name_embedding)
466
+ #print (torch.sum(self.name_embedding))
467
+ def forward(self, x, context = None, mask = None, context_mask = None, name=None, inference=False, **kwargs):
468
+ _, _, _, h, gh = *x.shape, self.heads, self.global_heads
469
+
470
+ cross_attend = exists(context)
471
+
472
+ context = default(context, x)
473
+ context_mask = default(context_mask, mask) if not cross_attend else context_mask
474
+ #print (torch.sum(self.name_embedding))
475
+ q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
476
+
477
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
478
+ (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
479
+
480
+ attn_outs = []
481
+ #print (name)
482
+ #print (self.name_embedding[name].size())
483
+ if not empty(q):
484
+ if exists(context_mask):
485
+ global_mask = context_mask[:, None, :, None]
486
+ v.masked_fill_(~global_mask, 0.)
487
+ if cross_attend:
488
+ pass
489
+ #print (torch.sum(self.name_embedding))
490
+ #out = self.fast_attention(q,self.name_embedding[name],None)
491
+ #print (torch.sum(self.name_embedding[...,-1:]))
492
+ else:
493
+ out = self.fast_attention(q, k, v)
494
+ attn_outs.append(out)
495
+
496
+ if not empty(lq):
497
+ assert not cross_attend, 'local attention is not compatible with cross attention'
498
+ out = self.local_attn(lq, lk, lv, input_mask = mask)
499
+ attn_outs.append(out)
500
+
501
+ out = torch.cat(attn_outs, dim = 1)
502
+ out = rearrange(out, 'b h n d -> b n (h d)')
503
+ out = self.to_out(out)
504
+ return self.dropout(out)
505
+
506
+ def l2_regularization(model, l2_alpha):
507
+ l2_loss = []
508
+ for module in model.modules():
509
+ if type(module) is nn.Conv2d:
510
+ l2_loss.append((module.weight ** 2).sum() / 2.0)
511
+ return l2_alpha * sum(l2_loss)
512
+
513
+
514
+ class FCPE(nn.Module):
515
+ def __init__(
516
+ self,
517
+ input_channel=128,
518
+ out_dims=360,
519
+ n_layers=12,
520
+ n_chans=512,
521
+ use_siren=False,
522
+ use_full=False,
523
+ loss_mse_scale=10,
524
+ loss_l2_regularization=False,
525
+ loss_l2_regularization_scale=1,
526
+ loss_grad1_mse=False,
527
+ loss_grad1_mse_scale=1,
528
+ f0_max=1975.5,
529
+ f0_min=32.70,
530
+ confidence=False,
531
+ threshold=0.05,
532
+ use_input_conv=True
533
+ ):
534
+ super().__init__()
535
+ if use_siren is True:
536
+ raise ValueError("Siren is not supported yet.")
537
+ if use_full is True:
538
+ raise ValueError("Full model is not supported yet.")
539
+
540
+ self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10
541
+ self.loss_l2_regularization = loss_l2_regularization if (loss_l2_regularization is not None) else False
542
+ self.loss_l2_regularization_scale = loss_l2_regularization_scale if (loss_l2_regularization_scale
543
+ is not None) else 1
544
+ self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False
545
+ self.loss_grad1_mse_scale = loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1
546
+ self.f0_max = f0_max if (f0_max is not None) else 1975.5
547
+ self.f0_min = f0_min if (f0_min is not None) else 32.70
548
+ self.confidence = confidence if (confidence is not None) else False
549
+ self.threshold = threshold if (threshold is not None) else 0.05
550
+ self.use_input_conv = use_input_conv if (use_input_conv is not None) else True
551
+
552
+ self.cent_table_b = torch.Tensor(
553
+ np.linspace(self.f0_to_cent(torch.Tensor([f0_min]))[0], self.f0_to_cent(torch.Tensor([f0_max]))[0],
554
+ out_dims))
555
+ self.register_buffer("cent_table", self.cent_table_b)
556
+
557
+ # conv in stack
558
+ _leaky = nn.LeakyReLU()
559
+ self.stack = nn.Sequential(
560
+ nn.Conv1d(input_channel, n_chans, 3, 1, 1),
561
+ nn.GroupNorm(4, n_chans),
562
+ _leaky,
563
+ nn.Conv1d(n_chans, n_chans, 3, 1, 1))
564
+
565
+ # transformer
566
+ self.decoder = PCmer(
567
+ num_layers=n_layers,
568
+ num_heads=8,
569
+ dim_model=n_chans,
570
+ dim_keys=n_chans,
571
+ dim_values=n_chans,
572
+ residual_dropout=0.1,
573
+ attention_dropout=0.1)
574
+ self.norm = nn.LayerNorm(n_chans)
575
+
576
+ # out
577
+ self.n_out = out_dims
578
+ self.dense_out = weight_norm(
579
+ nn.Linear(n_chans, self.n_out))
580
+
581
+ def forward(self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder = "local_argmax"):
582
+ """
583
+ input:
584
+ B x n_frames x n_unit
585
+ return:
586
+ dict of B x n_frames x feat
587
+ """
588
+ if cdecoder == "argmax":
589
+ self.cdecoder = self.cents_decoder
590
+ elif cdecoder == "local_argmax":
591
+ self.cdecoder = self.cents_local_decoder
592
+ if self.use_input_conv:
593
+ x = self.stack(mel.transpose(1, 2)).transpose(1, 2)
594
+ else:
595
+ x = mel
596
+ x = self.decoder(x)
597
+ x = self.norm(x)
598
+ x = self.dense_out(x) # [B,N,D]
599
+ x = torch.sigmoid(x)
600
+ if not infer:
601
+ gt_cent_f0 = self.f0_to_cent(gt_f0) # mel f0 #[B,N,1]
602
+ gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0) # #[B,N,out_dim]
603
+ loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0) # bce loss
604
+ # l2 regularization
605
+ if self.loss_l2_regularization:
606
+ loss_all = loss_all + l2_regularization(model=self, l2_alpha=self.loss_l2_regularization_scale)
607
+ x = loss_all
608
+ if infer:
609
+ x = self.cdecoder(x)
610
+ x = self.cent_to_f0(x)
611
+ if not return_hz_f0:
612
+ x = (1 + x / 700).log()
613
+ return x
614
+
615
+ def cents_decoder(self, y, mask=True):
616
+ B, N, _ = y.size()
617
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
618
+ rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(y, dim=-1, keepdim=True) # cents: [B,N,1]
619
+ if mask:
620
+ confident = torch.max(y, dim=-1, keepdim=True)[0]
621
+ confident_mask = torch.ones_like(confident)
622
+ confident_mask[confident <= self.threshold] = float("-INF")
623
+ rtn = rtn * confident_mask
624
+ if self.confidence:
625
+ return rtn, confident
626
+ else:
627
+ return rtn
628
+
629
+ def cents_local_decoder(self, y, mask=True):
630
+ B, N, _ = y.size()
631
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
632
+ confident, max_index = torch.max(y, dim=-1, keepdim=True)
633
+ local_argmax_index = torch.arange(0,9).to(max_index.device) + (max_index - 4)
634
+ local_argmax_index[local_argmax_index<0] = 0
635
+ local_argmax_index[local_argmax_index>=self.n_out] = self.n_out - 1
636
+ ci_l = torch.gather(ci,-1,local_argmax_index)
637
+ y_l = torch.gather(y,-1,local_argmax_index)
638
+ rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(y_l, dim=-1, keepdim=True) # cents: [B,N,1]
639
+ if mask:
640
+ confident_mask = torch.ones_like(confident)
641
+ confident_mask[confident <= self.threshold] = float("-INF")
642
+ rtn = rtn * confident_mask
643
+ if self.confidence:
644
+ return rtn, confident
645
+ else:
646
+ return rtn
647
+
648
+ def cent_to_f0(self, cent):
649
+ return 10. * 2 ** (cent / 1200.)
650
+
651
+ def f0_to_cent(self, f0):
652
+ return 1200. * torch.log2(f0 / 10.)
653
+
654
+ def gaussian_blurred_cent(self, cents): # cents: [B,N,1]
655
+ mask = (cents > 0.1) & (cents < (1200. * np.log2(self.f0_max / 10.)))
656
+ B, N, _ = cents.size()
657
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
658
+ return torch.exp(-torch.square(ci - cents) / 1250) * mask.float()
659
+
660
+
661
+ class FCPEInfer:
662
+ def __init__(self, model_path, device=None, dtype=torch.float32):
663
+ if device is None:
664
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
665
+ self.device = device
666
+ ckpt = torch.load(model_path, map_location=torch.device(self.device))
667
+ self.args = DotDict(ckpt["config"])
668
+ self.dtype = dtype
669
+ model = FCPE(
670
+ input_channel=self.args.model.input_channel,
671
+ out_dims=self.args.model.out_dims,
672
+ n_layers=self.args.model.n_layers,
673
+ n_chans=self.args.model.n_chans,
674
+ use_siren=self.args.model.use_siren,
675
+ use_full=self.args.model.use_full,
676
+ loss_mse_scale=self.args.loss.loss_mse_scale,
677
+ loss_l2_regularization=self.args.loss.loss_l2_regularization,
678
+ loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale,
679
+ loss_grad1_mse=self.args.loss.loss_grad1_mse,
680
+ loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale,
681
+ f0_max=self.args.model.f0_max,
682
+ f0_min=self.args.model.f0_min,
683
+ confidence=self.args.model.confidence,
684
+ )
685
+ model.to(self.device).to(self.dtype)
686
+ model.load_state_dict(ckpt['model'])
687
+ model.eval()
688
+ self.model = model
689
+ self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device)
690
+
691
+ @torch.no_grad()
692
+ def __call__(self, audio, sr, threshold=0.05):
693
+ self.model.threshold = threshold
694
+ audio = audio[None,:]
695
+ mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype)
696
+ f0 = self.model(mel=mel, infer=True, return_hz_f0=True)
697
+ return f0
698
+
699
+
700
+ class Wav2Mel:
701
+
702
+ def __init__(self, args, device=None, dtype=torch.float32):
703
+ # self.args = args
704
+ self.sampling_rate = args.mel.sampling_rate
705
+ self.hop_size = args.mel.hop_size
706
+ if device is None:
707
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
708
+ self.device = device
709
+ self.dtype = dtype
710
+ self.stft = STFT(
711
+ args.mel.sampling_rate,
712
+ args.mel.num_mels,
713
+ args.mel.n_fft,
714
+ args.mel.win_size,
715
+ args.mel.hop_size,
716
+ args.mel.fmin,
717
+ args.mel.fmax
718
+ )
719
+ self.resample_kernel = {}
720
+
721
+ def extract_nvstft(self, audio, keyshift=0, train=False):
722
+ mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2) # B, n_frames, bins
723
+ return mel
724
+
725
+ def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
726
+ audio = audio.to(self.dtype).to(self.device)
727
+ # resample
728
+ if sample_rate == self.sampling_rate:
729
+ audio_res = audio
730
+ else:
731
+ key_str = str(sample_rate)
732
+ if key_str not in self.resample_kernel:
733
+ self.resample_kernel[key_str] = Resample(sample_rate, self.sampling_rate, lowpass_filter_width=128)
734
+ self.resample_kernel[key_str] = self.resample_kernel[key_str].to(self.dtype).to(self.device)
735
+ audio_res = self.resample_kernel[key_str](audio)
736
+
737
+ # extract
738
+ mel = self.extract_nvstft(audio_res, keyshift=keyshift, train=train) # B, n_frames, bins
739
+ n_frames = int(audio.shape[1] // self.hop_size) + 1
740
+ if n_frames > int(mel.shape[1]):
741
+ mel = torch.cat((mel, mel[:, -1:, :]), 1)
742
+ if n_frames < int(mel.shape[1]):
743
+ mel = mel[:, :n_frames, :]
744
+ return mel
745
+
746
+ def __call__(self, audio, sample_rate, keyshift=0, train=False):
747
+ return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train)
748
+
749
+
750
+ class DotDict(dict):
751
+ def __getattr__(*args):
752
+ val = dict.get(*args)
753
+ return DotDict(val) if type(val) is dict else val
754
+
755
+ __setattr__ = dict.__setitem__
756
+ __delattr__ = dict.__delitem__
757
+
758
+ class F0Predictor(object):
759
+ def compute_f0(self,wav,p_len):
760
+ '''
761
+ input: wav:[signal_length]
762
+ p_len:int
763
+ output: f0:[signal_length//hop_length]
764
+ '''
765
+ pass
766
+
767
+ def compute_f0_uv(self,wav,p_len):
768
+ '''
769
+ input: wav:[signal_length]
770
+ p_len:int
771
+ output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
772
+ '''
773
+ pass
774
+
775
+ class FCPE(F0Predictor):
776
+ def __init__(self, model_path, hop_length=512, f0_min=50, f0_max=1100, dtype=torch.float32, device=None, sampling_rate=44100,
777
+ threshold=0.05):
778
+ self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
779
+ self.hop_length = hop_length
780
+ self.f0_min = f0_min
781
+ self.f0_max = f0_max
782
+ if device is None:
783
+ self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
784
+ else:
785
+ self.device = device
786
+ self.threshold = threshold
787
+ self.sampling_rate = sampling_rate
788
+ self.dtype = dtype
789
+ self.name = "fcpe"
790
+
791
+ def repeat_expand(
792
+ self, content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
793
+ ):
794
+ ndim = content.ndim
795
+
796
+ if content.ndim == 1:
797
+ content = content[None, None]
798
+ elif content.ndim == 2:
799
+ content = content[None]
800
+
801
+ assert content.ndim == 3
802
+
803
+ is_np = isinstance(content, np.ndarray)
804
+ if is_np:
805
+ content = torch.from_numpy(content)
806
+
807
+ results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
808
+
809
+ if is_np:
810
+ results = results.numpy()
811
+
812
+ if ndim == 1:
813
+ return results[0, 0]
814
+ elif ndim == 2:
815
+ return results[0]
816
+
817
+ def post_process(self, x, sampling_rate, f0, pad_to):
818
+ if isinstance(f0, np.ndarray):
819
+ f0 = torch.from_numpy(f0).float().to(x.device)
820
+
821
+ if pad_to is None:
822
+ return f0
823
+
824
+ f0 = self.repeat_expand(f0, pad_to)
825
+
826
+ vuv_vector = torch.zeros_like(f0)
827
+ vuv_vector[f0 > 0.0] = 1.0
828
+ vuv_vector[f0 <= 0.0] = 0.0
829
+
830
+ # drop zero-frequency frames and interpolate over them linearly
831
+ nzindex = torch.nonzero(f0).squeeze()
832
+ f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
833
+ time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
834
+ time_frame = np.arange(pad_to) * self.hop_length / sampling_rate
835
+
836
+ vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]
837
+
838
+ if f0.shape[0] <= 0:
839
+ return torch.zeros(pad_to, dtype=torch.float, device=x.device).cpu().numpy(), vuv_vector.cpu().numpy()
840
+ if f0.shape[0] == 1:
841
+ return (torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[
842
+ 0]).cpu().numpy(), vuv_vector.cpu().numpy()
843
+
844
+ # could probably be rewritten with torch?
845
+ f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
846
+ # vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector,pad_to/len(vuv_vector),order = 0))
847
+
848
+ return f0, vuv_vector.cpu().numpy()
849
+
850
+ def compute_f0(self, wav, p_len=None):
851
+ x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
852
+ if p_len is None:
853
+ print("fcpe p_len is None")
854
+ p_len = x.shape[0] // self.hop_length
855
+ #else:
856
+ # assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
857
+ f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0,:,0]
858
+ if torch.all(f0 == 0):
859
+ rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
860
+ return rtn, rtn
861
+ return self.post_process(x, self.sampling_rate, f0, p_len)[0]
862
+
863
+ def compute_f0_uv(self, wav, p_len=None):
864
+ x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
865
+ if p_len is None:
866
+ p_len = x.shape[0] // self.hop_length
867
+ #else:
868
+ # assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
869
+ f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0,:,0]
870
+ if torch.all(f0 == 0):
871
+ rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
872
+ return rtn, rtn
873
+ return self.post_process(x, self.sampling_rate, f0, p_len)
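A hedged sketch (editorial, not part of the commit) of the mel front-end that Wav2Mel and FCPEInfer build on; the default STFT() settings and the 1-second dummy input are assumptions:

import torch
from lib.infer_libs.fcpe import STFT

mel_extractor = STFT()            # defaults above: 22.05 kHz, 80 mels, n_fft 1024, hop 256
wav = torch.zeros(1, 22050)       # stand-in batch holding 1 s of audio at the target rate
mel = mel_extractor.get_mel(wav)  # log-compressed mel spectrogram, shape (1, n_mels, n_frames)
print(mel.shape)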
lib/infer_libs/rmvpe.py ADDED
@@ -0,0 +1,705 @@
1
+ import os
2
+
3
+ import numpy as np
4
+ import torch
5
+ try:
6
+ #Fix "Torch not compiled with CUDA enabled"
7
+ import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
8
+ if torch.xpu.is_available():
9
+ from lib.infer.modules.ipex import ipex_init
10
+ ipex_init()
11
+ except Exception:
12
+ pass
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from librosa.util import normalize, pad_center, tiny
16
+ from scipy.signal import get_window
17
+
18
+ import logging
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
+ ###stft codes from https://github.com/pseeth/torch-stft/blob/master/torch_stft/util.py
24
+ def window_sumsquare(
25
+ window,
26
+ n_frames,
27
+ hop_length=200,
28
+ win_length=800,
29
+ n_fft=800,
30
+ dtype=np.float32,
31
+ norm=None,
32
+ ):
33
+ """
34
+ # from librosa 0.6
35
+ Compute the sum-square envelope of a window function at a given hop length.
36
+ This is used to estimate modulation effects induced by windowing
37
+ observations in short-time fourier transforms.
38
+ Parameters
39
+ ----------
40
+ window : string, tuple, number, callable, or list-like
41
+ Window specification, as in `get_window`
42
+ n_frames : int > 0
43
+ The number of analysis frames
44
+ hop_length : int > 0
45
+ The number of samples to advance between frames
46
+ win_length : [optional]
47
+ The length of the window function. By default, this matches `n_fft`.
48
+ n_fft : int > 0
49
+ The length of each analysis frame.
50
+ dtype : np.dtype
51
+ The data type of the output
52
+ Returns
53
+ -------
54
+ wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
55
+ The sum-squared envelope of the window function
56
+ """
57
+ if win_length is None:
58
+ win_length = n_fft
59
+
60
+ n = n_fft + hop_length * (n_frames - 1)
61
+ x = np.zeros(n, dtype=dtype)
62
+
63
+ # Compute the squared window at the desired length
64
+ win_sq = get_window(window, win_length, fftbins=True)
65
+ win_sq = normalize(win_sq, norm=norm) ** 2
66
+ win_sq = pad_center(win_sq, n_fft)
67
+
68
+ # Fill the envelope
69
+ for i in range(n_frames):
70
+ sample = i * hop_length
71
+ x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
72
+ return x
73
+
74
+
75
+ class STFT(torch.nn.Module):
76
+ def __init__(
77
+ self, filter_length=1024, hop_length=512, win_length=None, window="hann"
78
+ ):
79
+ """
80
+ This module implements an STFT using 1D convolution and 1D transpose convolutions.
81
+ This is a bit tricky so there are some cases that probably won't work as working
82
+ out the same sizes before and after in all overlap add setups is tough. Right now,
83
+ this code should work with hop lengths that are half the filter length (50% overlap
84
+ between frames).
85
+
86
+ Keyword Arguments:
87
+ filter_length {int} -- Length of filters used (default: {1024})
88
+ hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})
89
+ win_length {[type]} -- Length of the window function applied to each frame (if not specified, it
90
+ equals the filter length). (default: {None})
91
+ window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris)
92
+ (default: {'hann'})
93
+ """
94
+ super(STFT, self).__init__()
95
+ self.filter_length = filter_length
96
+ self.hop_length = hop_length
97
+ self.win_length = win_length if win_length else filter_length
98
+ self.window = window
99
+ self.forward_transform = None
100
+ self.pad_amount = int(self.filter_length / 2)
101
+ #scale = self.filter_length / self.hop_length
102
+ fourier_basis = np.fft.fft(np.eye(self.filter_length))
103
+
104
+ cutoff = int((self.filter_length / 2 + 1))
105
+ fourier_basis = np.vstack(
106
+ [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
107
+ )
108
+ forward_basis = torch.FloatTensor(fourier_basis)
109
+ inverse_basis = torch.FloatTensor(
110
+ np.linalg.pinv(fourier_basis)
111
+ )
112
+
113
+ assert filter_length >= self.win_length
114
+ # get window and zero center pad it to filter_length
115
+ fft_window = get_window(window, self.win_length, fftbins=True)
116
+ fft_window = pad_center(fft_window, size=filter_length)
117
+ fft_window = torch.from_numpy(fft_window).float()
118
+
119
+ # window the bases
120
+ forward_basis *= fft_window
121
+ inverse_basis = (inverse_basis.T * fft_window).T
122
+
123
+ self.register_buffer("forward_basis", forward_basis.float())
124
+ self.register_buffer("inverse_basis", inverse_basis.float())
125
+ self.register_buffer("fft_window", fft_window.float())
126
+
127
+ def transform(self, input_data, return_phase=False):
128
+ """Take input data (audio) to STFT domain.
129
+
130
+ Arguments:
131
+ input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
132
+
133
+ Returns:
134
+ magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
135
+ num_frequencies, num_frames)
136
+ phase {tensor} -- Phase of STFT with shape (num_batch,
137
+ num_frequencies, num_frames)
138
+ """
139
+ # num_batches = input_data.shape[0]
140
+ # num_samples = input_data.shape[-1]
141
+
142
+ # self.num_samples = num_samples
143
+
144
+ # similar to librosa, reflect-pad the input
145
+ # input_data = input_data.view(num_batches, 1, num_samples)
146
+ # print(1234,input_data.shape)
147
+ input_data = F.pad(
148
+ input_data,
149
+ (self.pad_amount, self.pad_amount),
150
+ mode="reflect",
151
+ )
152
+
153
+ forward_transform = input_data.unfold(1, self.filter_length, self.hop_length).permute(0, 2, 1)
154
+ forward_transform = torch.matmul(self.forward_basis, forward_transform)
155
+
156
+ cutoff = int((self.filter_length / 2) + 1)
157
+ real_part = forward_transform[:, :cutoff, :]
158
+ imag_part = forward_transform[:, cutoff:, :]
159
+
160
+ magnitude = torch.sqrt(real_part**2 + imag_part**2)
161
+ # phase = torch.atan2(imag_part.data, real_part.data)
162
+
163
+ if return_phase:
164
+ phase = torch.atan2(imag_part.data, real_part.data)
165
+ return magnitude, phase
166
+ else:
167
+ return magnitude
168
+
169
+ def inverse(self, magnitude, phase):
170
+ """Call the inverse STFT (iSTFT), given magnitude and phase tensors produced
171
+ by the ```transform``` function.
172
+
173
+ Arguments:
174
+ magnitude {tensor} -- Magnitude of STFT with shape (num_batch,
175
+ num_frequencies, num_frames)
176
+ phase {tensor} -- Phase of STFT with shape (num_batch,
177
+ num_frequencies, num_frames)
178
+
179
+ Returns:
180
+ inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of
181
+ shape (num_batch, num_samples)
182
+ """
183
+ cat = torch.cat(
184
+ [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
185
+ )
186
+
187
+ fold = torch.nn.Fold(
188
+ output_size=(1, (cat.size(-1) - 1) * self.hop_length + self.filter_length),
189
+ kernel_size=(1, self.filter_length),
190
+ stride=(1, self.hop_length))
191
+ inverse_transform = torch.matmul(self.inverse_basis, cat)
192
+ inverse_transform = fold(inverse_transform)[:, 0, 0, self.pad_amount : -self.pad_amount]
193
+ window_square_sum = self.fft_window.pow(2).repeat(cat.size(-1), 1).T.unsqueeze(0)
194
+ window_square_sum = fold(window_square_sum)[:, 0, 0, self.pad_amount : -self.pad_amount]
195
+ inverse_transform /= window_square_sum
196
+
197
+ return inverse_transform
198
+
199
+ def forward(self, input_data):
200
+ """Take input data (audio) to STFT domain and then back to audio.
201
+
202
+ Arguments:
203
+ input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)
204
+
205
+ Returns:
206
+ reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of
207
+ shape (num_batch, num_samples)
208
+ """
209
+ self.magnitude, self.phase = self.transform(input_data, return_phase=True)
210
+ reconstruction = self.inverse(self.magnitude, self.phase)
211
+ return reconstruction
212
+
213
+
214
+ from time import time as ttime
215
+
216
+
217
+ class BiGRU(nn.Module):
218
+ def __init__(self, input_features, hidden_features, num_layers):
219
+ super(BiGRU, self).__init__()
220
+ self.gru = nn.GRU(
221
+ input_features,
222
+ hidden_features,
223
+ num_layers=num_layers,
224
+ batch_first=True,
225
+ bidirectional=True,
226
+ )
227
+
228
+ def forward(self, x):
229
+ return self.gru(x)[0]
230
+
231
+
232
+ class ConvBlockRes(nn.Module):
233
+ def __init__(self, in_channels, out_channels, momentum=0.01):
234
+ super(ConvBlockRes, self).__init__()
235
+ self.conv = nn.Sequential(
236
+ nn.Conv2d(
237
+ in_channels=in_channels,
238
+ out_channels=out_channels,
239
+ kernel_size=(3, 3),
240
+ stride=(1, 1),
241
+ padding=(1, 1),
242
+ bias=False,
243
+ ),
244
+ nn.BatchNorm2d(out_channels, momentum=momentum),
245
+ nn.ReLU(),
246
+ nn.Conv2d(
247
+ in_channels=out_channels,
248
+ out_channels=out_channels,
249
+ kernel_size=(3, 3),
250
+ stride=(1, 1),
251
+ padding=(1, 1),
252
+ bias=False,
253
+ ),
254
+ nn.BatchNorm2d(out_channels, momentum=momentum),
255
+ nn.ReLU(),
256
+ )
257
+ if in_channels != out_channels:
258
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
259
+ self.is_shortcut = True
260
+ else:
261
+ self.is_shortcut = False
262
+
263
+ def forward(self, x):
264
+ if self.is_shortcut:
265
+ return self.conv(x) + self.shortcut(x)
266
+ else:
267
+ return self.conv(x) + x
268
+
269
+
270
+ class Encoder(nn.Module):
271
+ def __init__(
272
+ self,
273
+ in_channels,
274
+ in_size,
275
+ n_encoders,
276
+ kernel_size,
277
+ n_blocks,
278
+ out_channels=16,
279
+ momentum=0.01,
280
+ ):
281
+ super(Encoder, self).__init__()
282
+ self.n_encoders = n_encoders
283
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
284
+ self.layers = nn.ModuleList()
285
+ self.latent_channels = []
286
+ for i in range(self.n_encoders):
287
+ self.layers.append(
288
+ ResEncoderBlock(
289
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
290
+ )
291
+ )
292
+ self.latent_channels.append([out_channels, in_size])
293
+ in_channels = out_channels
294
+ out_channels *= 2
295
+ in_size //= 2
296
+ self.out_size = in_size
297
+ self.out_channel = out_channels
298
+
299
+ def forward(self, x):
300
+ concat_tensors = []
301
+ x = self.bn(x)
302
+ for i in range(self.n_encoders):
303
+ _, x = self.layers[i](x)
304
+ concat_tensors.append(_)
305
+ return x, concat_tensors
306
+
307
+
308
+ class ResEncoderBlock(nn.Module):
309
+ def __init__(
310
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
311
+ ):
312
+ super(ResEncoderBlock, self).__init__()
313
+ self.n_blocks = n_blocks
314
+ self.conv = nn.ModuleList()
315
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
316
+ for i in range(n_blocks - 1):
317
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
318
+ self.kernel_size = kernel_size
319
+ if self.kernel_size is not None:
320
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
321
+
322
+ def forward(self, x):
323
+ for i in range(self.n_blocks):
324
+ x = self.conv[i](x)
325
+ if self.kernel_size is not None:
326
+ return x, self.pool(x)
327
+ else:
328
+ return x
329
+
330
+
331
+ class Intermediate(nn.Module): #
332
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
333
+ super(Intermediate, self).__init__()
334
+ self.n_inters = n_inters
335
+ self.layers = nn.ModuleList()
336
+ self.layers.append(
337
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
338
+ )
339
+ for i in range(self.n_inters - 1):
340
+ self.layers.append(
341
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
342
+ )
343
+
344
+ def forward(self, x):
345
+ for i in range(self.n_inters):
346
+ x = self.layers[i](x)
347
+ return x
348
+
349
+
350
+ class ResDecoderBlock(nn.Module):
351
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
352
+ super(ResDecoderBlock, self).__init__()
353
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
354
+ self.n_blocks = n_blocks
355
+ self.conv1 = nn.Sequential(
356
+ nn.ConvTranspose2d(
357
+ in_channels=in_channels,
358
+ out_channels=out_channels,
359
+ kernel_size=(3, 3),
360
+ stride=stride,
361
+ padding=(1, 1),
362
+ output_padding=out_padding,
363
+ bias=False,
364
+ ),
365
+ nn.BatchNorm2d(out_channels, momentum=momentum),
366
+ nn.ReLU(),
367
+ )
368
+ self.conv2 = nn.ModuleList()
369
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
370
+ for i in range(n_blocks - 1):
371
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
372
+
373
+ def forward(self, x, concat_tensor):
374
+ x = self.conv1(x)
375
+ x = torch.cat((x, concat_tensor), dim=1)
376
+ for i in range(self.n_blocks):
377
+ x = self.conv2[i](x)
378
+ return x
379
+
380
+
381
+ class Decoder(nn.Module):
382
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
383
+ super(Decoder, self).__init__()
384
+ self.layers = nn.ModuleList()
385
+ self.n_decoders = n_decoders
386
+ for i in range(self.n_decoders):
387
+ out_channels = in_channels // 2
388
+ self.layers.append(
389
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
390
+ )
391
+ in_channels = out_channels
392
+
393
+ def forward(self, x, concat_tensors):
394
+ for i in range(self.n_decoders):
395
+ x = self.layers[i](x, concat_tensors[-1 - i])
396
+ return x
397
+
398
+
399
+ class DeepUnet(nn.Module):
400
+ def __init__(
401
+ self,
402
+ kernel_size,
403
+ n_blocks,
404
+ en_de_layers=5,
405
+ inter_layers=4,
406
+ in_channels=1,
407
+ en_out_channels=16,
408
+ ):
409
+ super(DeepUnet, self).__init__()
410
+ self.encoder = Encoder(
411
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
412
+ )
413
+ self.intermediate = Intermediate(
414
+ self.encoder.out_channel // 2,
415
+ self.encoder.out_channel,
416
+ inter_layers,
417
+ n_blocks,
418
+ )
419
+ self.decoder = Decoder(
420
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
421
+ )
422
+
423
+ def forward(self, x):
424
+ x, concat_tensors = self.encoder(x)
425
+ x = self.intermediate(x)
426
+ x = self.decoder(x, concat_tensors)
427
+ return x
428
+
429
+
430
+ class E2E(nn.Module):
431
+ def __init__(
432
+ self,
433
+ n_blocks,
434
+ n_gru,
435
+ kernel_size,
436
+ en_de_layers=5,
437
+ inter_layers=4,
438
+ in_channels=1,
439
+ en_out_channels=16,
440
+ ):
441
+ super(E2E, self).__init__()
442
+ self.unet = DeepUnet(
443
+ kernel_size,
444
+ n_blocks,
445
+ en_de_layers,
446
+ inter_layers,
447
+ in_channels,
448
+ en_out_channels,
449
+ )
450
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
451
+ if n_gru:
452
+ self.fc = nn.Sequential(
453
+ BiGRU(3 * 128, 256, n_gru),
454
+ nn.Linear(512, 360),
455
+ nn.Dropout(0.25),
456
+ nn.Sigmoid(),
457
+ )
458
+ else:
459
+ self.fc = nn.Sequential(
460
+ nn.Linear(3 * nn.N_MELS, nn.N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
461
+ )
462
+
463
+ def forward(self, mel):
464
+ # print(mel.shape)
465
+ mel = mel.transpose(-1, -2).unsqueeze(1)
466
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
467
+ x = self.fc(x)
468
+ # print(x.shape)
469
+ return x
470
+
471
+
472
+ from librosa.filters import mel
473
+
474
+
475
+ class MelSpectrogram(torch.nn.Module):
476
+ def __init__(
477
+ self,
478
+ is_half,
479
+ n_mel_channels,
480
+ sampling_rate,
481
+ win_length,
482
+ hop_length,
483
+ n_fft=None,
484
+ mel_fmin=0,
485
+ mel_fmax=None,
486
+ clamp=1e-5,
487
+ ):
488
+ super().__init__()
489
+ n_fft = win_length if n_fft is None else n_fft
490
+ self.hann_window = {}
491
+ mel_basis = mel(
492
+ sr=sampling_rate,
493
+ n_fft=n_fft,
494
+ n_mels=n_mel_channels,
495
+ fmin=mel_fmin,
496
+ fmax=mel_fmax,
497
+ htk=True,
498
+ )
499
+ mel_basis = torch.from_numpy(mel_basis).float()
500
+ self.register_buffer("mel_basis", mel_basis)
501
+ self.n_fft = win_length if n_fft is None else n_fft
502
+ self.hop_length = hop_length
503
+ self.win_length = win_length
504
+ self.sampling_rate = sampling_rate
505
+ self.n_mel_channels = n_mel_channels
506
+ self.clamp = clamp
507
+ self.is_half = is_half
508
+
509
+ def forward(self, audio, keyshift=0, speed=1, center=True):
510
+ factor = 2 ** (keyshift / 12)
511
+ n_fft_new = int(np.round(self.n_fft * factor))
512
+ win_length_new = int(np.round(self.win_length * factor))
513
+ hop_length_new = int(np.round(self.hop_length * speed))
514
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
515
+ if keyshift_key not in self.hann_window:
516
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
517
+ # "cpu"if(audio.device.type=="privateuseone") else audio.device
518
+ audio.device
519
+ )
520
+ if "privateuseone" in str(audio.device):
521
+ if not hasattr(self, "stft"):
522
+ self.stft = STFT(
523
+ filter_length=n_fft_new,
524
+ hop_length=hop_length_new,
525
+ win_length=win_length_new,
526
+ window="hann",
527
+ ).to(audio.device)
528
+ magnitude = self.stft.transform(audio)
529
+ else:
530
+ fft = torch.stft(
531
+ audio,
532
+ n_fft=n_fft_new,
533
+ hop_length=hop_length_new,
534
+ win_length=win_length_new,
535
+ window=self.hann_window[keyshift_key],
536
+ center=center,
537
+ return_complex=True,
538
+ )
539
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
540
+ # if (audio.device.type == "privateuseone"):
541
+ # magnitude=magnitude.to(audio.device)
542
+ if keyshift != 0:
543
+ size = self.n_fft // 2 + 1
544
+ resize = magnitude.size(1)
545
+ if resize < size:
546
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
547
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
548
+ mel_output = torch.matmul(self.mel_basis, magnitude)
549
+ if self.is_half == True:
550
+ mel_output = mel_output.half()
551
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
552
+ # print(log_mel_spec.device.type)
553
+ return log_mel_spec
554
+
555
+
556
+ class RMVPE:
557
+ def __init__(self, model_path, is_half, device=None):
558
+ self.resample_kernel = {}
559
+ self.resample_kernel = {}
560
+ self.is_half = is_half
561
+ if device is None:
562
+ device = "cuda" if torch.cuda.is_available() else "cpu"
563
+ self.device = device
564
+ self.mel_extractor = MelSpectrogram(
565
+ is_half, 128, 16000, 1024, 160, None, 30, 8000
566
+ ).to(device)
567
+ if "privateuseone" in str(device):
568
+ import onnxruntime as ort
569
+
570
+ ort_session = ort.InferenceSession(
571
+ "%s/rmvpe.onnx" % os.environ["rmvpe_root"],
572
+ providers=["DmlExecutionProvider"],
573
+ )
574
+ self.model = ort_session
575
+ else:
576
+ model = E2E(4, 1, (2, 2))
577
+ ckpt = torch.load(model_path, map_location="cpu")
578
+ model.load_state_dict(ckpt)
579
+ model.eval()
580
+ if is_half == True:
581
+ model = model.half()
582
+ self.model = model
583
+ self.model = self.model.to(device)
584
+ cents_mapping = 20 * np.arange(360) + 1997.3794084376191
585
+ self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
586
+
587
+ def mel2hidden(self, mel):
588
+ with torch.no_grad():
589
+ n_frames = mel.shape[-1]
590
+ n_pad = 32 * ((n_frames - 1) // 32 + 1) - n_frames
591
+ if n_pad > 0:
592
+ mel = F.pad(
593
+ mel, (0, n_pad), mode="constant"
594
+ )
595
+ if "privateuseone" in str(self.device):
596
+ onnx_input_name = self.model.get_inputs()[0].name
597
+ onnx_outputs_names = self.model.get_outputs()[0].name
598
+ hidden = self.model.run(
599
+ [onnx_outputs_names],
600
+ input_feed={onnx_input_name: mel.cpu().numpy()},
601
+ )[0]
602
+ else:
603
+ hidden = self.model(mel)
604
+ return hidden[:, :n_frames]
605
+
606
+ def decode(self, hidden, thred=0.03):
607
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
608
+ f0 = 10 * (2 ** (cents_pred / 1200))
609
+ f0[f0 == 10] = 0
610
+ # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
611
+ return f0
612
+
613
+ def infer_from_audio(self, audio, thred=0.03):
614
+ # torch.cuda.synchronize()
615
+ t0 = ttime()
616
+ mel = self.mel_extractor(
617
+ torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
618
+ )
619
+ # print(123123123,mel.device.type)
620
+ # torch.cuda.synchronize()
621
+ t1 = ttime()
622
+ hidden = self.mel2hidden(mel)
623
+ # torch.cuda.synchronize()
624
+ t2 = ttime()
625
+ # print(234234,hidden.device.type)
626
+ if "privateuseone" not in str(self.device):
627
+ hidden = hidden.squeeze(0).cpu().numpy()
628
+ else:
629
+ hidden = hidden[0]
630
+ if self.is_half == True:
631
+ hidden = hidden.astype("float32")
632
+
633
+ f0 = self.decode(hidden, thred=thred)
634
+ # torch.cuda.synchronize()
635
+ t3 = ttime()
636
+ # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
637
+ return f0
638
+
639
+ def infer_from_audio_with_pitch(self, audio, thred=0.03, f0_min=50, f0_max=1100):
640
+ t0 = ttime()
641
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
642
+ mel = self.mel_extractor(audio, center=True)
643
+ t1 = ttime()
644
+ hidden = self.mel2hidden(mel)
645
+ t2 = ttime()
646
+ if "privateuseone" not in str(self.device):
647
+ hidden = hidden.squeeze(0).cpu().numpy()
648
+ else:
649
+ hidden = hidden[0]
650
+ if self.is_half == True:
651
+ hidden = hidden.astype("float32")
652
+ f0 = self.decode(hidden, thred=thred)
653
+ f0[(f0 < f0_min) | (f0 > f0_max)] = 0
654
+ t3 = ttime()
655
+ return f0
656
+
657
+ def to_local_average_cents(self, salience, thred=0.05):
658
+ # t0 = ttime()
659
+ center = np.argmax(salience, axis=1)  # (n_frames,) index of the peak bin
660
+ salience = np.pad(salience, ((0, 0), (4, 4)))  # (n_frames, 368)
661
+ # t1 = ttime()
662
+ center += 4
663
+ todo_salience = []
664
+ todo_cents_mapping = []
665
+ starts = center - 4
666
+ ends = center + 5
667
+ for idx in range(salience.shape[0]):
668
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
669
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
670
+ # t2 = ttime()
671
+ todo_salience = np.array(todo_salience)  # (n_frames, 9)
672
+ todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
673
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
674
+ weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
675
+ devided = product_sum / weight_sum  # (n_frames,)
676
+ # t3 = ttime()
677
+ maxx = np.max(salience, axis=1) # 帧长
678
+ devided[maxx <= thred] = 0
679
+ # t4 = ttime()
680
+ # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
681
+ return devided
682
+
683
+
684
+ if __name__ == "__main__":
685
+ import librosa
686
+ import soundfile as sf
687
+
688
+ audio, sampling_rate = sf.read(r"C:\Users\liujing04\Desktop\Z\冬之花clip1.wav")
689
+ if len(audio.shape) > 1:
690
+ audio = librosa.to_mono(audio.transpose(1, 0))
691
+ audio_bak = audio.copy()
692
+ if sampling_rate != 16000:
693
+ audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
694
+ model_path = r"D:\BaiduNetdiskDownload\RVC-beta-v2-0727AMD_realtime\rmvpe.pt"
695
+ thred = 0.03 # 0.01
696
+ device = "cuda" if torch.cuda.is_available() else "cpu"
697
+ rmvpe = RMVPE(model_path, is_half=False, device=device)
698
+ t0 = ttime()
699
+ f0 = rmvpe.infer_from_audio(audio, thred=thred)
700
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
701
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
702
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
703
+ # f0 = rmvpe.infer_from_audio(audio, thred=thred)
704
+ t1 = ttime()
705
+ logger.info("%s %.2f", f0.shape, t1 - t0)
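As with the __main__ demo above, a hedged editorial sketch, this time of the pitch-bounded path (infer_from_audio_with_pitch) that the demo does not exercise; the checkpoint path is a placeholder and the audio is assumed to already be 16 kHz mono float32:

import numpy as np
import torch
from lib.infer_libs.rmvpe import RMVPE

device = "cuda" if torch.cuda.is_available() else "cpu"
rmvpe = RMVPE("assets/rmvpe/rmvpe.pt", is_half=False, device=device)  # placeholder path

audio = np.zeros(16000, dtype=np.float32)  # stand-in for a 16 kHz mono clip
# Same decoding as infer_from_audio, but estimates outside [f0_min, f0_max] Hz are zeroed.
f0 = rmvpe.infer_from_audio_with_pitch(audio, thred=0.03, f0_min=50, f0_max=1100)
print(f0.shape)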