Eempostor committed on
Commit 6fa1243
1 Parent(s): 04f87da

Upload fcpe.py

Files changed (1)
  1. lib/infer_libs/fcpe.py +873 -0
lib/infer_libs/fcpe.py ADDED
@@ -0,0 +1,873 @@
from typing import Union

import torch.nn.functional as F
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from torchaudio.transforms import Resample
import os
import librosa
import soundfile as sf
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
import math
from functools import partial

from einops import rearrange, repeat
from local_attention import LocalAttention

os.environ["LRU_CACHE_CAPACITY"] = "3"

def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
    sampling_rate = None
    try:
        data, sampling_rate = sf.read(full_path, always_2d=True)  # read with soundfile; always_2d keeps a channel axis
    except Exception as ex:
        print(f"'{full_path}' failed to load.\nException:")
        print(ex)
        if return_empty_on_exception:
            return [], sampling_rate or target_sr or 48000
        else:
            raise Exception(ex)

    if len(data.shape) > 1:
        data = data[:, 0]
        assert len(data) > 2  # check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)

    if np.issubdtype(data.dtype, np.integer):  # if audio data is type int
        max_mag = -np.iinfo(data.dtype).min  # maximum magnitude = min possible value of intXX
    else:  # if audio data is type fp32
        max_mag = max(np.amax(data), -np.amin(data))
        max_mag = (2**31) + 1 if max_mag > (2**15) else ((2**15) + 1 if max_mag > 1.01 else 1.0)  # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32

    data = torch.FloatTensor(data.astype(np.float32)) / max_mag

    if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:
        # resample will crash with inf/NaN inputs; return an empty array instead of raising
        return [], sampling_rate or target_sr or 48000
    if target_sr is not None and sampling_rate != target_sr:
        data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
        sampling_rate = target_sr

    return data, sampling_rate

def dynamic_range_compression(x, C=1, clip_val=1e-5):
    return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)

def dynamic_range_decompression(x, C=1):
    return np.exp(x) / C

def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    return torch.log(torch.clamp(x, min=clip_val) * C)

def dynamic_range_decompression_torch(x, C=1):
    return torch.exp(x) / C

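# Sanity check for the torch pair above (a minimal sketch, not part of the
# original file): decompression inverts compression for inputs above clip_val.
#   x = torch.rand(4) + 1e-3
#   assert torch.allclose(x, dynamic_range_decompression_torch(dynamic_range_compression_torch(x)))
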
class STFT:
    def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
        self.target_sr = sr

        self.n_mels = n_mels
        self.n_fft = n_fft
        self.win_size = win_size
        self.hop_length = hop_length
        self.fmin = fmin
        self.fmax = fmax
        self.clip_val = clip_val
        self.mel_basis = {}
        self.hann_window = {}

    def get_mel(self, y, keyshift=0, speed=1, center=False, train=False):
        sampling_rate = self.target_sr
        n_mels = self.n_mels
        n_fft = self.n_fft
        win_size = self.win_size
        hop_length = self.hop_length
        fmin = self.fmin
        fmax = self.fmax
        clip_val = self.clip_val

        factor = 2 ** (keyshift / 12)
        n_fft_new = int(np.round(n_fft * factor))
        win_size_new = int(np.round(win_size * factor))
        hop_length_new = int(np.round(hop_length * speed))
        if not train:
            mel_basis = self.mel_basis
            hann_window = self.hann_window
        else:
            mel_basis = {}
            hann_window = {}

        if torch.min(y) < -1.:
            print('min value is ', torch.min(y))
        if torch.max(y) > 1.:
            print('max value is ', torch.max(y))

        mel_basis_key = str(fmax) + '_' + str(y.device)
        if mel_basis_key not in mel_basis:
            mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
            mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)

        keyshift_key = str(keyshift) + '_' + str(y.device)
        if keyshift_key not in hann_window:
            hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)

        pad_left = (win_size_new - hop_length_new) // 2
        pad_right = max((win_size_new - hop_length_new + 1) // 2, win_size_new - y.size(-1) - pad_left)
        if pad_right < y.size(-1):
            mode = 'reflect'
        else:
            mode = 'constant'
        y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode=mode)
        y = y.squeeze(1)

        spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=hann_window[keyshift_key],
                          center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
        spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9))
        if keyshift != 0:
            size = n_fft // 2 + 1
            resize = spec.size(1)
            if resize < size:
                spec = F.pad(spec, (0, 0, 0, size - resize))
            spec = spec[:, :size, :] * win_size / win_size_new
        spec = torch.matmul(mel_basis[mel_basis_key], spec)
        spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
        return spec

    def __call__(self, audiopath):
        audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
        spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
        return spect

stft = STFT()

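# Example usage of the module-level helper (a minimal sketch; "voice.wav" and
# `wav` are hypothetical, not part of the original file):
#   mel = stft("voice.wav")        # -> [n_mels, n_frames] log-mel from a file
#   mel = stft.get_mel(wav[None])  # or from a [1, n_samples] float tensor in [-1, 1]
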
#import fast_transformers.causal_product.causal_product_cuda

def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
    b, h, *_ = data.shape
    # (batch size, head, length, model_dim)

    # normalize model dim
    data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.

    # ratio = 1 / sqrt(number of random features); projection_matrix.shape[0] --> 266
    ratio = (projection_matrix.shape[0] ** -0.5)

    projection = repeat(projection_matrix, 'j d -> b h j d', b=b, h=h)
    projection = projection.type_as(data)

    # data_dash = w^T x
    data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)

    # diag_data = D**2
    diag_data = data ** 2
    diag_data = torch.sum(diag_data, dim=-1)
    diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
    diag_data = diag_data.unsqueeze(dim=-1)

    if is_query:
        data_dash = ratio * (
            torch.exp(data_dash - diag_data -
                      torch.max(data_dash, dim=-1, keepdim=True).values) + eps)
    else:
        data_dash = ratio * (
            torch.exp(data_dash - diag_data + eps))  # - torch.max(data_dash)) + eps)

    return data_dash.type_as(data)

def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
    unstructured_block = torch.randn((cols, cols), device=device)
    q, r = torch.linalg.qr(unstructured_block.cpu(), mode='reduced')
    q, r = map(lambda t: t.to(device), (q, r))

    # proposed by @Parskatt
    # to make sure Q is uniform https://arxiv.org/pdf/math-ph/0609050.pdf
    if qr_uniform_q:
        d = torch.diag(r, 0)
        q *= d.sign()
    return q.t()

def exists(val):
    return val is not None

def empty(tensor):
    return tensor.numel() == 0

def default(val, d):
    return val if exists(val) else d

def cast_tuple(val):
    return (val,) if not isinstance(val, tuple) else val

class PCmer(nn.Module):
    """The encoder that is used in the Transformer model."""

    def __init__(self,
                 num_layers,
                 num_heads,
                 dim_model,
                 dim_keys,
                 dim_values,
                 residual_dropout,
                 attention_dropout):
        super().__init__()
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.dim_model = dim_model
        self.dim_values = dim_values
        self.dim_keys = dim_keys
        self.residual_dropout = residual_dropout
        self.attention_dropout = attention_dropout

        self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)])

    # METHODS ########################################################################################################

    def forward(self, phone, mask=None):
        # apply all layers to the input
        for (i, layer) in enumerate(self._layers):
            phone = layer(phone, mask)
        # provide the final sequence
        return phone


# ==================================================================================================================== #
#  CLASS  _ E N C O D E R  L A Y E R                                                                                   #
# ==================================================================================================================== #


class _EncoderLayer(nn.Module):
    """One layer of the encoder.

    Attributes:
        attn (:class:`mha.MultiHeadAttention`): The attention mechanism that is used to read the input sequence.
        feed_forward (:class:`ffl.FeedForwardLayer`): The feed-forward layer on top of the attention mechanism.
    """

    def __init__(self, parent: PCmer):
        """Creates a new instance of ``_EncoderLayer``.

        Args:
            parent (Encoder): The encoder that the layer is created for.
        """
        super().__init__()

        self.conformer = ConformerConvModule(parent.dim_model)
        self.norm = nn.LayerNorm(parent.dim_model)
        self.dropout = nn.Dropout(parent.residual_dropout)

        # selfatt -> fastatt: performer!
        self.attn = SelfAttention(dim=parent.dim_model,
                                  heads=parent.num_heads,
                                  causal=False)

    # METHODS ########################################################################################################

    def forward(self, phone, mask=None):
        # compute attention sub-layer
        phone = phone + (self.attn(self.norm(phone), mask=mask))

        phone = phone + (self.conformer(phone))

        return phone

def calc_same_padding(kernel_size):
    pad = kernel_size // 2
    return (pad, pad - (kernel_size + 1) % 2)

# helper classes

class Swish(nn.Module):
    def forward(self, x):
        return x * x.sigmoid()

class Transpose(nn.Module):
    def __init__(self, dims):
        super().__init__()
        assert len(dims) == 2, 'dims must be a tuple of two dimensions'
        self.dims = dims

    def forward(self, x):
        return x.transpose(*self.dims)

class GLU(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        out, gate = x.chunk(2, dim=self.dim)
        return out * gate.sigmoid()

class DepthWiseConv1d(nn.Module):
    def __init__(self, chan_in, chan_out, kernel_size, padding):
        super().__init__()
        self.padding = padding
        self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in)

    def forward(self, x):
        x = F.pad(x, self.padding)
        return self.conv(x)

class ConformerConvModule(nn.Module):
    def __init__(
        self,
        dim,
        causal=False,
        expansion_factor=2,
        kernel_size=31,
        dropout=0.
    ):
        super().__init__()

        inner_dim = dim * expansion_factor
        padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)

        self.net = nn.Sequential(
            nn.LayerNorm(dim),
            Transpose((1, 2)),
            nn.Conv1d(dim, inner_dim * 2, 1),
            GLU(dim=1),
            DepthWiseConv1d(inner_dim, inner_dim, kernel_size=kernel_size, padding=padding),
            # nn.BatchNorm1d(inner_dim) if not causal else nn.Identity(),
            Swish(),
            nn.Conv1d(inner_dim, dim, 1),
            Transpose((1, 2)),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)

def linear_attention(q, k, v):
    if v is None:
        out = torch.einsum('...ed,...nd->...ne', k, q)
        return out
    else:
        k_cumsum = k.sum(dim=-2)
        D_inv = 1. / (torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q)) + 1e-8)

        context = torch.einsum('...nd,...ne->...de', k, v)
        out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
        return out

def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, qr_uniform_q=False, device=None):
    nb_full_blocks = int(nb_rows / nb_columns)
    block_list = []

    for _ in range(nb_full_blocks):
        q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q, device=device)
        block_list.append(q)
    # each block_list[n] is an orthogonal matrix ... (model_dim * model_dim)
    remaining_rows = nb_rows - nb_full_blocks * nb_columns
    if remaining_rows > 0:
        q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q, device=device)
        block_list.append(q[:remaining_rows])

    final_matrix = torch.cat(block_list)

    if scaling == 0:
        multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
    elif scaling == 1:
        multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device=device)
    else:
        raise ValueError(f'Invalid scaling {scaling}')

    return torch.diag(multiplier) @ final_matrix

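# Property check (a sketch, not part of the original file): each chunk returned
# by orthogonal_matrix_chunk is an orthogonal square matrix, so its rows give
# decorrelated random features for the softmax kernel above.
#   q = orthogonal_matrix_chunk(64)
#   assert torch.allclose(q @ q.t(), torch.eye(64), atol=1e-5)
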
class FastAttention(nn.Module):
    def __init__(self, dim_heads, nb_features=None, ortho_scaling=0, causal=False, generalized_attention=False, kernel_fn=nn.ReLU(), qr_uniform_q=False, no_projection=False):
        super().__init__()
        nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))

        self.dim_heads = dim_heads
        self.nb_features = nb_features
        self.ortho_scaling = ortho_scaling

        self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows=self.nb_features, nb_columns=dim_heads, scaling=ortho_scaling, qr_uniform_q=qr_uniform_q)
        projection_matrix = self.create_projection()
        self.register_buffer('projection_matrix', projection_matrix)

        self.generalized_attention = generalized_attention
        self.kernel_fn = kernel_fn

        # if this is turned on, no projection will be used
        # queries and keys will be softmax-ed as in the original efficient attention paper
        self.no_projection = no_projection

        self.causal = causal
        # note: the causal path below references self.causal_linear_fn, which is
        # not defined in this file; only causal=False is usable in this copy

    @torch.no_grad()
    def redraw_projection_matrix(self):
        projections = self.create_projection()
        self.projection_matrix.copy_(projections)
        del projections

    def forward(self, q, k, v):
        device = q.device

        if self.no_projection:
            q = q.softmax(dim=-1)
            k = torch.exp(k) if self.causal else k.softmax(dim=-2)
        else:
            create_kernel = partial(softmax_kernel, projection_matrix=self.projection_matrix, device=device)

            q = create_kernel(q, is_query=True)
            k = create_kernel(k, is_query=False)

        attn_fn = linear_attention if not self.causal else self.causal_linear_fn
        if v is None:
            out = attn_fn(q, k, None)
            return out
        else:
            out = attn_fn(q, k, v)
            return out

class SelfAttention(nn.Module):
    def __init__(self, dim, causal=False, heads=8, dim_head=64, local_heads=0, local_window_size=256, nb_features=None, feature_redraw_interval=1000, generalized_attention=False, kernel_fn=nn.ReLU(), qr_uniform_q=False, dropout=0., no_projection=False):
        super().__init__()
        assert dim % heads == 0, 'dimension must be divisible by number of heads'
        dim_head = default(dim_head, dim // heads)
        inner_dim = dim_head * heads
        self.fast_attention = FastAttention(dim_head, nb_features, causal=causal, generalized_attention=generalized_attention, kernel_fn=kernel_fn, qr_uniform_q=qr_uniform_q, no_projection=no_projection)

        self.heads = heads
        self.global_heads = heads - local_heads
        self.local_attn = LocalAttention(window_size=local_window_size, causal=causal, autopad=True, dropout=dropout, look_forward=int(not causal), rel_pos_emb_config=(dim_head, local_heads)) if local_heads > 0 else None

        self.to_q = nn.Linear(dim, inner_dim)
        self.to_k = nn.Linear(dim, inner_dim)
        self.to_v = nn.Linear(dim, inner_dim)
        self.to_out = nn.Linear(inner_dim, dim)
        self.dropout = nn.Dropout(dropout)

    @torch.no_grad()
    def redraw_projection_matrix(self):
        self.fast_attention.redraw_projection_matrix()

    def forward(self, x, context=None, mask=None, context_mask=None, name=None, inference=False, **kwargs):
        _, _, _, h, gh = *x.shape, self.heads, self.global_heads

        cross_attend = exists(context)

        context = default(context, x)
        context_mask = default(context_mask, mask) if not cross_attend else context_mask
        q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
        (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))

        attn_outs = []
        if not empty(q):
            if exists(context_mask):
                global_mask = context_mask[:, None, :, None]
                v.masked_fill_(~global_mask, 0.)
            if cross_attend:
                pass  # (disabled name_embedding cross-attention experiment)
            else:
                out = self.fast_attention(q, k, v)
            attn_outs.append(out)

        if not empty(lq):
            assert not cross_attend, 'local attention is not compatible with cross attention'
            out = self.local_attn(lq, lk, lv, input_mask=mask)
            attn_outs.append(out)

        out = torch.cat(attn_outs, dim=1)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return self.dropout(out)

def l2_regularization(model, l2_alpha):
    l2_loss = []
    for module in model.modules():
        if type(module) is nn.Conv2d:
            l2_loss.append((module.weight ** 2).sum() / 2.0)
    return l2_alpha * sum(l2_loss)


class FCPEModel(nn.Module):
    def __init__(
        self,
        input_channel=128,
        out_dims=360,
        n_layers=12,
        n_chans=512,
        use_siren=False,
        use_full=False,
        loss_mse_scale=10,
        loss_l2_regularization=False,
        loss_l2_regularization_scale=1,
        loss_grad1_mse=False,
        loss_grad1_mse_scale=1,
        f0_max=1975.5,
        f0_min=32.70,
        confidence=False,
        threshold=0.05,
        use_input_conv=True
    ):
        super().__init__()
        if use_siren is True:
            raise ValueError("Siren is not supported yet.")
        if use_full is True:
            raise ValueError("Full model is not supported yet.")

        self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10
        self.loss_l2_regularization = loss_l2_regularization if (loss_l2_regularization is not None) else False
        self.loss_l2_regularization_scale = loss_l2_regularization_scale if (loss_l2_regularization_scale is not None) else 1
        self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False
        self.loss_grad1_mse_scale = loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1
        self.f0_max = f0_max if (f0_max is not None) else 1975.5
        self.f0_min = f0_min if (f0_min is not None) else 32.70
        self.confidence = confidence if (confidence is not None) else False
        self.threshold = threshold if (threshold is not None) else 0.05
        self.use_input_conv = use_input_conv if (use_input_conv is not None) else True

        self.cent_table_b = torch.Tensor(
            np.linspace(self.f0_to_cent(torch.Tensor([f0_min]))[0], self.f0_to_cent(torch.Tensor([f0_max]))[0],
                        out_dims))
        self.register_buffer("cent_table", self.cent_table_b)

        # conv in stack
        _leaky = nn.LeakyReLU()
        self.stack = nn.Sequential(
            nn.Conv1d(input_channel, n_chans, 3, 1, 1),
            nn.GroupNorm(4, n_chans),
            _leaky,
            nn.Conv1d(n_chans, n_chans, 3, 1, 1))

        # transformer
        self.decoder = PCmer(
            num_layers=n_layers,
            num_heads=8,
            dim_model=n_chans,
            dim_keys=n_chans,
            dim_values=n_chans,
            residual_dropout=0.1,
            attention_dropout=0.1)
        self.norm = nn.LayerNorm(n_chans)

        # out
        self.n_out = out_dims
        self.dense_out = weight_norm(
            nn.Linear(n_chans, self.n_out))

    def forward(self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder="local_argmax"):
        """
        input:
            mel: B x n_frames x n_mels
        return:
            inference: B x n_frames x 1 f0 (Hz if return_hz_f0, else log-scaled)
            training: scalar loss
        """
        if cdecoder == "argmax":
            self.cdecoder = self.cents_decoder
        elif cdecoder == "local_argmax":
            self.cdecoder = self.cents_local_decoder
        if self.use_input_conv:
            x = self.stack(mel.transpose(1, 2)).transpose(1, 2)
        else:
            x = mel
        x = self.decoder(x)
        x = self.norm(x)
        x = self.dense_out(x)  # [B,N,D]
        x = torch.sigmoid(x)
        if not infer:
            gt_cent_f0 = self.f0_to_cent(gt_f0)  # mel f0 [B,N,1]
            gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0)  # [B,N,out_dim]
            loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0)  # bce loss
            # l2 regularization
            if self.loss_l2_regularization:
                loss_all = loss_all + l2_regularization(model=self, l2_alpha=self.loss_l2_regularization_scale)
            x = loss_all
        if infer:
            x = self.cdecoder(x)
            x = self.cent_to_f0(x)
            if not return_hz_f0:
                x = (1 + x / 700).log()
        return x

    def cents_decoder(self, y, mask=True):
        B, N, _ = y.size()
        ci = self.cent_table[None, None, :].expand(B, N, -1)
        rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(y, dim=-1, keepdim=True)  # cents: [B,N,1]
        if mask:
            confident = torch.max(y, dim=-1, keepdim=True)[0]
            confident_mask = torch.ones_like(confident)
            confident_mask[confident <= self.threshold] = float("-inf")
            rtn = rtn * confident_mask
        if self.confidence:
            return rtn, confident
        else:
            return rtn

    def cents_local_decoder(self, y, mask=True):
        B, N, _ = y.size()
        ci = self.cent_table[None, None, :].expand(B, N, -1)
        confident, max_index = torch.max(y, dim=-1, keepdim=True)
        local_argmax_index = torch.arange(0, 9).to(max_index.device) + (max_index - 4)
        local_argmax_index[local_argmax_index < 0] = 0
        local_argmax_index[local_argmax_index >= self.n_out] = self.n_out - 1
        ci_l = torch.gather(ci, -1, local_argmax_index)
        y_l = torch.gather(y, -1, local_argmax_index)
        rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(y_l, dim=-1, keepdim=True)  # cents: [B,N,1]
        if mask:
            confident_mask = torch.ones_like(confident)
            confident_mask[confident <= self.threshold] = float("-inf")
            rtn = rtn * confident_mask
        if self.confidence:
            return rtn, confident
        else:
            return rtn

    def cent_to_f0(self, cent):
        return 10. * 2 ** (cent / 1200.)

    def f0_to_cent(self, f0):
        return 1200. * torch.log2(f0 / 10.)

    def gaussian_blurred_cent(self, cents):  # cents: [B,N,1]
        mask = (cents > 0.1) & (cents < (1200. * np.log2(self.f0_max / 10.)))
        B, N, _ = cents.size()
        ci = self.cent_table[None, None, :].expand(B, N, -1)
        return torch.exp(-torch.square(ci - cents) / 1250) * mask.float()

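# Worked example for the cent scale (a sketch, not part of the original file):
# f0_to_cent maps Hz to 1200 * log2(f0 / 10), and cent_to_f0 inverts it exactly.
#   model.f0_to_cent(torch.tensor([440.0]))   # -> ~6551.3 cents
#   model.cent_to_f0(torch.tensor([6551.3]))  # -> ~440.0 Hz
# where `model` is any FCPEModel instance.
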
class FCPEInfer:
    def __init__(self, model_path, device=None, dtype=torch.float32):
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        ckpt = torch.load(model_path, map_location=torch.device(self.device))
        self.args = DotDict(ckpt["config"])
        self.dtype = dtype
        model = FCPEModel(
            input_channel=self.args.model.input_channel,
            out_dims=self.args.model.out_dims,
            n_layers=self.args.model.n_layers,
            n_chans=self.args.model.n_chans,
            use_siren=self.args.model.use_siren,
            use_full=self.args.model.use_full,
            loss_mse_scale=self.args.loss.loss_mse_scale,
            loss_l2_regularization=self.args.loss.loss_l2_regularization,
            loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale,
            loss_grad1_mse=self.args.loss.loss_grad1_mse,
            loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale,
            f0_max=self.args.model.f0_max,
            f0_min=self.args.model.f0_min,
            confidence=self.args.model.confidence,
        )
        model.to(self.device).to(self.dtype)
        model.load_state_dict(ckpt['model'])
        model.eval()
        self.model = model
        self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device)

    @torch.no_grad()
    def __call__(self, audio, sr, threshold=0.05):
        self.model.threshold = threshold
        audio = audio[None, :]
        mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype)
        f0 = self.model(mel=mel, infer=True, return_hz_f0=True)
        return f0

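# Minimal inference sketch (hypothetical checkpoint path and input, not part of
# the original file):
#   infer = FCPEInfer("fcpe.pt")
#   f0 = infer(torch.from_numpy(wav).float().to(infer.device), sr=16000)
#   # f0: [1, n_frames, 1] tensor in Hz (return_hz_f0=True inside __call__)
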
class Wav2Mel:

    def __init__(self, args, device=None, dtype=torch.float32):
        # self.args = args
        self.sampling_rate = args.mel.sampling_rate
        self.hop_size = args.mel.hop_size
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.dtype = dtype
        self.stft = STFT(
            args.mel.sampling_rate,
            args.mel.num_mels,
            args.mel.n_fft,
            args.mel.win_size,
            args.mel.hop_size,
            args.mel.fmin,
            args.mel.fmax
        )
        self.resample_kernel = {}

    def extract_nvstft(self, audio, keyshift=0, train=False):
        mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2)  # B, n_frames, bins
        return mel

    def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
        audio = audio.to(self.dtype).to(self.device)
        # resample
        if sample_rate == self.sampling_rate:
            audio_res = audio
        else:
            key_str = str(sample_rate)
            if key_str not in self.resample_kernel:
                self.resample_kernel[key_str] = Resample(sample_rate, self.sampling_rate, lowpass_filter_width=128)
            self.resample_kernel[key_str] = self.resample_kernel[key_str].to(self.dtype).to(self.device)
            audio_res = self.resample_kernel[key_str](audio)

        # extract
        mel = self.extract_nvstft(audio_res, keyshift=keyshift, train=train)  # B, n_frames, bins
        n_frames = int(audio.shape[1] // self.hop_size) + 1
        if n_frames > int(mel.shape[1]):
            mel = torch.cat((mel, mel[:, -1:, :]), 1)
        if n_frames < int(mel.shape[1]):
            mel = mel[:, :n_frames, :]
        return mel

    def __call__(self, audio, sample_rate, keyshift=0, train=False):
        return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train)


class DotDict(dict):
    def __getattr__(*args):
        val = dict.get(*args)
        return DotDict(val) if type(val) is dict else val

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

class F0Predictor(object):
    def compute_f0(self, wav, p_len):
        '''
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length]
        '''
        pass

    def compute_f0_uv(self, wav, p_len):
        '''
        input: wav:[signal_length]
               p_len:int
        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
        '''
        pass

class FCPE(F0Predictor):
    def __init__(self, model_path, hop_length=512, f0_min=50, f0_max=1100, dtype=torch.float32, device=None, sampling_rate=44100,
                 threshold=0.05):
        self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
        self.hop_length = hop_length
        self.f0_min = f0_min
        self.f0_max = f0_max
        if device is None:
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            self.device = device
        self.threshold = threshold
        self.sampling_rate = sampling_rate
        self.dtype = dtype
        self.name = "fcpe"

    def repeat_expand(
        self, content: Union[torch.Tensor, np.ndarray], target_len: int, mode: str = "nearest"
    ):
        ndim = content.ndim

        if content.ndim == 1:
            content = content[None, None]
        elif content.ndim == 2:
            content = content[None]

        assert content.ndim == 3

        is_np = isinstance(content, np.ndarray)
        if is_np:
            content = torch.from_numpy(content)

        results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)

        if is_np:
            results = results.numpy()

        if ndim == 1:
            return results[0, 0]
        elif ndim == 2:
            return results[0]

    def post_process(self, x, sampling_rate, f0, pad_to):
        if isinstance(f0, np.ndarray):
            f0 = torch.from_numpy(f0).float().to(x.device)

        if pad_to is None:
            return f0

        f0 = self.repeat_expand(f0, pad_to)

        vuv_vector = torch.zeros_like(f0)
        vuv_vector[f0 > 0.0] = 1.0
        vuv_vector[f0 <= 0.0] = 0.0

        # drop zero-frequency frames and interpolate over them linearly
        nzindex = torch.nonzero(f0).squeeze()
        f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
        time_org = self.hop_length / sampling_rate * nzindex.cpu().numpy()
        time_frame = np.arange(pad_to) * self.hop_length / sampling_rate

        vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]

        if f0.shape[0] <= 0:
            return torch.zeros(pad_to, dtype=torch.float, device=x.device).cpu().numpy(), vuv_vector.cpu().numpy()
        if f0.shape[0] == 1:
            return (torch.ones(pad_to, dtype=torch.float, device=x.device) * f0[
                0]).cpu().numpy(), vuv_vector.cpu().numpy()

        # this could probably be rewritten with torch
        f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
        # vuv_vector = np.ceil(scipy.ndimage.zoom(vuv_vector, pad_to / len(vuv_vector), order=0))

        return f0, vuv_vector.cpu().numpy()

    def compute_f0(self, wav, p_len=None):
        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
        if p_len is None:
            print("fcpe p_len is None")
            p_len = x.shape[0] // self.hop_length
        # else:
        #     assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0, :, 0]
        if torch.all(f0 == 0):
            rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
            return rtn  # f0 only, to match the non-zero path below
        return self.post_process(x, self.sampling_rate, f0, p_len)[0]

    def compute_f0_uv(self, wav, p_len=None):
        x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
        if p_len is None:
            p_len = x.shape[0] // self.hop_length
        # else:
        #     assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
        f0 = self.fcpe(x, sr=self.sampling_rate, threshold=self.threshold)[0, :, 0]
        if torch.all(f0 == 0):
            rtn = f0.cpu().numpy() if p_len is None else np.zeros(p_len)
            return rtn, rtn
        return self.post_process(x, self.sampling_rate, f0, p_len)
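
# End-to-end usage sketch (hypothetical paths and values, not part of the
# original file):
#   wav, sr = sf.read("voice.wav")  # mono float array
#   fcpe = FCPE("fcpe.pt", hop_length=160, sampling_rate=sr)
#   p_len = len(wav) // fcpe.hop_length
#   f0 = fcpe.compute_f0(wav, p_len=p_len)        # [p_len] f0 in Hz
#   f0, uv = fcpe.compute_f0_uv(wav, p_len=p_len) # f0 plus a voiced/unvoiced mask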