Commit a6fe0ff (verified), committed by Staticaliza
Parent: b9b5a56

Update modules/bigvgan/bigvgan.py

Files changed (1):
  1. modules/bigvgan/bigvgan.py (+492, -492)
modules/bigvgan/bigvgan.py CHANGED
The commit replaces the whole file (492 lines removed, 492 added), but only one line's content changes: the import of the Torch fallback Activation1d now points at .act instead of .alias_free_activation.torch.act. Shown as a focused hunk:

@@ -17,4 +17,4 @@
 from . import activations
 from .utils import init_weights, get_padding
-from .alias_free_activation.torch.act import Activation1d as TorchActivation1d
+from .act import Activation1d as TorchActivation1d
 from .env import AttrDict

The full modules/bigvgan/bigvgan.py after this commit:
 
# Copyright (c) 2024 NVIDIA CORPORATION.
# Licensed under the MIT license.

# Adapted from https://github.com/jik876/hifi-gan under the MIT license.
# LICENSE is in incl_licenses directory.

import os
import json
from pathlib import Path
from typing import Optional, Union, Dict

import torch
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm

from . import activations
from .utils import init_weights, get_padding
from .act import Activation1d as TorchActivation1d
from .env import AttrDict

from huggingface_hub import PyTorchModelHubMixin, hf_hub_download


def load_hparams_from_json(path) -> AttrDict:
    with open(path) as f:
        data = f.read()
    return AttrDict(json.loads(data))


class AMPBlock1(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    AMPBlock1 additionally defines self.convs2, a set of Conv1d layers with fixed dilation=1, one applied after each layer in self.convs1.

    Args:
        h (AttrDict): Hyperparameters.
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation rate adds two convolutions (one dilated, one with dilation=1). Default is (1, 3, 5).
        activation (str): Activation function type. Must be either 'snake' or 'snakebeta'. Default is None.
    """

    def __init__(
        self,
        h: AttrDict,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: str = None,
    ):
        super().__init__()

        self.h = h

        self.convs1 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=d,
                        padding=get_padding(kernel_size, d),
                    )
                )
                for d in dilation
            ]
        )
        self.convs1.apply(init_weights)

        self.convs2 = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                )
                for _ in range(len(dilation))
            ]
        )
        self.convs2.apply(init_weights)

        self.num_layers = len(self.convs1) + len(
            self.convs2
        )  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # Activation functions
        if activation == "snake":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.Snake(
                            channels, alpha_logscale=h.snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        elif activation == "snakebeta":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.SnakeBeta(
                            channels, alpha_logscale=h.snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        acts1, acts2 = self.activations[::2], self.activations[1::2]
        for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
            xt = a1(x)
            xt = c1(xt)
            xt = a2(xt)
            xt = c2(xt)
            x = xt + x

        return x

    def remove_weight_norm(self):
        for l in self.convs1:
            remove_weight_norm(l)
        for l in self.convs2:
            remove_weight_norm(l)


class AMPBlock2(torch.nn.Module):
    """
    AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer.
    Unlike AMPBlock1, AMPBlock2 does not contain the extra Conv1d layers with fixed dilation=1.

    Args:
        h (AttrDict): Hyperparameters.
        channels (int): Number of convolution channels.
        kernel_size (int): Size of the convolution kernel. Default is 3.
        dilation (tuple): Dilation rates for the convolutions. Each dilation rate adds one convolution. Default is (1, 3, 5).
        activation (str): Activation function type. Must be either 'snake' or 'snakebeta'. Default is None.
    """

    def __init__(
        self,
        h: AttrDict,
        channels: int,
        kernel_size: int = 3,
        dilation: tuple = (1, 3, 5),
        activation: str = None,
    ):
        super().__init__()

        self.h = h

        self.convs = nn.ModuleList(
            [
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        stride=1,
                        dilation=d,
                        padding=get_padding(kernel_size, d),
                    )
                )
                for d in dilation
            ]
        )
        self.convs.apply(init_weights)

        self.num_layers = len(self.convs)  # Total number of conv layers

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        # Activation functions
        if activation == "snake":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.Snake(
                            channels, alpha_logscale=h.snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        elif activation == "snakebeta":
            self.activations = nn.ModuleList(
                [
                    Activation1d(
                        activation=activations.SnakeBeta(
                            channels, alpha_logscale=h.snake_logscale
                        )
                    )
                    for _ in range(self.num_layers)
                ]
            )
        else:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

    def forward(self, x):
        for c, a in zip(self.convs, self.activations):
            xt = a(x)
            xt = c(xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for l in self.convs:
            remove_weight_norm(l)


class BigVGAN(
    torch.nn.Module,
    PyTorchModelHubMixin,
    library_name="bigvgan",
    repo_url="https://github.com/NVIDIA/BigVGAN",
    docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md",
    pipeline_tag="audio-to-audio",
    license="mit",
    tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"],
):
    """
    BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks).
    New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks.

    Args:
        h (AttrDict): Hyperparameters.
        use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels.

    Note:
        - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported.
        - Ensure that the activation function is correctly specified in the hyperparameters (h.activation).
    """

    def __init__(self, h: AttrDict, use_cuda_kernel: bool = False):
        super().__init__()
        self.h = h
        self.h["use_cuda_kernel"] = use_cuda_kernel

        # Select which Activation1d, lazy-load cuda version to ensure backward compatibility
        if self.h.get("use_cuda_kernel", False):
            from alias_free_activation.cuda.activation1d import (
                Activation1d as CudaActivation1d,
            )

            Activation1d = CudaActivation1d
        else:
            Activation1d = TorchActivation1d

        self.num_kernels = len(h.resblock_kernel_sizes)
        self.num_upsamples = len(h.upsample_rates)

        # Pre-conv
        self.conv_pre = weight_norm(
            Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)
        )

        # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
        if h.resblock == "1":
            resblock_class = AMPBlock1
        elif h.resblock == "2":
            resblock_class = AMPBlock2
        else:
            raise ValueError(
                f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}"
            )

        # Transposed conv-based upsamplers. does not apply anti-aliasing
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
            self.ups.append(
                nn.ModuleList(
                    [
                        weight_norm(
                            ConvTranspose1d(
                                h.upsample_initial_channel // (2**i),
                                h.upsample_initial_channel // (2 ** (i + 1)),
                                k,
                                u,
                                padding=(k - u) // 2,
                            )
                        )
                    ]
                )
            )

        # Residual blocks using anti-aliased multi-periodicity composition modules (AMP)
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = h.upsample_initial_channel // (2 ** (i + 1))
            for j, (k, d) in enumerate(
                zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)
            ):
                self.resblocks.append(
                    resblock_class(h, ch, k, d, activation=h.activation)
                )

        # Post-conv
        activation_post = (
            activations.Snake(ch, alpha_logscale=h.snake_logscale)
            if h.activation == "snake"
            else (
                activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale)
                if h.activation == "snakebeta"
                else None
            )
        )
        if activation_post is None:
            raise NotImplementedError(
                "activation incorrectly specified. check the config file and look for 'activation'."
            )

        self.activation_post = Activation1d(activation=activation_post)

        # Whether to use bias for the final conv_post. Default to True for backward compatibility
        self.use_bias_at_final = h.get("use_bias_at_final", True)
        self.conv_post = weight_norm(
            Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)
        )

        # Weight initialization
        for i in range(len(self.ups)):
            self.ups[i].apply(init_weights)
        self.conv_post.apply(init_weights)

        # Final tanh activation. Defaults to True for backward compatibility
        self.use_tanh_at_final = h.get("use_tanh_at_final", True)

    def forward(self, x):
        # Pre-conv
        x = self.conv_pre(x)

        for i in range(self.num_upsamples):
            # Upsampling
            for i_up in range(len(self.ups[i])):
                x = self.ups[i][i_up](x)
            # AMP blocks
            xs = None
            for j in range(self.num_kernels):
                if xs is None:
                    xs = self.resblocks[i * self.num_kernels + j](x)
                else:
                    xs += self.resblocks[i * self.num_kernels + j](x)
            x = xs / self.num_kernels

        # Post-conv
        x = self.activation_post(x)
        x = self.conv_post(x)
        # Final tanh activation
        if self.use_tanh_at_final:
            x = torch.tanh(x)
        else:
            x = torch.clamp(x, min=-1.0, max=1.0)  # Bound the output to [-1, 1]

        return x

    def remove_weight_norm(self):
        try:
            print("Removing weight norm...")
            for l in self.ups:
                for l_i in l:
                    remove_weight_norm(l_i)
            for l in self.resblocks:
                l.remove_weight_norm()
            remove_weight_norm(self.conv_pre)
            remove_weight_norm(self.conv_post)
        except ValueError:
            print("[INFO] Model already removed weight norm. Skipping!")
            pass

    # Additional methods for huggingface_hub support
    def _save_pretrained(self, save_directory: Path) -> None:
        """Save weights and config.json from a Pytorch model to a local directory."""

        model_path = save_directory / "bigvgan_generator.pt"
        torch.save({"generator": self.state_dict()}, model_path)

        config_path = save_directory / "config.json"
        with open(config_path, "w") as config_file:
            json.dump(self.h, config_file, indent=4)

    @classmethod
    def _from_pretrained(
        cls,
        *,
        model_id: str,
        revision: str,
        cache_dir: str,
        force_download: bool,
        proxies: Optional[Dict],
        resume_download: bool,
        local_files_only: bool,
        token: Union[str, bool, None],
        map_location: str = "cpu",  # Additional argument
        strict: bool = False,  # Additional argument
        use_cuda_kernel: bool = False,
        **model_kwargs,
    ):
        """Load Pytorch pretrained weights and return the loaded model."""

        # Download and load hyperparameters (h) used by BigVGAN
        if os.path.isdir(model_id):
            print("Loading config.json from local directory")
            config_file = os.path.join(model_id, "config.json")
        else:
            config_file = hf_hub_download(
                repo_id=model_id,
                filename="config.json",
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                token=token,
                local_files_only=local_files_only,
            )
        h = load_hparams_from_json(config_file)

        # instantiate BigVGAN using h
        if use_cuda_kernel:
            print(
                f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!"
            )
            print(
                f"[WARNING] You need nvcc and ninja installed on your system, matching the PyTorch build you are using, to build the kernel. If not, the model will fail to initialize or will generate incorrect waveforms!"
            )
            print(
                f"[WARNING] For details, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis"
            )
        model = cls(h, use_cuda_kernel=use_cuda_kernel)

        # Download and load pretrained generator weight
        if os.path.isdir(model_id):
            print("Loading weights from local directory")
            model_file = os.path.join(model_id, "bigvgan_generator.pt")
        else:
            print(f"Loading weights from {model_id}")
            model_file = hf_hub_download(
                repo_id=model_id,
                filename="bigvgan_generator.pt",
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                token=token,
                local_files_only=local_files_only,
            )

        checkpoint_dict = torch.load(model_file, map_location=map_location)

        try:
            model.load_state_dict(checkpoint_dict["generator"])
        except RuntimeError:
            print(
                f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!"
            )
            model.remove_weight_norm()
            model.load_state_dict(checkpoint_dict["generator"])

        return model
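
For reference, a minimal usage sketch for the generator defined above. It is not part of bigvgan.py or of this commit: the hyperparameter values, the repo-root import path (modules.bigvgan), and the tensor sizes are illustrative assumptions based on the constructor and forward() shown above. Real checkpoints ship their own config.json, which load_hparams_from_json() turns into the AttrDict h, and pretrained weights can instead be loaded through BigVGAN.from_pretrained() thanks to PyTorchModelHubMixin.

# usage_sketch.py -- illustrative only; the config values below are assumptions,
# not a shipped config.json.
import torch

from modules.bigvgan.bigvgan import BigVGAN  # assumed repo-root import path
from modules.bigvgan.env import AttrDict

# Hypothetical hyperparameters mirroring the fields the constructor reads.
h = AttrDict(
    {
        "num_mels": 80,  # mel channels fed to conv_pre
        "upsample_initial_channel": 256,
        "upsample_rates": [4, 4, 2, 2],  # total upsampling factor 4*4*2*2 = 64
        "upsample_kernel_sizes": [8, 8, 4, 4],
        "resblock": "1",  # selects AMPBlock1
        "resblock_kernel_sizes": [3, 7, 11],
        "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        "activation": "snakebeta",  # or "snake"
        "snake_logscale": True,
    }
)

model = BigVGAN(h, use_cuda_kernel=False).eval()
model.remove_weight_norm()  # typically done once before inference

mel = torch.randn(1, h.num_mels, 100)  # [batch, num_mels, frames]
with torch.no_grad():
    wav = model(mel)  # [batch, 1, frames * 64], bounded to [-1, 1]

print(wav.shape)  # torch.Size([1, 1, 6400])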