jeduardogruiz committed
Commit
1ecb5aa
1 Parent(s): eb3c3f4

Create Conv.py

Files changed (1)
  1. Conv.py +253 -0
Conv.py ADDED
@@ -0,0 +1,253 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Convolutional layers wrappers and utilities."""
+
+ import math
+ import typing as tp
+ import warnings
+
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.nn.utils import spectral_norm, weight_norm
+
+ from .norm import ConvLayerNorm
+
+
+ CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
+                                  'time_layer_norm', 'layer_norm', 'time_group_norm'])
+
+
+ def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
+     assert norm in CONV_NORMALIZATIONS
+     if norm == 'weight_norm':
+         return weight_norm(module)
+     elif norm == 'spectral_norm':
+         return spectral_norm(module)
+     else:
+         # We already checked that `norm` is in CONV_NORMALIZATIONS, so any other choice
+         # doesn't need reparametrization.
+         return module
+
+
+ def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
+     """Return the proper normalization module. If causal is True, this will ensure the returned
+     module is causal, or raise an error if the normalization doesn't support causal evaluation.
+     """
+     assert norm in CONV_NORMALIZATIONS
+     if norm == 'layer_norm':
+         assert isinstance(module, nn.modules.conv._ConvNd)
+         return ConvLayerNorm(module.out_channels, **norm_kwargs)
+     elif norm == 'time_group_norm':
+         if causal:
+             raise ValueError("GroupNorm doesn't support causal evaluation.")
+         assert isinstance(module, nn.modules.conv._ConvNd)
+         return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
+     else:
+         return nn.Identity()
+
+
+ def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
+                                  padding_total: int = 0) -> int:
+     """See `pad_for_conv1d`.
+     """
+     length = x.shape[-1]
+     n_frames = (length - kernel_size + padding_total) / stride + 1
+     ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
+     return ideal_length - length
+
+
+ def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0):
+     """Pad for a convolution to make sure that the last window is full.
+     Extra padding is added at the end. This is required to ensure that we can rebuild
+     an output of the same length, as otherwise, even with padding, some time steps
+     might get removed.
+     For instance, with total padding = 4, kernel size = 4, stride = 2:
+         0 0 1 2 3 4 5 0 0   # (0s are padding)
+         1   2   3           # (output frames of a convolution, last 0 is never used)
+         0 0 1 2 3 4 5 0     # (output of tr. conv., but pos. 5 is going to get removed as padding)
+         1 2 3 4             # once you remove the padding, we are missing one time step!
+     """
+     extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
+     return F.pad(x, (0, extra_padding))
+
+
+ def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
+     """Tiny wrapper around F.pad, just to allow for reflect padding on small input.
+     If this is the case, we insert extra 0 padding to the right before the reflection happens.
+     """
+     length = x.shape[-1]
+     padding_left, padding_right = paddings
+     assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
+     if mode == 'reflect':
+         max_pad = max(padding_left, padding_right)
+         extra_pad = 0
+         if length <= max_pad:
+             extra_pad = max_pad - length + 1
+             x = F.pad(x, (0, extra_pad))
+         padded = F.pad(x, paddings, mode, value)
+         end = padded.shape[-1] - extra_pad
+         return padded[..., :end]
+     else:
+         return F.pad(x, paddings, mode, value)
+
+
+ def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
+     """Remove padding from x, properly handling zero padding. Only for 1d!"""
+     padding_left, padding_right = paddings
+     assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
+     assert (padding_left + padding_right) <= x.shape[-1]
+     end = x.shape[-1] - padding_right
+     return x[..., padding_left: end]
+
+
+ class NormConv1d(nn.Module):
+     """Wrapper around Conv1d and normalization applied to this conv
+     to provide a uniform interface across normalization approaches.
+     """
+     def __init__(self, *args, causal: bool = False, norm: str = 'none',
+                  norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+         super().__init__()
+         self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
+         self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
+         self.norm_type = norm
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.norm(x)
+         return x
+
+
+ class NormConv2d(nn.Module):
+     """Wrapper around Conv2d and normalization applied to this conv
+     to provide a uniform interface across normalization approaches.
+     """
+     def __init__(self, *args, norm: str = 'none',
+                  norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+         super().__init__()
+         self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm)
+         self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs)
+         self.norm_type = norm
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.norm(x)
+         return x
+
+
+ class NormConvTranspose1d(nn.Module):
+     """Wrapper around ConvTranspose1d and normalization applied to this conv
+     to provide a uniform interface across normalization approaches.
+     """
+     def __init__(self, *args, causal: bool = False, norm: str = 'none',
+                  norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+         super().__init__()
+         self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
+         self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
+         self.norm_type = norm
+
+     def forward(self, x):
+         x = self.convtr(x)
+         x = self.norm(x)
+         return x
+
+
+ class NormConvTranspose2d(nn.Module):
+     """Wrapper around ConvTranspose2d and normalization applied to this conv
+     to provide a uniform interface across normalization approaches.
+     """
+     def __init__(self, *args, norm: str = 'none',
+                  norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
+         super().__init__()
+         self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm)
+         self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs)
+
+     def forward(self, x):
+         x = self.convtr(x)
+         x = self.norm(x)
+         return x
+
+
+ class SConv1d(nn.Module):
+     """Conv1d with some builtin handling of asymmetric or causal padding
+     and normalization.
+     """
+     def __init__(self, in_channels: int, out_channels: int,
+                  kernel_size: int, stride: int = 1, dilation: int = 1,
+                  groups: int = 1, bias: bool = True, causal: bool = False,
+                  norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
+                  pad_mode: str = 'reflect'):
+         super().__init__()
+         # warn user on unusual setup between dilation and stride
+         if stride > 1 and dilation > 1:
+             warnings.warn('SConv1d has been initialized with stride > 1 and dilation > 1'
+                           f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
+         self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
+                                dilation=dilation, groups=groups, bias=bias, causal=causal,
+                                norm=norm, norm_kwargs=norm_kwargs)
+         self.causal = causal
+         self.pad_mode = pad_mode
+
+     def forward(self, x):
+         B, C, T = x.shape
+         kernel_size = self.conv.conv.kernel_size[0]
+         stride = self.conv.conv.stride[0]
+         dilation = self.conv.conv.dilation[0]
+         kernel_size = (kernel_size - 1) * dilation + 1  # effective kernel size with dilations
+         padding_total = kernel_size - stride
+         extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)
+         if self.causal:
+             # Left padding for causal
+             x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
+         else:
+             # Asymmetric padding required for odd strides
+             padding_right = padding_total // 2
+             padding_left = padding_total - padding_right
+             x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
+         return self.conv(x)
+
+
+ class SConvTranspose1d(nn.Module):
+     """ConvTranspose1d with some builtin handling of asymmetric or causal padding
+     and normalization.
+     """
+     def __init__(self, in_channels: int, out_channels: int,
+                  kernel_size: int, stride: int = 1, causal: bool = False,
+                  norm: str = 'none', trim_right_ratio: float = 1.,
+                  norm_kwargs: tp.Dict[str, tp.Any] = {}):
+         super().__init__()
+         self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
+                                           causal=causal, norm=norm, norm_kwargs=norm_kwargs)
+         self.causal = causal
+         self.trim_right_ratio = trim_right_ratio
+         assert self.causal or self.trim_right_ratio == 1., \
+             "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
+         assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.
+
+     def forward(self, x):
+         kernel_size = self.convtr.convtr.kernel_size[0]
+         stride = self.convtr.convtr.stride[0]
+         padding_total = kernel_size - stride
+
+         y = self.convtr(x)
+
+         # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be
+         # removed at the very end, when keeping only the right length for the output,
+         # as removing it here would require also passing the length at the matching layer
+         # in the encoder.
+         if self.causal:
+             # Trim the padding on the right according to the specified ratio
+             # if trim_right_ratio = 1.0, trim everything from right
+             padding_right = math.ceil(padding_total * self.trim_right_ratio)
+             padding_left = padding_total - padding_right
+             y = unpad1d(y, (padding_left, padding_right))
+         else:
+             # Asymmetric padding required for odd strides
+             padding_right = padding_total // 2
+             padding_left = padding_total - padding_right
+             y = unpad1d(y, (padding_left, padding_right))
+         return y
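
Usage note (not part of the committed file): a minimal sketch of how these wrappers fit together, assuming Conv.py lives in a package (here called `mymodel` purely for illustration) next to the `norm.py` module that provides ConvLayerNorm. The numbers in the comments just work through the padding formula defined above for one configuration.

import torch

from mymodel.conv import SConv1d, SConvTranspose1d, get_extra_padding_for_conv1d

# (batch, channels, time); the length is deliberately not a multiple of the stride.
x = torch.randn(2, 16, 1001)

# Padding arithmetic: with kernel_size=4, stride=2, padding_total = kernel_size - stride = 2,
# n_frames = (1001 - 4 + 2) / 2 + 1 = 500.5, so one extra sample makes the last window full.
print(get_extra_padding_for_conv1d(x, kernel_size=4, stride=2, padding_total=2))  # 1

# Causal encoder/decoder pair: SConv1d pads on the left, SConvTranspose1d trims the
# transposed-convolution padding from the right (trim_right_ratio=1.0).
enc = SConv1d(16, 32, kernel_size=4, stride=2, causal=True, norm='weight_norm')
dec = SConvTranspose1d(32, 16, kernel_size=4, stride=2, causal=True,
                       trim_right_ratio=1.0, norm='weight_norm')

y = enc(x)   # roughly T / stride frames, thanks to the extra end padding
z = dec(y)   # slightly longer than the input; callers crop to the original length
print(x.shape, y.shape, z.shape)

As the comment in SConvTranspose1d.forward notes, the extra padding added on the encoder side is only removed at the very end by cropping the decoder output to the original length, so z is expected to be a few samples longer than x in this sketch.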