Staticaliza
committed on
Update modules/quantize.py
- modules/quantize.py +228 -228
modules/quantize.py
CHANGED
@@ -1,229 +1,229 @@
 from dac.nn.quantize import ResidualVectorQuantize
 from torch import nn
 from modules.wavenet import WN
 import torch
 import torchaudio
 import torchaudio.functional as audio_F
 import numpy as np
-from .
+from .bigvgan import *
 from torch.nn.utils import weight_norm
 from torch import nn, sin, pow
 from einops.layers.torch import Rearrange
 from dac.model.encodec import SConv1d
 
 def init_weights(m):
     if isinstance(m, nn.Conv1d):
         nn.init.trunc_normal_(m.weight, std=0.02)
         nn.init.constant_(m.bias, 0)
 
 
 def WNConv1d(*args, **kwargs):
     return weight_norm(nn.Conv1d(*args, **kwargs))
 
 
 def WNConvTranspose1d(*args, **kwargs):
     return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
 
 class SnakeBeta(nn.Module):
     """
     A modified Snake function which uses separate parameters for the magnitude of the periodic components
     Shape:
         - Input: (B, C, T)
         - Output: (B, C, T), same shape as the input
     Parameters:
         - alpha - trainable parameter that controls frequency
         - beta - trainable parameter that controls magnitude
     References:
         - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
         https://arxiv.org/abs/2006.08195
     Examples:
         >>> a1 = snakebeta(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
     """
 
     def __init__(
         self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
     ):
         """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - alpha - trainable parameter that controls frequency
             - beta - trainable parameter that controls magnitude
             alpha is initialized to 1 by default, higher values = higher-frequency.
             beta is initialized to 1 by default, higher values = higher-magnitude.
             alpha will be trained along with the rest of your model.
         """
         super(SnakeBeta, self).__init__()
         self.in_features = in_features
 
         # initialize alpha
         self.alpha_logscale = alpha_logscale
         if self.alpha_logscale:  # log scale alphas initialized to zeros
             self.alpha = nn.Parameter(torch.zeros(in_features) * alpha)
             self.beta = nn.Parameter(torch.zeros(in_features) * alpha)
         else:  # linear scale alphas initialized to ones
             self.alpha = nn.Parameter(torch.ones(in_features) * alpha)
             self.beta = nn.Parameter(torch.ones(in_features) * alpha)
 
         self.alpha.requires_grad = alpha_trainable
         self.beta.requires_grad = alpha_trainable
 
         self.no_div_by_zero = 0.000000001
 
     def forward(self, x):
         """
         Forward pass of the function.
         Applies the function to the input elementwise.
         SnakeBeta := x + 1/b * sin^2 (xa)
         """
         alpha = self.alpha.unsqueeze(0).unsqueeze(-1)  # line up with x to [B, C, T]
         beta = self.beta.unsqueeze(0).unsqueeze(-1)
         if self.alpha_logscale:
             alpha = torch.exp(alpha)
             beta = torch.exp(beta)
         x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
 
         return x
 
 class ResidualUnit(nn.Module):
     def __init__(self, dim: int = 16, dilation: int = 1):
         super().__init__()
         pad = ((7 - 1) * dilation) // 2
         self.block = nn.Sequential(
             Activation1d(activation=SnakeBeta(dim, alpha_logscale=True)),
             WNConv1d(dim, dim, kernel_size=7, dilation=dilation, padding=pad),
             Activation1d(activation=SnakeBeta(dim, alpha_logscale=True)),
             WNConv1d(dim, dim, kernel_size=1),
         )
 
     def forward(self, x):
         return x + self.block(x)
 
 class CNNLSTM(nn.Module):
     def __init__(self, indim, outdim, head, global_pred=False):
         super().__init__()
         self.global_pred = global_pred
         self.model = nn.Sequential(
             ResidualUnit(indim, dilation=1),
             ResidualUnit(indim, dilation=2),
             ResidualUnit(indim, dilation=3),
             Activation1d(activation=SnakeBeta(indim, alpha_logscale=True)),
             Rearrange("b c t -> b t c"),
         )
         self.heads = nn.ModuleList([nn.Linear(indim, outdim) for i in range(head)])
 
     def forward(self, x):
         # x: [B, C, T]
         x = self.model(x)
         if self.global_pred:
             x = torch.mean(x, dim=1, keepdim=False)
         outs = [head(x) for head in self.heads]
         return outs
 
 def sequence_mask(length, max_length=None):
     if max_length is None:
         max_length = length.max()
     x = torch.arange(max_length, dtype=length.dtype, device=length.device)
     return x.unsqueeze(0) < length.unsqueeze(1)
 class FAquantizer(nn.Module):
     def __init__(self, in_dim=1024,
                  n_p_codebooks=1,
                  n_c_codebooks=2,
                  n_t_codebooks=2,
                  n_r_codebooks=3,
                  codebook_size=1024,
                  codebook_dim=8,
                  quantizer_dropout=0.5,
                  causal=False,
                  separate_prosody_encoder=False,
                  timbre_norm=False,):
         super(FAquantizer, self).__init__()
         conv1d_type = SConv1d# if causal else nn.Conv1d
         self.prosody_quantizer = ResidualVectorQuantize(
             input_dim=in_dim,
             n_codebooks=n_p_codebooks,
             codebook_size=codebook_size,
             codebook_dim=codebook_dim,
             quantizer_dropout=quantizer_dropout,
         )
 
         self.content_quantizer = ResidualVectorQuantize(
             input_dim=in_dim,
             n_codebooks=n_c_codebooks,
             codebook_size=codebook_size,
             codebook_dim=codebook_dim,
             quantizer_dropout=quantizer_dropout,
         )
 
         self.residual_quantizer = ResidualVectorQuantize(
             input_dim=in_dim,
             n_codebooks=n_r_codebooks,
             codebook_size=codebook_size,
             codebook_dim=codebook_dim,
             quantizer_dropout=quantizer_dropout,
         )
 
         self.melspec_linear = conv1d_type(in_channels=20, out_channels=256, kernel_size=1, causal=causal)
         self.melspec_encoder = WN(hidden_channels=256, kernel_size=5, dilation_rate=1, n_layers=8, gin_channels=0, p_dropout=0.2, causal=causal)
         self.melspec_linear2 = conv1d_type(in_channels=256, out_channels=1024, kernel_size=1, causal=causal)
 
         self.prob_random_mask_residual = 0.75
 
         SPECT_PARAMS = {
             "n_fft": 2048,
             "win_length": 1200,
             "hop_length": 300,
         }
         MEL_PARAMS = {
             "n_mels": 80,
         }
 
         self.to_mel = torchaudio.transforms.MelSpectrogram(
             n_mels=MEL_PARAMS["n_mels"], sample_rate=24000, **SPECT_PARAMS
         )
         self.mel_mean, self.mel_std = -4, 4
         self.frame_rate = 24000 / 300
         self.hop_length = 300
 
     def preprocess(self, wave_tensor, n_bins=20):
         mel_tensor = self.to_mel(wave_tensor.squeeze(1))
         mel_tensor = (torch.log(1e-5 + mel_tensor) - self.mel_mean) / self.mel_std
         return mel_tensor[:, :n_bins, :int(wave_tensor.size(-1) / self.hop_length)]
 
     def forward(self, x, wave_segments):
         outs = 0
         prosody_feature = self.preprocess(wave_segments)
 
         f0_input = prosody_feature  # (B, T, 20)
         f0_input = self.melspec_linear(f0_input)
         f0_input = self.melspec_encoder(f0_input, torch.ones(f0_input.shape[0], 1, f0_input.shape[2]).to(
             f0_input.device).bool())
         f0_input = self.melspec_linear2(f0_input)
 
         common_min_size = min(f0_input.size(2), x.size(2))
         f0_input = f0_input[:, :, :common_min_size]
 
         x = x[:, :, :common_min_size]
 
         z_p, codes_p, latents_p, commitment_loss_p, codebook_loss_p = self.prosody_quantizer(
             f0_input, 1
         )
         outs += z_p.detach()
 
         z_c, codes_c, latents_c, commitment_loss_c, codebook_loss_c = self.content_quantizer(
             x, 2
         )
         outs += z_c.detach()
 
         residual_feature = x - z_p.detach() - z_c.detach()
 
         z_r, codes_r, latents_r, commitment_loss_r, codebook_loss_r = self.residual_quantizer(
             residual_feature, 3
         )
 
         quantized = [z_p, z_c, z_r]
         codes = [codes_p, codes_c, codes_r]
 
         return quantized, codes
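
For quick reference, a minimal usage sketch of the module this commit touches (not part of the commit itself; it assumes the file is run from the repo root so that modules.quantize and its dac/bigvgan/wavenet dependencies resolve, and the shapes simply follow FAquantizer.forward above):

# Hypothetical smoke test for FAquantizer, assuming the surrounding repo is importable.
import torch

from modules.quantize import FAquantizer

quantizer = FAquantizer(in_dim=1024, codebook_size=1024, codebook_dim=8)
quantizer.eval()  # skip quantizer dropout during the check

batch, frames, hop = 2, 80, 300               # hop matches self.hop_length (24 kHz audio)
x = torch.randn(batch, 1024, frames)          # content features, (B, C, T)
wave_segments = torch.randn(batch, 1, frames * hop)  # waveform segment aligned to x

with torch.no_grad():
    quantized, codes = quantizer(x, wave_segments)

# quantized = [z_p, z_c, z_r] (prosody, content, residual), each (B, 1024, T')
# codes are the matching codebook-index tensors from ResidualVectorQuantize
print([q.shape for q in quantized])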