from typing import Optional, Union
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from torch import Tensor, nn
from detrsmpl.utils.transforms import (
aa_to_rotmat,
rot6d_to_rotmat,
rotmat_to_aa,
rotmat_to_rot6d,
)
from ..builder import POST_PROCESSING


class SmoothNetResBlock(nn.Module):
"""Residual block module used in SmoothNet.
Args:
in_channels (int): Input channel number.
hidden_channels (int): The hidden feature channel number.
        dropout (float): Dropout probability. Default: 0.1
Shape:
Input: (*, in_channels)
Output: (*, in_channels)
"""

    def __init__(self, in_channels, hidden_channels, dropout=0.1):
super().__init__()
self.linear1 = nn.Linear(in_channels, hidden_channels)
self.linear2 = nn.Linear(hidden_channels, in_channels)
self.lrelu = nn.LeakyReLU(0.2, inplace=True)
self.dropout = nn.Dropout(p=dropout, inplace=True)

    def forward(self, x):
identity = x
x = self.linear1(x)
x = self.dropout(x)
x = self.lrelu(x)
x = self.linear2(x)
x = self.dropout(x)
x = self.lrelu(x)
out = x + identity
return out
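
# Not part of the original file: a brief sketch of the block's dataflow for a
# tensor of shape (*, in_channels), matching the forward pass above.
#
#   y = lrelu(dropout(linear1(x)))   # (*, hidden_channels)
#   y = lrelu(dropout(linear2(y)))   # (*, in_channels)
#   out = y + x                      # residual (skip) connection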


class SmoothNet(nn.Module):
"""SmoothNet is a plug-and-play temporal-only network to refine human
poses. It works for 2d/3d/6d pose smoothing.
"SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos",
arXiv'2021. More details can be found in the `paper
<https://arxiv.org/abs/2112.13715>`__ .
Note:
N: The batch size
T: The temporal length of the pose sequence
C: The total pose dimension (e.g. keypoint_number * keypoint_dim)
Args:
window_size (int): The size of the input window.
output_size (int): The size of the output window.
hidden_size (int): The hidden feature dimension in the encoder,
the decoder and between residual blocks. Default: 512
        res_hidden_size (int): The hidden feature dimension inside the
            residual blocks. Default: 512
        num_blocks (int): The number of residual blocks. Default: 5
        dropout (float): Dropout probability. Default: 0.1
Shape:
Input: (N, C, T) the original pose sequence
Output: (N, C, T) the smoothed pose sequence
"""

    def __init__(self,
window_size: int,
output_size: int,
hidden_size: int = 512,
res_hidden_size: int = 512,
num_blocks: int = 5,
dropout: float = 0.1):
super().__init__()
self.window_size = window_size
self.output_size = output_size
self.hidden_size = hidden_size
self.res_hidden_size = res_hidden_size
self.num_blocks = num_blocks
self.dropout = dropout
        assert output_size <= window_size, (
            'The output size should be less than or equal to the window '
            f'size. Got output_size=={output_size} and '
            f'window_size=={window_size}')
# Build encoder layers
self.encoder = nn.Sequential(nn.Linear(window_size, hidden_size),
nn.LeakyReLU(0.1, inplace=True))
# Build residual blocks
res_blocks = []
for _ in range(num_blocks):
res_blocks.append(
SmoothNetResBlock(in_channels=hidden_size,
hidden_channels=res_hidden_size,
dropout=dropout))
self.res_blocks = nn.Sequential(*res_blocks)
# Build decoder layers
self.decoder = nn.Linear(hidden_size, output_size)

    def forward(self, x: Tensor) -> Tensor:
"""Forward function."""
        N, C, T = x.shape
        assert T >= self.window_size, (
            'Input sequence length must be no less than the window size. '
            f'Got x.shape[2]=={T} and window_size=={self.window_size}')
        num_windows = T - self.window_size + 1
# Unfold x to obtain input sliding windows
# [N, C, num_windows, window_size]
x = x.unfold(2, self.window_size, 1)
# Forward layers
x = self.encoder(x)
x = self.res_blocks(x)
x = self.decoder(x) # [N, C, num_windows, output_size]
# Accumulate output ensembles
out = x.new_zeros(N, C, T)
count = x.new_zeros(T)
for t in range(num_windows):
out[..., t:t + self.output_size] += x[:, :, t]
count[t:t + self.output_size] += 1.0
return out.div(count)
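
# Not part of the original file: a minimal usage sketch under assumed shapes.
# With untrained weights the output values are meaningless; this only
# illustrates the expected tensor layout.
#
#   net = SmoothNet(window_size=8, output_size=8)
#   poses = torch.randn(2, 24 * 6, 100)   # [N, C, T]
#   smoothed = net(poses)                 # [N, C, T], same shape as the input
#
# Internally the sequence is unfolded into T - window_size + 1 overlapping
# windows, each window is refined independently, and overlapping predictions
# for the same frame are averaged back into a single length-T sequence.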


@POST_PROCESSING.register_module(name=['SmoothNetFilter', 'smoothnet'])
class SmoothNetFilter:
"""Apply SmoothNet filter.
"SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos",
arXiv'2021. More details can be found in the `paper
<https://arxiv.org/abs/2112.13715>`__ .
    Args:
        window_size (int): The size of the filter window. It is also the
            window_size of the SmoothNet model.
        output_size (int): The output window size of the SmoothNet model.
        checkpoint (str, optional): The checkpoint file of the pretrained
            SmoothNet model. Note that `checkpoint` should match
            `window_size` and `output_size`. If None, the model keeps its
            random initialization. Default: None
        hidden_size (int): SmoothNet argument. See :class:`SmoothNet` for
            details. Default: 512
        res_hidden_size (int): SmoothNet argument. See :class:`SmoothNet`
            for details. Default: 512
        num_blocks (int): SmoothNet argument. See :class:`SmoothNet` for
            details. Default: 5
        device (str): Device for model inference. Default: 'cpu'
"""

    def __init__(
self,
window_size: int,
output_size: int,
checkpoint: Optional[str] = None,
hidden_size: int = 512,
res_hidden_size: int = 512,
num_blocks: int = 5,
device: str = 'cpu',
):
        super().__init__()
self.window_size = window_size
self.device = device
self.smoothnet = SmoothNet(window_size, output_size, hidden_size,
res_hidden_size, num_blocks)
self.smoothnet.to(device)
if checkpoint:
load_checkpoint(self.smoothnet,
checkpoint,
map_location=self.device)
self.smoothnet.eval()
for p in self.smoothnet.parameters():
p.requires_grad_(False)

    def __call__(self, x: Union[np.ndarray, torch.Tensor]):
x_type = 'tensor'
if not isinstance(x, torch.Tensor):
x_type = 'array'
assert x.ndim == 3, ('Input should be an array with shape [T, K, C]'
f', but got invalid shape {x.shape}')
T, K, C = x.shape
        assert C == 3 or C == 6 or C == 9, (
            'The last dimension C should be 3 (axis-angle), 6 (rotation 6D) '
            f'or 9 (rotation matrix), but got C=={C}')
if T < self.window_size:
# Skip smoothing if the input length is less than the window size
smoothed = x
else:
if x_type == 'array':
dtype = x.dtype
# Convert to tensor and forward the model
with torch.no_grad():
if x_type == 'array':
x = torch.tensor(x,
dtype=torch.float32,
device=self.device)
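                # Convert the input rotations to the continuous 6D
                # representation before smoothing; the result is converted
                # back to the original format (rotation matrix or axis-angle)
                # after the forward pass.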
if C == 9:
input_type = 'matrix'
x = rotmat_to_rot6d(x.reshape(-1, 3, 3)).reshape(T, K, -1)
elif C == 3:
input_type = 'axis_angles'
                    x = rotmat_to_rot6d(aa_to_rotmat(
                        x.reshape(-1, 3))).reshape(T, K, -1)
else:
input_type = 'rotation_6d'
x = x.view(1, T, -1).permute(0, 2, 1) # to [1, KC, T]
smoothed = self.smoothnet(x) # in shape [1, KC, T]
# Convert model output back to input shape and format
smoothed = smoothed.permute(0, 2, 1).view(T, K, -1) # to [T, K, C]
if input_type == 'matrix':
smoothed = rot6d_to_rotmat(smoothed.reshape(-1, 6)).reshape(
T, K, C)
elif input_type == 'axis_angles':
smoothed = rotmat_to_aa(
rot6d_to_rotmat(smoothed.reshape(-1, 6))).reshape(T, K, C)
if x_type == 'array':
smoothed = smoothed.cpu().numpy().astype(
dtype) # to numpy.ndarray
return smoothed
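

# Not part of the original file: a minimal, self-contained usage sketch of
# SmoothNetFilter. The checkpoint is left as None (untrained weights), so the
# output is only useful for checking shapes and dtypes, not for real smoothing.
if __name__ == '__main__':
    smoother = SmoothNetFilter(window_size=8, output_size=8)
    # A dummy axis-angle pose sequence in shape [T, K, C] with C == 3
    poses = np.random.randn(100, 24, 3).astype(np.float32)
    smoothed = smoother(poses)
    print(smoothed.shape, smoothed.dtype)  # (100, 24, 3) float32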