import numpy as np
import torch
import torch.nn as nn
import functools

import os
import cv2
from einops import rearrange
from annotator.util import annotator_ckpts_path


class UnetGenerator(nn.Module):
    """Create a Unet-based generator."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator.

        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in the UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 becomes 1x1 at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
            use_dropout (bool) -- whether to use dropout in the intermediate blocks

        We construct the U-Net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(UnetGenerator, self).__init__()
        # innermost block (the bottleneck)
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # intermediate blocks with ngf * 8 filters
        for _ in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        # outermost block maps back to output_nc channels
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)
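

# A minimal sanity-check sketch (not part of the original module): it assumes the same
# constructor arguments that LineartAnimeDetector below uses (3 input channels, 1 output
# channel, num_downs=8) and a dummy input whose side length is a multiple of
# 2 ** num_downs, which the recursive down/upsampling requires.
def _example_unet_generator_shapes():
    norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
    dummy = torch.zeros(1, 3, 256, 256)  # 256 = 2 ** 8, so the bottleneck reaches 1x1
    with torch.no_grad():
        out = net(dummy)
    assert out.shape == (1, 1, 256, 256)  # single-channel map in [-1, 1] (Tanh at the output)
    return out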


class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.

        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool) -- if this module is the outermost module
            innermost (bool) -- if this module is the innermost module
            norm_layer -- normalization layer
            use_dropout (bool) -- whether to use dropout layers
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine parameters by default, so the conv layers need their own bias
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # concatenate the skip connection along the channel dimension
            return torch.cat([x, self.model(x)], 1)
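

# A small illustrative sketch (an assumption, not from the original file): for any
# non-outermost block, forward() concatenates the block's input with its output along
# the channel axis, so a block built with input_nc defaulting to outer_nc returns
# 2 * outer_nc channels -- which is why the enclosing block's upconv expects
# inner_nc * 2 input channels.
def _example_skip_block_channels():
    norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    inner = UnetSkipConnectionBlock(64, 128, input_nc=None, submodule=None,
                                    norm_layer=norm_layer, innermost=True)
    dummy = torch.zeros(1, 64, 32, 32)
    with torch.no_grad():
        out = inner(dummy)
    assert out.shape == (1, 128, 32, 32)  # 64 skip channels + 64 channels from the upsampling path
    return out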


class LineartAnimeDetector:
    def __init__(self):
        remote_model_path = "https://huggingface.co./lllyasviel/Annotators/resolve/main/netG.pth"
        modelpath = os.path.join(annotator_ckpts_path, "netG.pth")
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
        net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)

        # strip the 'module.' prefix left over from DataParallel checkpoints
        ckpt = torch.load(modelpath, map_location=torch.device('cpu'))
        for key in list(ckpt.keys()):
            if 'module.' in key:
                ckpt[key.replace('module.', '')] = ckpt[key]
                del ckpt[key]
        net.load_state_dict(ckpt)

        net = net.cpu()
        net.eval()
        self.model = net

    def __call__(self, input_image):
        H, W, C = input_image.shape
        # round the working resolution up to a multiple of 256 so that 8 downsamplings
        # (2 ** 8 = 256) reach an integer spatial size at the bottleneck
        Hn = 256 * int(np.ceil(float(H) / 256.0))
        Wn = 256 * int(np.ceil(float(W) / 256.0))
        img = cv2.resize(input_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC)
        with torch.no_grad():
            # scale pixel values from [0, 255] to [-1, 1] and move channels first
            image_feed = torch.from_numpy(img).float().cpu()
            image_feed = image_feed / 127.5 - 1.0
            image_feed = rearrange(image_feed, 'h w c -> 1 c h w')

            # the generator outputs a single-channel map in [-1, 1]; map it back to [0, 255]
            line = self.model(image_feed)[0, 0] * 127.5 + 127.5
            line = line.cpu().numpy()

            # restore the original resolution
            line = cv2.resize(line, (W, H), interpolation=cv2.INTER_CUBIC)
            line = line.clip(0, 255).astype(np.uint8)
            return line
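

# A hedged usage sketch, not part of the original annotator: it assumes an image named
# 'input.png' exists next to this script and that the checkpoint can be downloaded (or is
# already cached) under annotator_ckpts_path. Note that __call__ applies no BGR/RGB
# conversion itself, so pass whichever channel order your pipeline expects.
if __name__ == "__main__":
    detector = LineartAnimeDetector()
    image = cv2.imread("input.png")  # HxWx3 uint8, BGR as loaded by OpenCV
    if image is None:
        raise FileNotFoundError("input.png not found -- adjust the path for your setup")
    lineart = detector(image)        # HxW uint8 line drawing
    cv2.imwrite("lineart.png", lineart)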