# This model is part of the paper "Fine-tuning deep learning model parameters for improved super-resolution of dynamic MRI with prior-knowledge" (https://doi.org/10.1016/j.artmed.2021.102196)
# and has been published on GitHub: https://github.com/soumickmj/FTSuperResDynMRI/blob/main/models/unet3D.py

import torch
from torch import nn
import torch.nn.functional as F

__author__ = "Soumick Chatterjee, Chompunuch Sarasaen"
__copyright__ = "Copyright 2020, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick@live.com"
__status__ = "Published"


class UNet(nn.Module):
    """
    3D adaptation of
    U-Net: Convolutional Networks for Biomedical Image Segmentation
    (Ronneberger et al., 2015)
    https://arxiv.org/abs/1505.04597

    Note that the defaults used here (depth=3, padding=True, 3D convolutions)
    differ from the configuration of the original 2D paper.

    Adapted from https://discuss.pytorch.org/t/unet-implementation/426

    Args:
        in_channels (int): number of input channels
        n_classes (int): number of output channels
        depth (int): depth of the network
        wf (int): number of filters in the first layer is 2**wf
        padding (bool): if True, apply padding such that the input shape
                        is the same as the output.
                        This may introduce artifacts
        batch_norm (bool): use BatchNorm after layers with an
                           activation function
        up_mode (str): one of 'upconv' or 'upsample'.
                       'upconv' will use transposed convolutions for
                       learned upsampling.
                       'upsample' will use trilinear upsampling.
        dropout (bool): if True, apply Dropout3d after each pooling step
                        in the contracting path
    """

    def __init__(self, in_channels=1, n_classes=1, depth=3, wf=6, padding=True,
                 batch_norm=False, up_mode='upconv', dropout=False):
        super(UNet, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.padding = padding
        self.depth = depth
        self.dropout = nn.Dropout3d() if dropout else nn.Sequential()

        prev_channels = in_channels
        self.down_path = nn.ModuleList()
        for i in range(depth):
            self.down_path.append(UNetConvBlock(prev_channels, 2**(wf + i),
                                                padding, batch_norm))
            prev_channels = 2**(wf + i)

        self.up_path = nn.ModuleList()
        for i in reversed(range(depth - 1)):
            self.up_path.append(UNetUpBlock(prev_channels, 2**(wf + i),
                                            up_mode, padding, batch_norm))
            prev_channels = 2**(wf + i)

        self.last = nn.Conv3d(prev_channels, n_classes, kernel_size=1)

    def forward(self, x):
        blocks = []
        for i, down in enumerate(self.down_path):
            x = down(x)
            if i != len(self.down_path) - 1:
                blocks.append(x)  # keep the feature map for the skip connection
                x = F.avg_pool3d(x, 2)
                x = self.dropout(x)

        for i, up in enumerate(self.up_path):
            x = up(x, blocks[-i - 1])

        return self.last(x)


class UNetConvBlock(nn.Module):
    """Two 3x3x3 convolutions, each followed by ReLU (and optional BatchNorm)."""

    def __init__(self, in_size, out_size, padding, batch_norm):
        super(UNetConvBlock, self).__init__()
        block = []

        block.append(nn.Conv3d(in_size, out_size, kernel_size=3,
                               padding=int(padding)))
        block.append(nn.ReLU())
        if batch_norm:
            block.append(nn.BatchNorm3d(out_size))

        block.append(nn.Conv3d(out_size, out_size, kernel_size=3,
                               padding=int(padding)))
        block.append(nn.ReLU())
        if batch_norm:
            block.append(nn.BatchNorm3d(out_size))

        self.block = nn.Sequential(*block)

    def forward(self, x):
        out = self.block(x)
        return out


class UNetUpBlock(nn.Module):
    """Upsampling followed by a UNetConvBlock on the concatenated skip connection."""

    def __init__(self, in_size, out_size, up_mode, padding, batch_norm):
        super(UNetUpBlock, self).__init__()
        if up_mode == 'upconv':
            self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=2,
                                         stride=2)
        elif up_mode == 'upsample':
            self.up = nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2),
                                    nn.Conv3d(in_size, out_size, kernel_size=1))

        self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)

    def center_crop(self, layer, target_size):
        _, _, layer_depth, layer_height, layer_width = layer.size()
        diff_z = (layer_depth - target_size[0]) // 2
        diff_y = (layer_height - target_size[1]) // 2
        diff_x = (layer_width - target_size[2]) // 2
        return layer[:, :, diff_z:(diff_z + target_size[0]),
                     diff_y:(diff_y + target_size[1]),
                     diff_x:(diff_x + target_size[2])]
        # 2D variant, kept for reference:
        # _, _, layer_height, layer_width = layer.size()
        # diff_y = (layer_height - target_size[0]) // 2
        # diff_x = (layer_width - target_size[1]) // 2
        # return layer[:, :, diff_y:(diff_y + target_size[0]),
        #              diff_x:(diff_x + target_size[1])]

    def forward(self, x, bridge):
        up = self.up(x)
        # Instead of center-cropping the skip connection, interpolate the
        # upsampled volume to match it; shape[2:] drops the batch and channel
        # dims, so target_size indexing starts at 0 for the depth axis.
        # bridge = self.center_crop(bridge, up.shape[2:])
        up = F.interpolate(up, size=bridge.shape[2:], mode='trilinear')
        out = torch.cat([up, bridge], 1)
        out = self.conv_block(out)
        return out
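

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the published file): builds the network
# with its default arguments and runs a single forward pass on random data.
# The input shape (1, 1, 16, 64, 64) is an arbitrary example; with depth=3
# the volume is average-pooled twice, so spatial sizes divisible by 4 keep
# the skip connections aligned without relying on the interpolation step.
if __name__ == "__main__":
    net = UNet(in_channels=1, n_classes=1, depth=3, wf=6,
               padding=True, batch_norm=False, up_mode='upconv')
    x = torch.randn(1, 1, 16, 64, 64)  # (batch, channels, depth, height, width)
    with torch.no_grad():
        y = net(x)
    # With padding=True the output keeps the input's spatial shape,
    # with n_classes channels: (1, 1, 16, 64, 64) -> (1, 1, 16, 64, 64)
    print(x.shape, '->', y.shape)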