"""
DETR Transformer class.

Copy-paste from torch.nn.Transformer with modifications:
    * positional encodings are passed in MHattention
    * extra LN at the end of encoder is removed
    * decoder returns a stack of activations from all decoding layers

Only the encoder stack is kept in this module; its forward pass additionally
returns the attention weights of the last encoder layer.
"""

import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor

from .MHA import MultiheadAttention


class VisionLanguageEncoder(nn.Module):

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()

        # `num_decoder_layers` is accepted for interface parity but unused:
        # only the encoder stack is built in this module.
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix; 1-D parameters (biases, norms)
        # keep their default initialization.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, pos_embed):
        return self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
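

# Shape conventions (an assumption, not enforced in this file): the local
# MultiheadAttention is presumed to follow the torch.nn.MultiheadAttention
# layout, so `src` and `pos_embed` are sequence-first tensors of shape
# (seq_len, batch, d_model) and `mask` is a key-padding mask of shape
# (batch, seq_len) with True marking padded positions. A minimal sketch:
#
#     model = VisionLanguageEncoder(d_model=256, nhead=8, num_encoder_layers=6)
#     src = torch.rand(20, 2, 256)                  # 20 tokens, batch of 2
#     pos = torch.rand(20, 2, 256)                  # positional encodings, same shape as src
#     pad = torch.zeros(2, 20, dtype=torch.bool)    # nothing padded
#     out, attn = model(src, pad, pos)              # out: (20, 2, 256); attn: last-layer weights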


class TransformerEncoder(nn.Module):

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        output = src
        weights = []

        for layer in self.layers:
            output, attn_output_weights = layer(output, src_mask=mask,
                                                src_key_padding_mask=src_key_padding_mask, pos=pos)
            weights.append(attn_output_weights)

        if self.norm is not None:
            output = self.norm(output)

        # Only the attention weights of the last encoder layer are returned.
        return output, weights[-1]


class TransformerEncoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)

        # Feed-forward network
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        # Post-norm: attention/FFN first, LayerNorm after each residual connection.
        q = k = self.with_pos_embed(src, pos)
        src2, attn_output_weights = self.self_attn(q, k, value=src, attn_mask=src_mask,
                                                   key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src, attn_output_weights

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        # Pre-norm: LayerNorm before attention/FFN. The attention weights are
        # returned as well, so both code paths have the same signature and the
        # caller in TransformerEncoder.forward can unpack them uniformly.
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2, attn_output_weights = self.self_attn(q, k, value=src2, attn_mask=src_mask,
                                                   key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src, attn_output_weights

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


def _get_clones(module, N):
    # Deep-copy the layer N times so each clone has independent parameters.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


def build_vl_transformer(args):
    return VisionLanguageEncoder(
        d_model=args.vl_hidden_dim,
        dropout=args.vl_dropout,
        nhead=args.vl_nheads,
        dim_feedforward=args.vl_dim_feedforward,
        num_encoder_layers=args.vl_enc_layers,
        normalize_before=False,
    )
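

# `build_vl_transformer` only reads the five `args` attributes used above. A
# hedged sketch of how such a namespace might be assembled for a quick test
# (the attribute names come from this file; the values are illustrative
# assumptions, not the project's actual defaults):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(vl_hidden_dim=256, vl_dropout=0.1, vl_nheads=8,
#                            vl_dim_feedforward=2048, vl_enc_layers=6)
#     vl_transformer = build_vl_transformer(args)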


def _get_activation_fn(activation):
    """Return an activation function given a string."""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
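

if __name__ == "__main__":
    # Minimal smoke test, added as an illustrative sketch rather than part of
    # the original module. It assumes the local MultiheadAttention mirrors the
    # torch.nn.MultiheadAttention interface, and it only runs when the file is
    # executed as part of its package (e.g. `python -m <package>.<module>`),
    # because of the relative `.MHA` import above.
    model = VisionLanguageEncoder(d_model=256, nhead=8, num_encoder_layers=6,
                                  dim_feedforward=1024, dropout=0.1)
    model.eval()
    src = torch.rand(20, 2, 256)                    # (seq_len, batch, d_model)
    pos = torch.rand(20, 2, 256)                    # positional encodings
    pad = torch.zeros(2, 20, dtype=torch.bool)      # no padded positions
    with torch.no_grad():
        out, attn = model(src, pad, pos)
    # out keeps the input shape; attn's shape depends on the MHA implementation.
    print(out.shape, attn.shape)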