JustinLin610
committed on
Commit • 3e5e249
1 Parent(s): 9585e2a
Upload visual.py
visual.py ADDED
@@ -0,0 +1,426 @@
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from collections import OrderedDict
import math
import requests
from io import BytesIO
from functools import partial
from PIL import Image
from typing import Callable, Optional, Sequence, Tuple, List
import numpy as np

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import trunc_normal_
from torchvision import transforms
from torchvision.transforms import InterpolationMode

def get_abs_pos(abs_pos, tgt_size):
    # abs_pos: L, C
    # tgt_size: M
    # return: M, C
    src_size = int(math.sqrt(abs_pos.size(0)))
    tgt_size = int(math.sqrt(tgt_size))
    dtype = abs_pos.dtype

    if src_size != tgt_size:
        return F.interpolate(
            abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
            size=(tgt_size, tgt_size),
            mode="bicubic",
            align_corners=False,
        ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
    else:
        return abs_pos

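# Usage sketch (illustrative shapes, not a statement about the shipped model):
# get_abs_pos treats the embedding table as a square grid, so a 256-row table
# is read as 16x16 and bicubically resized to whatever square grid the target
# length implies. The width 1664 below is an arbitrary example value.
#
#   table = torch.randn(16 * 16, 1664)       # hypothetical (L, C) table
#   resized = get_abs_pos(table, 32 * 32)    # resized to a 32x32 grid
#   assert resized.shape == (1024, 1664)
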
# https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb


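# Worked example (illustrative, small numbers): for embed_dim=8 and grid_size=2
# the table has grid_size**2 = 4 rows; each row concatenates a 4-dim sincos
# encoding of its row index with a 4-dim sincos encoding of its column index,
# using sin/cos pairs at the geometrically spaced frequencies 1/10000**omega.
#
#   pe = get_2d_sincos_pos_embed(8, 2)
#   assert pe.shape == (4, 8)
#   assert pe[0, 0] == 0.0   # sin(0) for the (h=0, w=0) position

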
class Resampler(nn.Module):
    """
    A 2D perceiver-resampler network with a single cross-attention layer over
        (grid_size**2) learnable queries and 2D sincos positional embeddings
    Outputs:
        A tensor with the shape of (grid_size**2, embed_dim)
    """
    def __init__(
            self,
            grid_size,
            embed_dim,
            num_heads,
            kv_dim=None,
            norm_layer=nn.LayerNorm
    ):
        super().__init__()
        self.num_queries = grid_size ** 2
        self.embed_dim = embed_dim
        self.num_heads = num_heads

        self.pos_embed = nn.Parameter(
            torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
        ).requires_grad_(False)

        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)

        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, attn_mask=None):

        pos_embed = get_abs_pos(self.pos_embed, x.size(1))

        x = self.kv_proj(x)
        x = self.ln_kv(x).permute(1, 0, 2)

        N = x.shape[1]
        q = self.ln_q(self.query)
        out = self.attn(
            self._repeat(q, N) + self.pos_embed.unsqueeze(1),
            x + pos_embed.unsqueeze(1),
            x,
            attn_mask=attn_mask)[0]
        return out.permute(1, 0, 2)

    def _repeat(self, query, N: int):
        return query.unsqueeze(1).repeat(1, N, 1)


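# Usage sketch (illustrative values, not the released configuration): compress
# a variable-length sequence of visual features into grid_size**2 = 64 query
# tokens via one cross-attention layer; the feature width 1024 and the 32x32
# input grid are assumptions for this example.
#
#   resampler = Resampler(grid_size=8, embed_dim=512, num_heads=4, kv_dim=1024)
#   feats = torch.randn(2, 1024, 1024)   # (batch, seq_len, kv_dim)
#   out = resampler(feats)               # -> (2, 64, 512)

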
class VisualAttention(nn.Module):
    """self-attention layer class.

    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.
    """

    def __init__(self, embed_dim, num_heads,
                 bias=True, kdim=None, vdim=None):
        super(VisualAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads

        # Per attention head and per partition values.
        assert embed_dim % num_heads == 0
        self.hidden_size_per_attention_head = embed_dim // num_heads
        self.num_attention_heads_per_partition = num_heads
        self.hidden_size_per_partition = embed_dim

        # Strided linear layer.
        assert self._qkv_same_embed_dim, 'Only Support SelfAttention Currently'
        self.in_proj = nn.Linear(embed_dim, 3 * embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)

    def forward(self, query, key, value, attn_mask=None):
        # query/key/value: [sq, b, h]
        sq, b, _ = query.size()

        assert query is key, 'Only Support Self-Attention Currently'
        sk = sq
        mixed_x_layer = self.in_proj(query)

        # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
        new_tensor_shape = mixed_x_layer.size()[:-1] + \
            (self.num_attention_heads_per_partition,
             3 * self.hidden_size_per_attention_head)
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

        # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
        query_layer, key_layer, value_layer = mixed_x_layer.split(
            self.hidden_size_per_attention_head, dim=-1)

        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(sq,
                                       b * self.num_attention_heads_per_partition,
                                       self.hidden_size_per_attention_head).transpose(0, 1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(sk,
                                   b * self.num_attention_heads_per_partition,
                                   self.hidden_size_per_attention_head).transpose(0, 1)

        q_scaled = query_layer / self.norm_factor
        if attn_mask is not None:
            attention_probs = torch.baddbmm(attn_mask, q_scaled, key_layer.transpose(-2, -1))
        else:
            attention_probs = torch.bmm(q_scaled, key_layer.transpose(-2, -1))
        attention_probs = attention_probs.softmax(dim=-1)

        value_layer = value_layer.view(sk,
                                       b * self.num_attention_heads_per_partition,
                                       self.hidden_size_per_attention_head).transpose(0, 1)

        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer)

        # change view [b, np, sq, hn]
        context_layer = context_layer.view(b,
                                           self.num_attention_heads_per_partition,
                                           sq, self.hidden_size_per_attention_head)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + \
            (self.hidden_size_per_partition,)
        context_layer = context_layer.view(*new_context_layer_shape)

        output = self.out_proj(context_layer)

        return output


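# Usage sketch (illustrative sizes): the layer only implements self-attention
# and asserts `query is key`, so the same [seq, batch, hidden] tensor is passed
# for query, key and value; the sizes below are arbitrary example values.
#
#   attn = VisualAttention(embed_dim=64, num_heads=4)
#   x = torch.randn(10, 2, 64)    # [sq, b, h]
#   y = attn(x, x, x)             # -> (10, 2, 64), same layout as the input

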
class VisualAttentionBlock(nn.Module):
    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = nn.LayerNorm,
            is_cross_attention: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        if is_cross_attention:
            self.ln_1_kv = norm_layer(d_model)

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.attn = VisualAttention(d_model, n_head)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))

    def attention(
            self,
            q_x: torch.Tensor,
            k_x: Optional[torch.Tensor] = None,
            v_x: Optional[torch.Tensor] = None,
            attn_mask: Optional[torch.Tensor] = None,
    ):
        k_x = k_x if k_x is not None else q_x
        v_x = v_x if v_x is not None else q_x

        attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
        return self.attn(q_x, k_x, v_x, attn_mask=attn_mask)

    def forward(
            self,
            q_x: torch.Tensor,
            k_x: Optional[torch.Tensor] = None,
            v_x: Optional[torch.Tensor] = None,
            attn_mask: Optional[torch.Tensor] = None,
    ):
        k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
        v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None

        x = q_x + self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return x


class TransformerBlock(nn.Module):
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = nn.LayerNorm,
    ):
        super().__init__()
        self.width = width
        self.layers = layers

        self.resblocks = nn.ModuleList([
            VisualAttentionBlock(
                width, heads, mlp_ratio, act_layer=act_layer, norm_layer=norm_layer)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def get_cast_device(self) -> torch.device:
        return self.resblocks[0].mlp.c_fc.weight.device

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        for r in self.resblocks:
            x = r(x, attn_mask=attn_mask)
        return x


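# Usage sketch (illustrative sizes): the pre-norm residual blocks keep the
# [seq, batch, width] layout end to end, so stacking them is shape-preserving;
# the width/depth below are arbitrary example values.
#
#   blocks = TransformerBlock(width=64, layers=2, heads=4, mlp_ratio=4.0)
#   x = torch.randn(16, 2, 64)    # [seq, batch, width]
#   y = blocks(x)                 # -> (16, 2, 64)

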
class VisionTransformer(nn.Module):

    def __init__(
            self,
            image_size: int,
            patch_size: int,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float,
            n_queries: int = 256,
            output_dim: int = 512,
            **kwargs
    ):
        super().__init__()
        image_height, image_width = self.image_size = (image_size, image_size)
        patch_height, patch_width = self.patch_size = (patch_size, patch_size)
        self.grid_size = (image_height // patch_height, image_width // patch_width)
        self.output_dim = output_dim

        mean = (0.48145466, 0.4578275, 0.40821073)
        std = (0.26862954, 0.26130258, 0.27577711)
        self.image_transform = transforms.Compose([
            transforms.Resize(
                (image_size, image_size),
                interpolation=InterpolationMode.BICUBIC
            ),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])

        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        # class embeddings and positional embeddings
        scale = width ** -0.5
        self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))

        norm_layer = partial(nn.LayerNorm, eps=1e-6)
        act_layer = nn.GELU

        self.ln_pre = norm_layer(width)
        self.transformer = TransformerBlock(
            width,
            layers,
            heads,
            mlp_ratio,
            act_layer=act_layer,
            norm_layer=norm_layer,
        )

        self.attn_pool = Resampler(
            grid_size=int(math.sqrt(n_queries)),
            embed_dim=output_dim,
            num_heads=output_dim // 128,
            kv_dim=width,
            norm_layer=norm_layer,
        )
        self.ln_post = norm_layer(output_dim)
        self.proj = nn.Parameter((output_dim ** -0.5) * torch.randn(output_dim, output_dim))

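    # Shape bookkeeping (illustrative numbers, not a claim about the released
    # checkpoint): with image_size=448 and patch_size=14 the patch conv yields
    # a 32x32 grid of 1024 tokens; the 256-row (16x16) positional table is
    # bicubically resized to that grid in forward(), and attn_pool compresses
    # the 1024 tokens down to n_queries outputs of size output_dim.
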
    def forward(self, x: torch.Tensor):
        x = x.to(
            dtype=self.transformer.get_cast_dtype(),
            device=self.transformer.get_cast_device(),
        )
        # to patches
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

        x = x + get_abs_pos(self.positional_embedding, x.size(1))

        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.attn_pool(x)
        x = self.ln_post(x)
        x = x @ self.proj

        return x

    def encode(self, image_paths: List[str]):
        images = []
        for image_path in image_paths:
            if image_path.startswith("http://") or image_path.startswith("https://"):
                image = Image.open(requests.get(image_path, stream=True).raw)
            else:
                image = Image.open(image_path)
            image = image.convert("RGB")
            images.append(self.image_transform(image))
        images = torch.stack(images, dim=0)
        return self(images)
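

# End-to-end usage sketch (illustrative configuration and a hypothetical path,
# not the released checkpoint's settings): build the tower, then encode local
# paths or URLs into n_queries visual tokens of size output_dim.
#
#   vit = VisionTransformer(image_size=224, patch_size=14, width=1024,
#                           layers=2, heads=16, mlp_ratio=4.0,
#                           n_queries=256, output_dim=512)
#   tokens = vit.encode(["/path/to/example.jpg"])   # hypothetical image path
#   assert tokens.shape == (1, 256, 512)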