1inkusFace committed
Commit 6c8a9a7 · verified · 1 Parent(s): ec04216

Create resampler.py

Files changed (1):
models/resampler.py +303 -0
models/resampler.py ADDED
@@ -0,0 +1,303 @@
+ # modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
+ import math
+
+ import torch
+ import torch.nn as nn
+
+ from diffusers.models.embeddings import Timesteps, TimestepEmbedding
+
+
+ def get_timestep_embedding(
+     timesteps: torch.Tensor,
+     embedding_dim: int,
+     flip_sin_to_cos: bool = False,
+     downscale_freq_shift: float = 1,
+     scale: float = 1,
+     max_period: int = 10000,
+ ):
+     """
+     This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
+
+     :param timesteps: a 1-D Tensor of N indices, one per batch element. These may be fractional.
+     :param embedding_dim: the dimension of the output.
+     :param max_period: controls the minimum frequency of the embeddings.
+     :return: an [N x dim] Tensor of positional embeddings.
+     """
+     assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
+
+     half_dim = embedding_dim // 2
+     exponent = -math.log(max_period) * torch.arange(
+         start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
+     )
+     exponent = exponent / (half_dim - downscale_freq_shift)
+
+     emb = torch.exp(exponent)
+     emb = timesteps[:, None].float() * emb[None, :]
+
+     # scale embeddings
+     emb = scale * emb
+
+     # concat sine and cosine embeddings
+     emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
+
+     # flip sine and cosine embeddings
+     if flip_sin_to_cos:
+         emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
+
+     # zero pad
+     if embedding_dim % 2 == 1:
+         emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
+     return emb
+
+
+ # FFN
+ def FeedForward(dim, mult=4):
+     inner_dim = int(dim * mult)
+     return nn.Sequential(
+         nn.LayerNorm(dim),
+         nn.Linear(dim, inner_dim, bias=False),
+         nn.GELU(),
+         nn.Linear(inner_dim, dim, bias=False),
+     )
+
+
+ def reshape_tensor(x, heads):
+     bs, length, width = x.shape
+     # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
+     x = x.view(bs, length, heads, -1)
+     # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
+     x = x.transpose(1, 2)
+     # make contiguous; shape stays (bs, n_heads, length, dim_per_head)
+     x = x.reshape(bs, heads, length, -1)
+     return x
+
+
+ class PerceiverAttention(nn.Module):
+     def __init__(self, *, dim, dim_head=64, heads=8):
+         super().__init__()
+         self.scale = dim_head**-0.5
+         self.dim_head = dim_head
+         self.heads = heads
+         inner_dim = dim_head * heads
+
+         self.norm1 = nn.LayerNorm(dim)
+         self.norm2 = nn.LayerNorm(dim)
+
+         self.to_q = nn.Linear(dim, inner_dim, bias=False)
+         self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
+         self.to_out = nn.Linear(inner_dim, dim, bias=False)
+
+     def forward(self, x, latents, shift=None, scale=None):
+         """
+         Args:
+             x (torch.Tensor): image features
+                 shape (b, n1, D)
+             latents (torch.Tensor): latent features
+                 shape (b, n2, D)
+             shift, scale (torch.Tensor, optional): adaLN modulation of shape (b, D),
+                 applied to the normalized latents
+         """
+         x = self.norm1(x)
+         latents = self.norm2(latents)
+
+         if shift is not None and scale is not None:
+             latents = latents * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
+
+         b, l, _ = latents.shape
+
+         q = self.to_q(latents)
+         kv_input = torch.cat((x, latents), dim=-2)
+         k, v = self.to_kv(kv_input).chunk(2, dim=-1)
+
+         q = reshape_tensor(q, self.heads)
+         k = reshape_tensor(k, self.heads)
+         v = reshape_tensor(v, self.heads)
+
+         # attention
+         scale = 1 / math.sqrt(math.sqrt(self.dim_head))
+         weight = (q * scale) @ (k * scale).transpose(-2, -1)  # More stable with f16 than dividing afterwards
+         weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
+         out = weight @ v
+
+         out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
+
+         return self.to_out(out)
+
+
+ class Resampler(nn.Module):
+     def __init__(
+         self,
+         dim=1024,
+         depth=8,
+         dim_head=64,
+         heads=16,
+         num_queries=8,
+         embedding_dim=768,
+         output_dim=1024,
+         ff_mult=4,
+         *args,
+         **kwargs,
+     ):
+         super().__init__()
+
+         self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
+
+         self.proj_in = nn.Linear(embedding_dim, dim)
+
+         self.proj_out = nn.Linear(dim, output_dim)
+         self.norm_out = nn.LayerNorm(output_dim)
+
+         self.layers = nn.ModuleList([])
+         for _ in range(depth):
+             self.layers.append(
+                 nn.ModuleList(
+                     [
+                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+                         FeedForward(dim=dim, mult=ff_mult),
+                     ]
+                 )
+             )
+
+     def forward(self, x):
+
+         latents = self.latents.repeat(x.size(0), 1, 1)
+
+         x = self.proj_in(x)
+
+         for attn, ff in self.layers:
+             latents = attn(x, latents) + latents
+             latents = ff(latents) + latents
+
+         latents = self.proj_out(latents)
+         return self.norm_out(latents)
+
+
+ class TimeResampler(nn.Module):
+     def __init__(
+         self,
+         dim=1024,
+         depth=8,
+         dim_head=64,
+         heads=16,
+         num_queries=8,
+         embedding_dim=768,
+         output_dim=1024,
+         ff_mult=4,
+         timestep_in_dim=320,
+         timestep_flip_sin_to_cos=True,
+         timestep_freq_shift=0,
+     ):
+         super().__init__()
+
+         self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
+
+         self.proj_in = nn.Linear(embedding_dim, dim)
+
+         self.proj_out = nn.Linear(dim, output_dim)
+         self.norm_out = nn.LayerNorm(output_dim)
+
+         self.layers = nn.ModuleList([])
+         for _ in range(depth):
+             self.layers.append(
+                 nn.ModuleList(
+                     [
+                         # msa
+                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+                         # ff
+                         FeedForward(dim=dim, mult=ff_mult),
+                         # adaLN
+                         nn.Sequential(nn.SiLU(), nn.Linear(dim, 4 * dim, bias=True)),
+                     ]
+                 )
+             )
+
+         # time
+         self.time_proj = Timesteps(timestep_in_dim, timestep_flip_sin_to_cos, timestep_freq_shift)
+         self.time_embedding = TimestepEmbedding(timestep_in_dim, dim, act_fn="silu")
+
+         # adaLN
+         # self.adaLN_modulation = nn.Sequential(
+         #     nn.SiLU(),
+         #     nn.Linear(timestep_out_dim, 6 * timestep_out_dim, bias=True)
+         # )
+
+     def forward(self, x, timestep, need_temb=False):
+         timestep_emb = self.embedding_time(x, timestep)  # bs, dim
+
+         latents = self.latents.repeat(x.size(0), 1, 1)
+
+         x = self.proj_in(x)
+         x = x + timestep_emb[:, None]
+
+         for attn, ff, adaLN_modulation in self.layers:
+             shift_msa, scale_msa, shift_mlp, scale_mlp = adaLN_modulation(timestep_emb).chunk(4, dim=1)
+             latents = attn(x, latents, shift_msa, scale_msa) + latents
+
+             # walk the FeedForward Sequential layer by layer so that the adaLN
+             # shift/scale can be injected right after its LayerNorm
+             res = latents
+             for idx_ff in range(len(ff)):
+                 layer_ff = ff[idx_ff]
+                 latents = layer_ff(latents)
+                 if idx_ff == 0 and isinstance(layer_ff, nn.LayerNorm):  # adaLN
+                     latents = latents * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
+             latents = latents + res
+
+             # latents = ff(latents) + latents
+
+         latents = self.proj_out(latents)
+         latents = self.norm_out(latents)
+
+         if need_temb:
+             return latents, timestep_emb
+         else:
+             return latents
+
+     def embedding_time(self, sample, timestep):
+
+         # 1. time
+         timesteps = timestep
+         if not torch.is_tensor(timesteps):
+             # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+             # This would be a good case for the `match` statement (Python 3.10+)
+             is_mps = sample.device.type == "mps"
+             if isinstance(timestep, float):
+                 dtype = torch.float32 if is_mps else torch.float64
+             else:
+                 dtype = torch.int32 if is_mps else torch.int64
+             timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+         elif len(timesteps.shape) == 0:
+             timesteps = timesteps[None].to(sample.device)
+
+         # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+         timesteps = timesteps.expand(sample.shape[0])
+
+         t_emb = self.time_proj(timesteps)
+
+         # timesteps does not contain any weights and will always return f32 tensors
+         # but time_embedding might actually be running in fp16. so we need to cast here.
+         # there might be better ways to encapsulate this.
+         t_emb = t_emb.to(dtype=sample.dtype)
+
+         emb = self.time_embedding(t_emb, None)
+         return emb
+
+
+ if __name__ == '__main__':
+     model = TimeResampler(
+         dim=1280,
+         depth=4,
+         dim_head=64,
+         heads=20,
+         num_queries=16,
+         embedding_dim=512,
+         output_dim=2048,
+         ff_mult=4,
+         timestep_in_dim=320,
+         timestep_flip_sin_to_cos=True,
+         timestep_freq_shift=0,
+         # in_channel_extra_emb=2048,  # not a TimeResampler.__init__ argument; passing it raises a TypeError
+     )
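+
+     # Minimal smoke test with dummy inputs. The batch size, sequence length and
+     # timestep values below are illustrative assumptions, chosen only to be
+     # consistent with the constructor arguments above (embedding_dim=512,
+     # num_queries=16, output_dim=2048, dim=1280).
+     dummy_features = torch.randn(2, 77, 512)    # (batch, seq_len, embedding_dim)
+     dummy_timesteps = torch.tensor([999, 500])  # one diffusion timestep per batch element
+     tokens, temb = model(dummy_features, dummy_timesteps, need_temb=True)
+     print(tokens.shape, temb.shape)  # torch.Size([2, 16, 2048]) torch.Size([2, 1280])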
+
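For orientation, the sketch below shows how the plain Resampler above might be driven end to end. It is only a usage sketch: the import path, batch size, token count (257) and feature width (768, a CLIP-ViT-like shape) are illustrative assumptions, not values taken from this repository.

    import torch
    from models.resampler import Resampler  # assumes the repository root is on PYTHONPATH

    # Illustrative input: 2 images, 257 patch tokens, 768-dim features
    image_embeds = torch.randn(2, 257, 768)

    resampler = Resampler(
        dim=1024,
        depth=4,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,   # must match the last dimension of image_embeds
        output_dim=1024,
        ff_mult=4,
    )

    tokens = resampler(image_embeds)
    print(tokens.shape)  # torch.Size([2, 8, 1024])

Whatever the number of incoming feature tokens, each image is compressed into num_queries learned latent tokens by the Perceiver-style cross-attention blocks.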