zzl committed
Commit 7062b81 · 1 Parent(s): 2219b6e

[Release] Demo v1.0

app.py ADDED
@@ -0,0 +1,35 @@
+ import torch
+ import gradio as gr
+ from demo_img import demo_img
+ from demo_vid import demo_vid
+
+ with gr.Blocks(css='style.css') as demo:
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 1200px; margin: 20px auto;">
+         <h1 style="font-weight: 900; font-size: 2rem; margin: 0rem">
+             AMT: All-Pairs Multi-Field Transforms for Efficient Frame Interpolation
+         </h1>
+         <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+             <a href="https://paper99.github.io" style="color:blue;">Zhen Li</a><sup>1*</sup>,
+             <a href="https://github.com/NK-CS-ZZL" style="color:blue;">Zuo-Liang Zhu</a><sup>1*</sup>,
+             <a href="https://github.com/hlh981029" style="color:blue;">Ling-Hao Han</a><sup>1*</sup>,
+             <a href="https://houqb.github.io" style="color:blue;">Qibin Hou</a><sup>1*</sup>,
+             <a href="https://github.com" style="color:blue;">Chun-Le Guo</a><sup>1*</sup>,
+             <a href="https://mmcheng.net" style="color:blue;">Ming-Ming Cheng</a><sup>1*</sup>
+         </h2>
+         <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+             <sup>1</sup>Nankai University. <sup>*</sup> denotes equal contribution; <sup>#</sup> denotes the corresponding author.
+         </h2>
+         <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+             [<a href="https://arxiv.org/abs/2303.13439" style="color:blue;">arXiv</a>]
+             [<a href="https://github.com/MCG-NKU/AMT" style="color:blue;">GitHub</a>]
+         </h2>
+         </div>
+         """)
+
+     with gr.Tab('Img2Vid'):
+         demo_img()
+     with gr.Tab('VFI'):
+         demo_vid()
+
+ demo.launch(debug=False)
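For reference, the tab wiring above leans on a Gradio composition pattern: demo_img() and demo_vid() each build their own gr.Blocks, and calling them inside a gr.Tab context renders their components in that tab. A minimal sketch of the same pattern (illustrative names, not part of this commit):

import gradio as gr

def greeting_tab():                  # stands in for demo_img / demo_vid
    with gr.Blocks() as blk:
        gr.Markdown('Hello from a reusable tab')
    return blk

with gr.Blocks() as demo:
    with gr.Tab('Greeting'):
        greeting_tab()               # renders inside the enclosing Tab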
demo_img.py ADDED
@@ -0,0 +1,106 @@
+ import os
+ import cv2
+ import glob
+ import torch
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+
+ from networks.amts import Model as AMTS
+ from networks.amtl import Model as AMTL
+ from networks.amtg import Model as AMTG
+ from utils import img2tensor, tensor2img, InputPadder
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model_dict = {
+     'AMT-S': AMTS, 'AMT-L': AMTL, 'AMT-G': AMTG
+ }
+
+ def img2vid(model_type, img0, img1, frame_ratio, iters):
+     model = model_dict[model_type]()
+     model.to(device)
+     ckpt_path = hf_hub_download(repo_id='lalala125/AMT', filename=f'{model_type.lower()}.pth')
+     print(model_type)
+     # map_location keeps CPU-only Spaces working when the checkpoint was saved on GPU
+     ckpt = torch.load(ckpt_path, map_location=device)
+     model.load_state_dict(ckpt['state_dict'])
+     model.eval()
+     img0_t = img2tensor(img0).to(device)
+     img1_t = img2tensor(img1).to(device)
+     padder = InputPadder(img0_t.shape, 16)
+     img0_t, img1_t = padder.pad(img0_t, img1_t)
+     inputs = [img0_t, img1_t]
+     embt = torch.tensor(1/2).float().view(1, 1, 1, 1).to(device)
+
+     # each iteration inserts the midpoint (embt = 0.5) between every adjacent pair,
+     # doubling the number of frame gaps
+     for i in range(iters):
+         print(f'Iter {i+1}. input_frames={len(inputs)} output_frames={2*len(inputs)-1}')
+         outputs = [inputs[0]]
+         for in_0, in_1 in zip(inputs[:-1], inputs[1:]):
+             with torch.no_grad():
+                 imgt_pred = model(in_0, in_1, embt, eval=True)['imgt_pred']
+             # keep frames padded between iterations so the model always sees
+             # divisible-by-16 inputs; unpad once before writing the video
+             outputs += [imgt_pred, in_1]
+         inputs = outputs
+     outputs = [padder.unpad(x) for x in outputs]
+
+     out_path = 'results'
+     os.makedirs(out_path, exist_ok=True)
+     size = outputs[0].shape[2:][::-1]
+     writer = cv2.VideoWriter(f'{out_path}/demo.mp4', cv2.VideoWriter_fourcc(*'mp4v'), frame_ratio, size)
+     for i, imgt_pred in enumerate(outputs):
+         imgt_pred = tensor2img(imgt_pred)
+         imgt_pred = cv2.cvtColor(imgt_pred, cv2.COLOR_RGB2BGR)
+         writer.write(imgt_pred)
+     writer.release()
+     return 'results/demo.mp4'
+
+
+ def demo_img():
+     with gr.Blocks() as demo:
+         with gr.Row():
+             gr.Markdown('## Image Demo')
+         with gr.Row():
+             gr.HTML(
+                 """
+                 <div style="text-align: left; margin: auto;">
+                 <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+                     Description: Given two input images, generate a short video that interpolates between them.
+                 </h2>
+                 </div>
+                 """)
+
+         with gr.Row():
+             with gr.Column():
+                 img0 = gr.Image(label='Image0')
+                 img1 = gr.Image(label='Image1')
+             with gr.Column():
+                 result = gr.Video(label="Generated Video")
+                 with gr.Accordion('Advanced options', open=False):
+                     ratio = gr.Slider(label='Multiple Ratio',
+                                       minimum=4,
+                                       maximum=7,
+                                       value=6,
+                                       step=1)
+                     frame_ratio = gr.Slider(label='Frame Rate (FPS)',
+                                             minimum=8,
+                                             maximum=64,
+                                             value=16,
+                                             step=1)
+                     model_type = gr.Radio(['AMT-S', 'AMT-L', 'AMT-G'],
+                                           label='Model Select',
+                                           value='AMT-S')
+                 run_button = gr.Button('Run')
+         inputs = [
+             model_type,
+             img0,
+             img1,
+             frame_ratio,
+             ratio,
+         ]
+
+         gr.Examples(examples=glob.glob("examples/*.png"),
+                     inputs=img0,
+                     label='Example images (drag them to the input windows)',
+                     run_on_click=False,
+                     )
+
+         run_button.click(fn=img2vid,
+                          inputs=inputs,
+                          outputs=result)
+     return demo
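Each pass of the loop in img2vid inserts a midpoint frame between every adjacent pair, so the 'Multiple Ratio' setting of k iterations turns the 2 input images into 2**k + 1 frames. A quick arithmetic check of that recurrence (no model required):

def n_frames(n_inputs: int, iters: int) -> int:
    n = n_inputs
    for _ in range(iters):
        n = 2 * n - 1              # every gap gains one interpolated frame
    return n

assert n_frames(2, 6) == 2 ** 6 + 1 == 65   # the demo default (ratio=6)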
demo_vid.py ADDED
@@ -0,0 +1,110 @@
+ import os
+ import cv2
+ import glob
+ import torch
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+
+ from networks.amts import Model as AMTS
+ from networks.amtl import Model as AMTL
+ from networks.amtg import Model as AMTG
+ from utils import img2tensor, tensor2img, InputPadder
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model_dict = {
+     'AMT-S': AMTS, 'AMT-L': AMTL, 'AMT-G': AMTG
+ }
+
+
+ def vid2vid(model_type, video, iters):
+     model = model_dict[model_type]()
+     model.to(device)
+     ckpt_path = hf_hub_download(repo_id='lalala125/AMT', filename=f'{model_type.lower()}.pth')
+     print(model_type)
+     ckpt = torch.load(ckpt_path, map_location=device)
+     model.load_state_dict(ckpt['state_dict'])
+     model.eval()
+     vcap = cv2.VideoCapture(video)
+     ori_frame_rate = vcap.get(cv2.CAP_PROP_FPS)
+     inputs = []
+     # height from CAP_PROP_FRAME_HEIGHT, width from CAP_PROP_FRAME_WIDTH
+     # (InputPadder expects (h, w) order)
+     h = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     w = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     padder = InputPadder((h, w), 16)
+     while True:
+         ret, frame = vcap.read()
+         if not ret:
+             break
+         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+         frame_t = img2tensor(frame).to(device)
+         frame_t = padder.pad(frame_t)
+         inputs.append(frame_t)
+     embt = torch.tensor(1/2).float().view(1, 1, 1, 1).to(device)
+
+     for i in range(iters):
+         print(f'Iter {i+1}. input_frames={len(inputs)} output_frames={2*len(inputs)-1}')
+         outputs = [inputs[0]]
+         for in_0, in_1 in zip(inputs[:-1], inputs[1:]):
+             with torch.no_grad():
+                 imgt_pred = model(in_0, in_1, embt, eval=True)['imgt_pred']
+             # keep frames padded between iterations; unpad once before writing
+             outputs += [imgt_pred, in_1]
+         inputs = outputs
+     outputs = [padder.unpad(x) for x in outputs]
+
+     out_path = 'results'
+     os.makedirs(out_path, exist_ok=True)
+     size = outputs[0].shape[2:][::-1]
+     writer = cv2.VideoWriter(f'{out_path}/demo_vfi.mp4',
+                              cv2.VideoWriter_fourcc(*'mp4v'),
+                              ori_frame_rate * 2 ** iters, size)
+     for i, imgt_pred in enumerate(outputs):
+         imgt_pred = tensor2img(imgt_pred)
+         imgt_pred = cv2.cvtColor(imgt_pred, cv2.COLOR_RGB2BGR)
+         writer.write(imgt_pred)
+     writer.release()
+     return 'results/demo_vfi.mp4'
+
+
+ def demo_vid():
+     with gr.Blocks() as demo:
+         with gr.Row():
+             gr.Markdown('## Video Demo')
+         with gr.Row():
+             gr.HTML(
+                 """
+                 <div style="text-align: left; margin: auto;">
+                 <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
+                     Description: Upload a video and increase its frame rate by interpolating between consecutive frames.
+                 </h2>
+                 </div>
+                 """)
+
+         with gr.Row():
+             with gr.Column():
+                 video = gr.Video(label='Video Input')
+             with gr.Column():
+                 result = gr.Video(label="Generated Video")
+                 with gr.Accordion('Advanced options', open=False):
+                     ratio = gr.Slider(label='Multiple Ratio',
+                                       minimum=1,
+                                       maximum=4,
+                                       value=2,
+                                       step=1)
+                     model_type = gr.Radio(['AMT-S', 'AMT-L', 'AMT-G'],
+                                           label='Model Select',
+                                           value='AMT-S')
+                 run_button = gr.Button('Run')
+         inputs = [
+             model_type,
+             video,
+             ratio,
+         ]
+
+         gr.Examples(examples=glob.glob("examples/*.mp4"),
+                     inputs=video,
+                     label='Example videos (drag them to the input window)',
+                     run_on_click=False,
+                     )
+
+         run_button.click(fn=vid2vid,
+                          inputs=inputs,
+                          outputs=result)
+     return demo
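vid2vid keeps every source frame and adds 2**iters - 1 new frames in each gap, so the writer multiplies the original frame rate by 2**iters to preserve the clip's duration:

ori_fps = 25.0
iters = 2                          # the demo default for 'Multiple Ratio'
out_fps = ori_fps * 2 ** iters     # 100.0: 4x the frames over the same wall-clock time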
examples/00001808.png ADDED
examples/00001810.png ADDED
networks/__init__.py ADDED
File without changes
networks/amtg.py ADDED
@@ -0,0 +1,170 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from networks.blocks.raft import (
+     coords_grid,
+     BasicUpdateBlock, BidirCorrBlock
+ )
+ from networks.blocks.feat_enc import (
+     LargeEncoder
+ )
+ from networks.blocks.ifrnet import (
+     resize,
+     Encoder,
+     InitDecoder,
+     IntermediateDecoder
+ )
+ from networks.blocks.multi_flow import (
+     multi_flow_combine,
+     MultiFlowDecoder
+ )
+
+ class Model(nn.Module):
+     def __init__(self,
+                  corr_radius=3,
+                  corr_lvls=4,
+                  num_flows=5,
+                  channels=[84, 96, 112, 128],
+                  skip_channels=84):
+         super(Model, self).__init__()
+         self.radius = corr_radius
+         self.corr_levels = corr_lvls
+         self.num_flows = num_flows
+
+         self.feat_encoder = LargeEncoder(output_dim=128, norm_fn='instance', dropout=0.)
+         self.encoder = Encoder(channels, large=True)
+         self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels)
+         self.decoder3 = IntermediateDecoder(channels[2], channels[1], skip_channels)
+         self.decoder2 = IntermediateDecoder(channels[1], channels[0], skip_channels)
+         self.decoder1 = MultiFlowDecoder(channels[0], skip_channels, num_flows)
+
+         self.update4 = self._get_updateblock(112, None)
+         self.update3_low = self._get_updateblock(96, 2.0)
+         self.update2_low = self._get_updateblock(84, 4.0)
+
+         self.update3_high = self._get_updateblock(96, None)
+         self.update2_high = self._get_updateblock(84, None)
+
+         self.comb_block = nn.Sequential(
+             nn.Conv2d(3*self.num_flows, 6*self.num_flows, 7, 1, 3),
+             nn.PReLU(6*self.num_flows),
+             nn.Conv2d(6*self.num_flows, 3, 7, 1, 3),
+         )
+
+     def _get_updateblock(self, cdim, scale_factor=None):
+         return BasicUpdateBlock(cdim=cdim, hidden_dim=192, flow_dim=64,
+                                 corr_dim=256, corr_dim2=192, fc_dim=188,
+                                 scale_factor=scale_factor, corr_levels=self.corr_levels,
+                                 radius=self.radius)
+
+     def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
+         # convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
+         # based on linear assumption
+         t1_scale = 1. / embt
+         t0_scale = 1. / (1. - embt)
+         if downsample != 1:
+             inv = 1 / downsample
+             flow0 = inv * resize(flow0, scale_factor=inv)
+             flow1 = inv * resize(flow1, scale_factor=inv)
+
+         corr0, corr1 = corr_fn(coord + flow1 * t1_scale, coord + flow0 * t0_scale)
+         corr = torch.cat([corr0, corr1], dim=1)
+         flow = torch.cat([flow0, flow1], dim=1)
+         return corr, flow
+
+     def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
+         mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
+         img0 = img0 - mean_
+         img1 = img1 - mean_
+         img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0
+         img1_ = resize(img1, scale_factor) if scale_factor != 1.0 else img1
+         b, _, h, w = img0_.shape
+         coord = coords_grid(b, h // 8, w // 8, img0.device)
+
+         fmap0, fmap1 = self.feat_encoder([img0_, img1_])  # [1, 128, H//8, W//8]
+         corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels)
+
+         # f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4]
+         # f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16]
+         f0_1, f0_2, f0_3, f0_4 = self.encoder(img0_)
+         f1_1, f1_2, f1_3, f1_4 = self.encoder(img1_)
+
+         ######################### the 4th decoder #########################
+         up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt)
+         corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord,
+                                                  up_flow0_4, up_flow1_4,
+                                                  embt, downsample=1)
+
+         # residue update with lookup corr
+         delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4)
+         delta_flow0_4, delta_flow1_4 = torch.chunk(delta_flow_4, 2, 1)
+         up_flow0_4 = up_flow0_4 + delta_flow0_4
+         up_flow1_4 = up_flow1_4 + delta_flow1_4
+         ft_3_ = ft_3_ + delta_ft_3_
+
+         ######################### the 3rd decoder #########################
+         up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4)
+         corr_3, flow_3 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_3, up_flow1_3,
+                                                  embt, downsample=2)
+
+         # residue update with lookup corr
+         delta_ft_2_, delta_flow_3 = self.update3_low(ft_2_, flow_3, corr_3)
+         delta_flow0_3, delta_flow1_3 = torch.chunk(delta_flow_3, 2, 1)
+         up_flow0_3 = up_flow0_3 + delta_flow0_3
+         up_flow1_3 = up_flow1_3 + delta_flow1_3
+         ft_2_ = ft_2_ + delta_ft_2_
+
+         # residue update with lookup corr (hr)
+         corr_3 = resize(corr_3, scale_factor=2.0)
+         up_flow_3 = torch.cat([up_flow0_3, up_flow1_3], dim=1)
+         delta_ft_2_, delta_up_flow_3 = self.update3_high(ft_2_, up_flow_3, corr_3)
+         ft_2_ += delta_ft_2_
+         up_flow0_3 += delta_up_flow_3[:, 0:2]
+         up_flow1_3 += delta_up_flow_3[:, 2:4]
+
+         ######################### the 2nd decoder #########################
+         up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3)
+         corr_2, flow_2 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_2, up_flow1_2,
+                                                  embt, downsample=4)
+
+         # residue update with lookup corr
+         delta_ft_1_, delta_flow_2 = self.update2_low(ft_1_, flow_2, corr_2)
+         delta_flow0_2, delta_flow1_2 = torch.chunk(delta_flow_2, 2, 1)
+         up_flow0_2 = up_flow0_2 + delta_flow0_2
+         up_flow1_2 = up_flow1_2 + delta_flow1_2
+         ft_1_ = ft_1_ + delta_ft_1_
+
+         # residue update with lookup corr (hr)
+         corr_2 = resize(corr_2, scale_factor=4.0)
+         up_flow_2 = torch.cat([up_flow0_2, up_flow1_2], dim=1)
+         delta_ft_1_, delta_up_flow_2 = self.update2_high(ft_1_, up_flow_2, corr_2)
+         ft_1_ += delta_ft_1_
+         up_flow0_2 += delta_up_flow_2[:, 0:2]
+         up_flow1_2 += delta_up_flow_2[:, 2:4]
+
+         ######################### the 1st decoder #########################
+         up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2)
+
+         if scale_factor != 1.0:
+             up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             mask = resize(mask, scale_factor=(1.0/scale_factor))
+             img_res = resize(img_res, scale_factor=(1.0/scale_factor))
+
+         imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1,
+                                        mask, img_res, mean_)
+         imgt_pred = torch.clamp(imgt_pred, 0, 1)
+
+         if eval:
+             return {'imgt_pred': imgt_pred, }
+         else:
+             up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w)
+             up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w)
+             return {
+                 'imgt_pred': imgt_pred,
+                 'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
+                 'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
+                 'ft_pred': [ft_1_, ft_2_, ft_3_],
+             }
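The 1/embt and 1/(1 - embt) factors in _corr_scale_lookup stretch the predicted intermediate-frame flows so they index the full 0<->1 correlation volume, assuming motion is linear between the two inputs. In the demos embt is always 0.5, so both factors reduce to a simple doubling:

embt = 0.5                       # midpoint interpolation, as the demos use
t1_scale = 1.0 / embt            # 2.0
t0_scale = 1.0 / (1.0 - embt)    # 2.0
# Under linear motion, a pixel displaced by d between t=0.5 and t=1 has moved
# 2*d over the whole 0 -> 1 interval, so the midpoint flows are doubled before
# the correlation lookup.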
networks/amtl.py ADDED
@@ -0,0 +1,153 @@
+ import torch
+ import torch.nn as nn
+ from networks.blocks.raft import (
+     coords_grid,
+     BasicUpdateBlock, BidirCorrBlock
+ )
+ from networks.blocks.feat_enc import (
+     BasicEncoder
+ )
+ from networks.blocks.ifrnet import (
+     resize,
+     Encoder,
+     InitDecoder,
+     IntermediateDecoder
+ )
+ from networks.blocks.multi_flow import (
+     multi_flow_combine,
+     MultiFlowDecoder
+ )
+
+ class Model(nn.Module):
+     def __init__(self,
+                  corr_radius=3,
+                  corr_lvls=4,
+                  num_flows=5,
+                  channels=[48, 64, 72, 128],
+                  skip_channels=48
+                  ):
+         super(Model, self).__init__()
+         self.radius = corr_radius
+         self.corr_levels = corr_lvls
+         self.num_flows = num_flows
+
+         self.feat_encoder = BasicEncoder(output_dim=128, norm_fn='instance', dropout=0.)
+         self.encoder = Encoder([48, 64, 72, 128], large=True)
+
+         self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels)
+         self.decoder3 = IntermediateDecoder(channels[2], channels[1], skip_channels)
+         self.decoder2 = IntermediateDecoder(channels[1], channels[0], skip_channels)
+         self.decoder1 = MultiFlowDecoder(channels[0], skip_channels, num_flows)
+
+         self.update4 = self._get_updateblock(72, None)
+         self.update3 = self._get_updateblock(64, 2.0)
+         self.update2 = self._get_updateblock(48, 4.0)
+
+         self.comb_block = nn.Sequential(
+             nn.Conv2d(3*self.num_flows, 6*self.num_flows, 7, 1, 3),
+             nn.PReLU(6*self.num_flows),
+             nn.Conv2d(6*self.num_flows, 3, 7, 1, 3),
+         )
+
+     def _get_updateblock(self, cdim, scale_factor=None):
+         return BasicUpdateBlock(cdim=cdim, hidden_dim=128, flow_dim=48,
+                                 corr_dim=256, corr_dim2=160, fc_dim=124,
+                                 scale_factor=scale_factor, corr_levels=self.corr_levels,
+                                 radius=self.radius)
+
+     def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
+         # convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
+         # based on linear assumption
+         t1_scale = 1. / embt
+         t0_scale = 1. / (1. - embt)
+         if downsample != 1:
+             inv = 1 / downsample
+             flow0 = inv * resize(flow0, scale_factor=inv)
+             flow1 = inv * resize(flow1, scale_factor=inv)
+
+         corr0, corr1 = corr_fn(coord + flow1 * t1_scale, coord + flow0 * t0_scale)
+         corr = torch.cat([corr0, corr1], dim=1)
+         flow = torch.cat([flow0, flow1], dim=1)
+         return corr, flow
+
+     def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
+         mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
+         img0 = img0 - mean_
+         img1 = img1 - mean_
+         img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0
+         img1_ = resize(img1, scale_factor) if scale_factor != 1.0 else img1
+         b, _, h, w = img0_.shape
+         coord = coords_grid(b, h // 8, w // 8, img0.device)
+
+         fmap0, fmap1 = self.feat_encoder([img0_, img1_])  # [1, 128, H//8, W//8]
+         corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels)
+
+         # f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4]
+         # f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16]
+         f0_1, f0_2, f0_3, f0_4 = self.encoder(img0_)
+         f1_1, f1_2, f1_3, f1_4 = self.encoder(img1_)
+
+         ######################### the 4th decoder #########################
+         up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt)
+         corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord,
+                                                  up_flow0_4, up_flow1_4,
+                                                  embt, downsample=1)
+
+         # residue update with lookup corr
+         delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4)
+         delta_flow0_4, delta_flow1_4 = torch.chunk(delta_flow_4, 2, 1)
+         up_flow0_4 = up_flow0_4 + delta_flow0_4
+         up_flow1_4 = up_flow1_4 + delta_flow1_4
+         ft_3_ = ft_3_ + delta_ft_3_
+
+         ######################### the 3rd decoder #########################
+         up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4)
+         corr_3, flow_3 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_3, up_flow1_3,
+                                                  embt, downsample=2)
+
+         # residue update with lookup corr
+         delta_ft_2_, delta_flow_3 = self.update3(ft_2_, flow_3, corr_3)
+         delta_flow0_3, delta_flow1_3 = torch.chunk(delta_flow_3, 2, 1)
+         up_flow0_3 = up_flow0_3 + delta_flow0_3
+         up_flow1_3 = up_flow1_3 + delta_flow1_3
+         ft_2_ = ft_2_ + delta_ft_2_
+
+         ######################### the 2nd decoder #########################
+         up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3)
+         corr_2, flow_2 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_2, up_flow1_2,
+                                                  embt, downsample=4)
+
+         # residue update with lookup corr
+         delta_ft_1_, delta_flow_2 = self.update2(ft_1_, flow_2, corr_2)
+         delta_flow0_2, delta_flow1_2 = torch.chunk(delta_flow_2, 2, 1)
+         up_flow0_2 = up_flow0_2 + delta_flow0_2
+         up_flow1_2 = up_flow1_2 + delta_flow1_2
+         ft_1_ = ft_1_ + delta_ft_1_
+
+         ######################### the 1st decoder #########################
+         up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2)
+
+         if scale_factor != 1.0:
+             up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             mask = resize(mask, scale_factor=(1.0/scale_factor))
+             img_res = resize(img_res, scale_factor=(1.0/scale_factor))
+
+         imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1,
+                                        mask, img_res, mean_)
+         imgt_pred = torch.clamp(imgt_pred, 0, 1)
+
+         if eval:
+             return {'imgt_pred': imgt_pred, }
+         else:
+             up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w)
+             up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w)
+             return {
+                 'imgt_pred': imgt_pred,
+                 'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
+                 'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
+                 'ft_pred': [ft_1_, ft_2_, ft_3_],
+             }
networks/amts.py ADDED
@@ -0,0 +1,152 @@
+ import torch
+ import torch.nn as nn
+ from networks.blocks.raft import (
+     coords_grid,
+     SmallUpdateBlock, BidirCorrBlock
+ )
+ from networks.blocks.feat_enc import (
+     SmallEncoder
+ )
+ from networks.blocks.ifrnet import (
+     resize,
+     Encoder,
+     InitDecoder,
+     IntermediateDecoder
+ )
+ from networks.blocks.multi_flow import (
+     multi_flow_combine,
+     MultiFlowDecoder
+ )
+
+ class Model(nn.Module):
+     def __init__(self,
+                  corr_radius=3,
+                  corr_lvls=4,
+                  num_flows=3,
+                  channels=[20, 32, 44, 56],
+                  skip_channels=20):
+         super(Model, self).__init__()
+         self.radius = corr_radius
+         self.corr_levels = corr_lvls
+         self.num_flows = num_flows
+         self.channels = channels
+         self.skip_channels = skip_channels
+
+         self.feat_encoder = SmallEncoder(output_dim=84, norm_fn='instance', dropout=0.)
+         self.encoder = Encoder(channels)
+
+         self.decoder4 = InitDecoder(channels[3], channels[2], skip_channels)
+         self.decoder3 = IntermediateDecoder(channels[2], channels[1], skip_channels)
+         self.decoder2 = IntermediateDecoder(channels[1], channels[0], skip_channels)
+         self.decoder1 = MultiFlowDecoder(channels[0], skip_channels, num_flows)
+
+         self.update4 = self._get_updateblock(44)
+         self.update3 = self._get_updateblock(32, 2)
+         self.update2 = self._get_updateblock(20, 4)
+
+         self.comb_block = nn.Sequential(
+             nn.Conv2d(3*num_flows, 6*num_flows, 3, 1, 1),
+             nn.PReLU(6*num_flows),
+             nn.Conv2d(6*num_flows, 3, 3, 1, 1),
+         )
+
+     def _get_updateblock(self, cdim, scale_factor=None):
+         return SmallUpdateBlock(cdim=cdim, hidden_dim=76, flow_dim=20, corr_dim=64,
+                                 fc_dim=68, scale_factor=scale_factor,
+                                 corr_levels=self.corr_levels, radius=self.radius)
+
+     def _corr_scale_lookup(self, corr_fn, coord, flow0, flow1, embt, downsample=1):
+         # convert t -> 0 to 0 -> 1 | convert t -> 1 to 1 -> 0
+         # based on linear assumption
+         t1_scale = 1. / embt
+         t0_scale = 1. / (1. - embt)
+         if downsample != 1:
+             inv = 1 / downsample
+             flow0 = inv * resize(flow0, scale_factor=inv)
+             flow1 = inv * resize(flow1, scale_factor=inv)
+
+         corr0, corr1 = corr_fn(coord + flow1 * t1_scale, coord + flow0 * t0_scale)
+         corr = torch.cat([corr0, corr1], dim=1)
+         flow = torch.cat([flow0, flow1], dim=1)
+         return corr, flow
+
+     def forward(self, img0, img1, embt, scale_factor=1.0, eval=False, **kwargs):
+         mean_ = torch.cat([img0, img1], 2).mean(1, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)
+         img0 = img0 - mean_
+         img1 = img1 - mean_
+         img0_ = resize(img0, scale_factor) if scale_factor != 1.0 else img0
+         img1_ = resize(img1, scale_factor) if scale_factor != 1.0 else img1
+         b, _, h, w = img0_.shape
+         coord = coords_grid(b, h // 8, w // 8, img0.device)
+
+         fmap0, fmap1 = self.feat_encoder([img0_, img1_])  # [1, 84, H//8, W//8]
+         corr_fn = BidirCorrBlock(fmap0, fmap1, radius=self.radius, num_levels=self.corr_levels)
+
+         # f0_1: [1, c0, H//2, W//2] | f0_2: [1, c1, H//4, W//4]
+         # f0_3: [1, c2, H//8, W//8] | f0_4: [1, c3, H//16, W//16]
+         f0_1, f0_2, f0_3, f0_4 = self.encoder(img0_)
+         f1_1, f1_2, f1_3, f1_4 = self.encoder(img1_)
+
+         ######################### the 4th decoder #########################
+         up_flow0_4, up_flow1_4, ft_3_ = self.decoder4(f0_4, f1_4, embt)
+         corr_4, flow_4 = self._corr_scale_lookup(corr_fn, coord,
+                                                  up_flow0_4, up_flow1_4,
+                                                  embt, downsample=1)
+
+         # residue update with lookup corr
+         delta_ft_3_, delta_flow_4 = self.update4(ft_3_, flow_4, corr_4)
+         delta_flow0_4, delta_flow1_4 = torch.chunk(delta_flow_4, 2, 1)
+         up_flow0_4 = up_flow0_4 + delta_flow0_4
+         up_flow1_4 = up_flow1_4 + delta_flow1_4
+         ft_3_ = ft_3_ + delta_ft_3_
+
+         ######################### the 3rd decoder #########################
+         up_flow0_3, up_flow1_3, ft_2_ = self.decoder3(ft_3_, f0_3, f1_3, up_flow0_4, up_flow1_4)
+         corr_3, flow_3 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_3, up_flow1_3,
+                                                  embt, downsample=2)
+
+         # residue update with lookup corr
+         delta_ft_2_, delta_flow_3 = self.update3(ft_2_, flow_3, corr_3)
+         delta_flow0_3, delta_flow1_3 = torch.chunk(delta_flow_3, 2, 1)
+         up_flow0_3 = up_flow0_3 + delta_flow0_3
+         up_flow1_3 = up_flow1_3 + delta_flow1_3
+         ft_2_ = ft_2_ + delta_ft_2_
+
+         ######################### the 2nd decoder #########################
+         up_flow0_2, up_flow1_2, ft_1_ = self.decoder2(ft_2_, f0_2, f1_2, up_flow0_3, up_flow1_3)
+         corr_2, flow_2 = self._corr_scale_lookup(corr_fn,
+                                                  coord, up_flow0_2, up_flow1_2,
+                                                  embt, downsample=4)
+
+         # residue update with lookup corr
+         delta_ft_1_, delta_flow_2 = self.update2(ft_1_, flow_2, corr_2)
+         delta_flow0_2, delta_flow1_2 = torch.chunk(delta_flow_2, 2, 1)
+         up_flow0_2 = up_flow0_2 + delta_flow0_2
+         up_flow1_2 = up_flow1_2 + delta_flow1_2
+         ft_1_ = ft_1_ + delta_ft_1_
+
+         ######################### the 1st decoder #########################
+         up_flow0_1, up_flow1_1, mask, img_res = self.decoder1(ft_1_, f0_1, f1_1, up_flow0_2, up_flow1_2)
+
+         if scale_factor != 1.0:
+             up_flow0_1 = resize(up_flow0_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             up_flow1_1 = resize(up_flow1_1, scale_factor=(1.0/scale_factor)) * (1.0/scale_factor)
+             mask = resize(mask, scale_factor=(1.0/scale_factor))
+             img_res = resize(img_res, scale_factor=(1.0/scale_factor))
+
+         imgt_pred = multi_flow_combine(self.comb_block, img0, img1, up_flow0_1, up_flow1_1,
+                                        mask, img_res, mean_)
+         imgt_pred = torch.clamp(imgt_pred, 0, 1)
+
+         if eval:
+             return {'imgt_pred': imgt_pred, }
+         else:
+             up_flow0_1 = up_flow0_1.reshape(b, self.num_flows, 2, h, w)
+             up_flow1_1 = up_flow1_1.reshape(b, self.num_flows, 2, h, w)
+             return {
+                 'imgt_pred': imgt_pred,
+                 'flow0_pred': [up_flow0_1, up_flow0_2, up_flow0_3, up_flow0_4],
+                 'flow1_pred': [up_flow1_1, up_flow1_2, up_flow1_3, up_flow1_4],
+                 'ft_pred': [ft_1_, ft_2_, ft_3_],
+             }
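AMT-S, AMT-L, and AMT-G share one architecture and differ only in encoder size, channel widths, and the number of flow hypotheses (3 vs. 5). A quick way to compare their footprints, assuming the networks package from this commit is importable:

from networks.amts import Model as AMTS
from networks.amtl import Model as AMTL
from networks.amtg import Model as AMTG

for name, cls in [('AMT-S', AMTS), ('AMT-L', AMTL), ('AMT-G', AMTG)]:
    n_params = sum(p.numel() for p in cls().parameters())
    print(f'{name}: {n_params / 1e6:.1f}M parameters')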
networks/blocks/__init__.py ADDED
File without changes
networks/blocks/feat_enc.py ADDED
@@ -0,0 +1,346 @@
+ '''
+ This code is partially borrowed from RAFT (https://github.com/princeton-vl/RAFT).
+ '''
+ import torch
+ import torch.nn as nn
+
+ class BottleneckBlock(nn.Module):
+     def __init__(self, in_planes, planes, norm_fn='group', stride=1):
+         super(BottleneckBlock, self).__init__()
+
+         self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
+         self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
+         self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
+         self.relu = nn.ReLU(inplace=True)
+
+         num_groups = planes // 8
+
+         if norm_fn == 'group':
+             self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
+             self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
+             self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
+             if stride != 1:
+                 self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
+
+         elif norm_fn == 'batch':
+             self.norm1 = nn.BatchNorm2d(planes//4)
+             self.norm2 = nn.BatchNorm2d(planes//4)
+             self.norm3 = nn.BatchNorm2d(planes)
+             if stride != 1:
+                 self.norm4 = nn.BatchNorm2d(planes)
+
+         elif norm_fn == 'instance':
+             self.norm1 = nn.InstanceNorm2d(planes//4)
+             self.norm2 = nn.InstanceNorm2d(planes//4)
+             self.norm3 = nn.InstanceNorm2d(planes)
+             if stride != 1:
+                 self.norm4 = nn.InstanceNorm2d(planes)
+
+         elif norm_fn == 'none':
+             self.norm1 = nn.Sequential()
+             self.norm2 = nn.Sequential()
+             self.norm3 = nn.Sequential()
+             if stride != 1:
+                 self.norm4 = nn.Sequential()
+
+         if stride == 1:
+             self.downsample = None
+         else:
+             self.downsample = nn.Sequential(
+                 nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
+
+     def forward(self, x):
+         y = x
+         y = self.relu(self.norm1(self.conv1(y)))
+         y = self.relu(self.norm2(self.conv2(y)))
+         y = self.relu(self.norm3(self.conv3(y)))
+
+         if self.downsample is not None:
+             x = self.downsample(x)
+
+         return self.relu(x + y)
+
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, in_planes, planes, norm_fn='group', stride=1):
+         super(ResidualBlock, self).__init__()
+
+         self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
+         self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
+         self.relu = nn.ReLU(inplace=True)
+
+         num_groups = planes // 8
+
+         if norm_fn == 'group':
+             self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
+             self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
+             if stride != 1:
+                 self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
+
+         elif norm_fn == 'batch':
+             self.norm1 = nn.BatchNorm2d(planes)
+             self.norm2 = nn.BatchNorm2d(planes)
+             if stride != 1:
+                 self.norm3 = nn.BatchNorm2d(planes)
+
+         elif norm_fn == 'instance':
+             self.norm1 = nn.InstanceNorm2d(planes)
+             self.norm2 = nn.InstanceNorm2d(planes)
+             if stride != 1:
+                 self.norm3 = nn.InstanceNorm2d(planes)
+
+         elif norm_fn == 'none':
+             self.norm1 = nn.Sequential()
+             self.norm2 = nn.Sequential()
+             if stride != 1:
+                 self.norm3 = nn.Sequential()
+
+         if stride == 1:
+             self.downsample = None
+         else:
+             self.downsample = nn.Sequential(
+                 nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
+
+     def forward(self, x):
+         y = x
+         y = self.relu(self.norm1(self.conv1(y)))
+         y = self.relu(self.norm2(self.conv2(y)))
+
+         if self.downsample is not None:
+             x = self.downsample(x)
+
+         return self.relu(x + y)
+
+
+ class SmallEncoder(nn.Module):
+     def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
+         super(SmallEncoder, self).__init__()
+         self.norm_fn = norm_fn
+
+         if self.norm_fn == 'group':
+             self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
+         elif self.norm_fn == 'batch':
+             self.norm1 = nn.BatchNorm2d(32)
+         elif self.norm_fn == 'instance':
+             self.norm1 = nn.InstanceNorm2d(32)
+         elif self.norm_fn == 'none':
+             self.norm1 = nn.Sequential()
+
+         self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
+         self.relu1 = nn.ReLU(inplace=True)
+
+         self.in_planes = 32
+         self.layer1 = self._make_layer(32, stride=1)
+         self.layer2 = self._make_layer(64, stride=2)
+         self.layer3 = self._make_layer(96, stride=2)
+
+         self.dropout = None
+         if dropout > 0:
+             self.dropout = nn.Dropout2d(p=dropout)
+
+         self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
+                 if m.weight is not None:
+                     nn.init.constant_(m.weight, 1)
+                 if m.bias is not None:
+                     nn.init.constant_(m.bias, 0)
+
+     def _make_layer(self, dim, stride=1):
+         layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
+         layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
+         layers = (layer1, layer2)
+
+         self.in_planes = dim
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         # if input is a list, combine along the batch dimension
+         is_list = isinstance(x, (tuple, list))
+         if is_list:
+             batch_dim = x[0].shape[0]
+             x = torch.cat(x, dim=0)
+
+         x = self.conv1(x)
+         x = self.norm1(x)
+         x = self.relu1(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.conv2(x)
+
+         if self.training and self.dropout is not None:
+             x = self.dropout(x)
+
+         if is_list:
+             x = torch.split(x, [batch_dim, batch_dim], dim=0)
+
+         return x
+
+ class BasicEncoder(nn.Module):
+     def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
+         super(BasicEncoder, self).__init__()
+         self.norm_fn = norm_fn
+
+         if self.norm_fn == 'group':
+             self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
+         elif self.norm_fn == 'batch':
+             self.norm1 = nn.BatchNorm2d(64)
+         elif self.norm_fn == 'instance':
+             self.norm1 = nn.InstanceNorm2d(64)
+         elif self.norm_fn == 'none':
+             self.norm1 = nn.Sequential()
+
+         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
+         self.relu1 = nn.ReLU(inplace=True)
+
+         self.in_planes = 64
+         self.layer1 = self._make_layer(64, stride=1)
+         self.layer2 = self._make_layer(72, stride=2)
+         self.layer3 = self._make_layer(128, stride=2)
+
+         # output convolution
+         self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
+
+         self.dropout = None
+         if dropout > 0:
+             self.dropout = nn.Dropout2d(p=dropout)
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
+                 if m.weight is not None:
+                     nn.init.constant_(m.weight, 1)
+                 if m.bias is not None:
+                     nn.init.constant_(m.bias, 0)
+
+     def _make_layer(self, dim, stride=1):
+         layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
+         layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
+         layers = (layer1, layer2)
+
+         self.in_planes = dim
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         # if input is a list, combine along the batch dimension
+         is_list = isinstance(x, (tuple, list))
+         if is_list:
+             batch_dim = x[0].shape[0]
+             x = torch.cat(x, dim=0)
+
+         x = self.conv1(x)
+         x = self.norm1(x)
+         x = self.relu1(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+
+         x = self.conv2(x)
+
+         if self.training and self.dropout is not None:
+             x = self.dropout(x)
+
+         if is_list:
+             x = torch.split(x, [batch_dim, batch_dim], dim=0)
+
+         return x
+
+ class LargeEncoder(nn.Module):
+     def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
+         super(LargeEncoder, self).__init__()
+         self.norm_fn = norm_fn
+
+         if self.norm_fn == 'group':
+             self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
+         elif self.norm_fn == 'batch':
+             self.norm1 = nn.BatchNorm2d(64)
+         elif self.norm_fn == 'instance':
+             self.norm1 = nn.InstanceNorm2d(64)
+         elif self.norm_fn == 'none':
+             self.norm1 = nn.Sequential()
+
+         self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
+         self.relu1 = nn.ReLU(inplace=True)
+
+         self.in_planes = 64
+         self.layer1 = self._make_layer(64, stride=1)
+         self.layer2 = self._make_layer(112, stride=2)
+         self.layer3 = self._make_layer(160, stride=2)
+         self.layer3_2 = self._make_layer(160, stride=1)
+
+         # output convolution
+         self.conv2 = nn.Conv2d(self.in_planes, output_dim, kernel_size=1)
+
+         self.dropout = None
+         if dropout > 0:
+             self.dropout = nn.Dropout2d(p=dropout)
+
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+             elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
+                 if m.weight is not None:
+                     nn.init.constant_(m.weight, 1)
+                 if m.bias is not None:
+                     nn.init.constant_(m.bias, 0)
+
+     def _make_layer(self, dim, stride=1):
+         layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
+         layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
+         layers = (layer1, layer2)
+
+         self.in_planes = dim
+         return nn.Sequential(*layers)
+
+     def forward(self, x):
+         # if input is a list, combine along the batch dimension
+         is_list = isinstance(x, (tuple, list))
+         if is_list:
+             batch_dim = x[0].shape[0]
+             x = torch.cat(x, dim=0)
+
+         x = self.conv1(x)
+         x = self.norm1(x)
+         x = self.relu1(x)
+
+         x = self.layer1(x)
+         x = self.layer2(x)
+         x = self.layer3(x)
+         x = self.layer3_2(x)
+
+         x = self.conv2(x)
+
+         if self.training and self.dropout is not None:
+             x = self.dropout(x)
+
+         if is_list:
+             x = torch.split(x, [batch_dim, batch_dim], dim=0)
+
+         return x
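All three encoders accept either a tensor or a pair of images: a list input is concatenated along the batch dimension, encoded in one forward pass, and split back, which is how the models get fmap0 and fmap1 from a single call. The pattern in isolation (generic encoder, illustrative only):

import torch

def encode_pair(encoder, img0, img1):
    b = img0.shape[0]
    x = torch.cat([img0, img1], dim=0)    # one batch holds both images
    feats = encoder(x)                    # single forward pass
    return torch.split(feats, [b, b], dim=0)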
networks/blocks/ifrnet.py ADDED
@@ -0,0 +1,113 @@
+ '''
+ This code is partially borrowed from IFRNet (https://github.com/ltkong218/IFRNet).
+ '''
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from utils import warp
+
+ def resize(x, scale_factor):
+     return F.interpolate(x, scale_factor=scale_factor, mode="bilinear", align_corners=False)
+
+ def convrelu(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=True):
+     return nn.Sequential(
+         nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=bias),
+         nn.PReLU(out_channels)
+     )
+
+ class ResBlock(nn.Module):
+     def __init__(self, in_channels, side_channels, bias=True):
+         super(ResBlock, self).__init__()
+         self.side_channels = side_channels
+         self.conv1 = nn.Sequential(
+             nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias),
+             nn.PReLU(in_channels)
+         )
+         self.conv2 = nn.Sequential(
+             nn.Conv2d(side_channels, side_channels, kernel_size=3, stride=1, padding=1, bias=bias),
+             nn.PReLU(side_channels)
+         )
+         self.conv3 = nn.Sequential(
+             nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias),
+             nn.PReLU(in_channels)
+         )
+         self.conv4 = nn.Sequential(
+             nn.Conv2d(side_channels, side_channels, kernel_size=3, stride=1, padding=1, bias=bias),
+             nn.PReLU(side_channels)
+         )
+         self.conv5 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=bias)
+         self.prelu = nn.PReLU(in_channels)
+
+     def forward(self, x):
+         out = self.conv1(x)
+
+         res_feat = out[:, :-self.side_channels, ...]
+         side_feat = out[:, -self.side_channels:, :, :]
+         side_feat = self.conv2(side_feat)
+         out = self.conv3(torch.cat([res_feat, side_feat], 1))
+
+         res_feat = out[:, :-self.side_channels, ...]
+         side_feat = out[:, -self.side_channels:, :, :]
+         side_feat = self.conv4(side_feat)
+         out = self.conv5(torch.cat([res_feat, side_feat], 1))
+
+         out = self.prelu(x + out)
+         return out
+
+ class Encoder(nn.Module):
+     def __init__(self, channels, large=False):
+         super(Encoder, self).__init__()
+         self.channels = channels
+         prev_ch = 3
+         for idx, ch in enumerate(channels, 1):
+             k = 7 if large and idx == 1 else 3
+             p = 3 if k == 7 else 1
+             self.register_module(f'pyramid{idx}',
+                                  nn.Sequential(
+                                      convrelu(prev_ch, ch, k, 2, p),
+                                      convrelu(ch, ch, 3, 1, 1)
+                                  ))
+             prev_ch = ch
+
+     def forward(self, in_x):
+         fs = []
+         for idx in range(len(self.channels)):
+             out_x = getattr(self, f'pyramid{idx+1}')(in_x)
+             fs.append(out_x)
+             in_x = out_x
+         return fs
+
+ class InitDecoder(nn.Module):
+     def __init__(self, in_ch, out_ch, skip_ch) -> None:
+         super().__init__()
+         self.convblock = nn.Sequential(
+             convrelu(in_ch*2+1, in_ch*2),
+             ResBlock(in_ch*2, skip_ch),
+             nn.ConvTranspose2d(in_ch*2, out_ch+4, 4, 2, 1, bias=True)
+         )
+
+     def forward(self, f0, f1, embt):
+         h, w = f0.shape[2:]
+         embt = embt.repeat(1, 1, h, w)
+         out = self.convblock(torch.cat([f0, f1, embt], 1))
+         flow0, flow1 = torch.chunk(out[:, :4, ...], 2, 1)
+         ft_ = out[:, 4:, ...]
+         return flow0, flow1, ft_
+
+ class IntermediateDecoder(nn.Module):
+     def __init__(self, in_ch, out_ch, skip_ch) -> None:
+         super().__init__()
+         self.convblock = nn.Sequential(
+             convrelu(in_ch*3+4, in_ch*3),
+             ResBlock(in_ch*3, skip_ch),
+             nn.ConvTranspose2d(in_ch*3, out_ch+4, 4, 2, 1, bias=True)
+         )
+
+     def forward(self, ft_, f0, f1, flow0_in, flow1_in):
+         f0_warp = warp(f0, flow0_in)
+         f1_warp = warp(f1, flow1_in)
+         f_in = torch.cat([ft_, f0_warp, f1_warp, flow0_in, flow1_in], 1)
+         out = self.convblock(f_in)
+         flow0, flow1 = torch.chunk(out[:, :4, ...], 2, 1)
+         ft_ = out[:, 4:, ...]
+         flow0 = flow0 + 2.0 * resize(flow0_in, scale_factor=2.0)
+         flow1 = flow1 + 2.0 * resize(flow1_in, scale_factor=2.0)
+         return flow0, flow1, ft_
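IntermediateDecoder's 'flow + 2.0 * resize(flow_in, scale_factor=2.0)' encodes a general rule: optical flow is measured in pixels, so upsampling a flow field to twice the resolution must also double its values. As a standalone helper (a sketch, not code from this commit):

import torch.nn.functional as F

def upsample_flow(flow, scale=2.0):
    # displacement vectors scale with resolution, unlike ordinary images
    up = F.interpolate(flow, scale_factor=scale, mode='bilinear', align_corners=False)
    return up * scale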
networks/blocks/multi_flow.py ADDED
@@ -0,0 +1,67 @@
+ import torch
+ import torch.nn as nn
+ from utils import warp
+ from networks.blocks.ifrnet import (
+     convrelu, resize,
+     ResBlock,
+ )
+
+ def multi_flow_combine(comb_block, img0, img1, flow0, flow1,
+                        mask=None, img_res=None, mean=None):
+     '''
+     A parallel implementation of multiple flow field warping.
+     comb_block: an nn.Sequential object.
+     img shape: [b, c, h, w]
+     flow shape: [b, 2*num_flows, h, w]
+     mask (opt):
+         If 'mask' is None, the function conducts a simple average.
+     img_res (opt):
+         If 'img_res' is None, the function adds zero instead.
+     mean (opt):
+         If 'mean' is None, the function adds zero instead.
+     '''
+     b, c, h, w = flow0.shape
+     num_flows = c // 2
+     flow0 = flow0.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
+     flow1 = flow1.reshape(b, num_flows, 2, h, w).reshape(-1, 2, h, w)
+
+     mask = mask.reshape(b, num_flows, 1, h, w
+                         ).reshape(-1, 1, h, w) if mask is not None else None
+     img_res = img_res.reshape(b, num_flows, 3, h, w
+                               ).reshape(-1, 3, h, w) if img_res is not None else 0
+     img0 = torch.stack([img0] * num_flows, 1).reshape(-1, 3, h, w)
+     img1 = torch.stack([img1] * num_flows, 1).reshape(-1, 3, h, w)
+     mean = torch.stack([mean] * num_flows, 1).reshape(-1, 1, 1, 1
+                                                       ) if mean is not None else 0
+
+     img0_warp = warp(img0, flow0)
+     img1_warp = warp(img1, flow1)
+     if mask is not None:
+         img_warps = mask * img0_warp + (1 - mask) * img1_warp + mean + img_res
+     else:
+         # per the docstring contract: without a mask, fall back to a simple average
+         img_warps = 0.5 * (img0_warp + img1_warp) + mean + img_res
+     img_warps = img_warps.reshape(b, num_flows, 3, h, w)
+     imgt_pred = img_warps.mean(1) + comb_block(img_warps.view(b, -1, h, w))
+     return imgt_pred
+
+ class MultiFlowDecoder(nn.Module):
+     def __init__(self, in_ch, skip_ch, num_flows=3):
+         super(MultiFlowDecoder, self).__init__()
+         self.num_flows = num_flows
+         self.convblock = nn.Sequential(
+             convrelu(in_ch*3+4, in_ch*3),
+             ResBlock(in_ch*3, skip_ch),
+             nn.ConvTranspose2d(in_ch*3, 8*num_flows, 4, 2, 1, bias=True)
+         )
+
+     def forward(self, ft_, f0, f1, flow0, flow1):
+         n = self.num_flows
+         f0_warp = warp(f0, flow0)
+         f1_warp = warp(f1, flow1)
+         out = self.convblock(torch.cat([ft_, f0_warp, f1_warp, flow0, flow1], 1))
+         delta_flow0, delta_flow1, mask, img_res = torch.split(out, [2*n, 2*n, n, 3*n], 1)
+         mask = torch.sigmoid(mask)
+
+         flow0 = delta_flow0 + 2.0 * resize(flow0, scale_factor=2.0
+                                            ).repeat(1, self.num_flows, 1, 1)
+         flow1 = delta_flow1 + 2.0 * resize(flow1, scale_factor=2.0
+                                            ).repeat(1, self.num_flows, 1, 1)
+
+         return flow0, flow1, mask, img_res
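multi_flow_combine parallelizes the warping by folding the num_flows hypotheses into the batch dimension; each hypothesis warps the full image, and comb_block refines the average of the candidates. The shape bookkeeping, checked in isolation:

import torch

b, n, h, w = 2, 5, 64, 64                 # batch, num_flows, height, width
flow0 = torch.randn(b, 2 * n, h, w)       # n stacked 2-channel flow fields
folded = flow0.reshape(b, n, 2, h, w).reshape(-1, 2, h, w)
assert folded.shape == (b * n, 2, h, w)   # one warp call covers all hypotheses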
networks/blocks/raft.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ This code is partially borrowed from RAFT (https://github.com/princeton-vl/RAFT).
3
+ '''
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
+ def resize(x, scale_factor):
9
+ return F.interpolate(x, scale_factor=scale_factor, mode="bilinear", align_corners=False)
10
+
11
+ def bilinear_sampler(img, coords, mask=False):
12
+ """ Wrapper for grid_sample, uses pixel coordinates """
13
+ H, W = img.shape[-2:]
14
+ xgrid, ygrid = coords.split([1,1], dim=-1)
15
+ xgrid = 2*xgrid/(W-1) - 1
16
+ ygrid = 2*ygrid/(H-1) - 1
17
+
18
+ grid = torch.cat([xgrid, ygrid], dim=-1)
19
+ img = F.grid_sample(img, grid, align_corners=True)
20
+
21
+ if mask:
22
+ mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
23
+ return img, mask.float()
24
+
25
+ return img
26
+
27
+ def coords_grid(batch, ht, wd, device):
28
+ coords = torch.meshgrid(torch.arange(ht, device=device),
29
+ torch.arange(wd, device=device),
30
+ indexing='ij')
31
+ coords = torch.stack(coords[::-1], dim=0).float()
32
+ return coords[None].repeat(batch, 1, 1, 1)
33
+
34
+ class SmallUpdateBlock(nn.Module):
35
+ def __init__(self, cdim, hidden_dim, flow_dim, corr_dim, fc_dim,
36
+ corr_levels=4, radius=3, scale_factor=None):
37
+ super(SmallUpdateBlock, self).__init__()
38
+ cor_planes = corr_levels * (2 * radius + 1) **2
39
+ self.scale_factor = scale_factor
40
+
41
+ self.convc1 = nn.Conv2d(2 * cor_planes, corr_dim, 1, padding=0)
42
+ self.convf1 = nn.Conv2d(4, flow_dim*2, 7, padding=3)
43
+ self.convf2 = nn.Conv2d(flow_dim*2, flow_dim, 3, padding=1)
44
+ self.conv = nn.Conv2d(corr_dim+flow_dim, fc_dim, 3, padding=1)
45
+
46
+ self.gru = nn.Sequential(
47
+ nn.Conv2d(fc_dim+4+cdim, hidden_dim, 3, padding=1),
48
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
49
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
50
+ )
51
+
52
+ self.feat_head = nn.Sequential(
53
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
54
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
55
+ nn.Conv2d(hidden_dim, cdim, 3, padding=1),
56
+ )
57
+
58
+ self.flow_head = nn.Sequential(
59
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
60
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
61
+ nn.Conv2d(hidden_dim, 4, 3, padding=1),
62
+ )
63
+
64
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
65
+
66
+ def forward(self, net, flow, corr):
67
+ net = resize(net, 1 / self.scale_factor
68
+ ) if self.scale_factor is not None else net
69
+ cor = self.lrelu(self.convc1(corr))
70
+ flo = self.lrelu(self.convf1(flow))
71
+ flo = self.lrelu(self.convf2(flo))
72
+ cor_flo = torch.cat([cor, flo], dim=1)
73
+ inp = self.lrelu(self.conv(cor_flo))
74
+ inp = torch.cat([inp, flow, net], dim=1)
75
+
76
+ out = self.gru(inp)
77
+ delta_net = self.feat_head(out)
78
+ delta_flow = self.flow_head(out)
79
+
80
+ if self.scale_factor is not None:
81
+ delta_net = resize(delta_net, scale_factor=self.scale_factor)
82
+ delta_flow = self.scale_factor * resize(delta_flow, scale_factor=self.scale_factor)
83
+
84
+ return delta_net, delta_flow
85
+
86
+ class BasicUpdateBlock(nn.Module):
87
+ def __init__(self, cdim, hidden_dim, flow_dim, corr_dim, corr_dim2,
88
+ fc_dim, corr_levels=4, radius=3, scale_factor=None, out_num=1):
89
+ super(BasicUpdateBlock, self).__init__()
90
+ cor_planes = corr_levels * (2 * radius + 1) **2
91
+
92
+ self.scale_factor = scale_factor
93
+ self.convc1 = nn.Conv2d(2 * cor_planes, corr_dim, 1, padding=0)
94
+ self.convc2 = nn.Conv2d(corr_dim, corr_dim2, 3, padding=1)
95
+ self.convf1 = nn.Conv2d(4, flow_dim*2, 7, padding=3)
96
+ self.convf2 = nn.Conv2d(flow_dim*2, flow_dim, 3, padding=1)
97
+ self.conv = nn.Conv2d(flow_dim+corr_dim2, fc_dim, 3, padding=1)
98
+
99
+ self.gru = nn.Sequential(
100
+ nn.Conv2d(fc_dim+4+cdim, hidden_dim, 3, padding=1),
101
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
102
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
103
+ )
104
+
105
+ self.feat_head = nn.Sequential(
106
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
107
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
108
+ nn.Conv2d(hidden_dim, cdim, 3, padding=1),
109
+ )
110
+
111
+ self.flow_head = nn.Sequential(
112
+ nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),
113
+ nn.LeakyReLU(negative_slope=0.1, inplace=True),
114
+ nn.Conv2d(hidden_dim, 4*out_num, 3, padding=1),
115
+ )
116
+
117
+ self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
118
+
119
+ def forward(self, net, flow, corr):
120
+ net = resize(net, 1 / self.scale_factor
121
+ ) if self.scale_factor is not None else net
122
+ cor = self.lrelu(self.convc1(corr))
123
+ cor = self.lrelu(self.convc2(cor))
124
+ flo = self.lrelu(self.convf1(flow))
125
+ flo = self.lrelu(self.convf2(flo))
126
+ cor_flo = torch.cat([cor, flo], dim=1)
127
+ inp = self.lrelu(self.conv(cor_flo))
128
+ inp = torch.cat([inp, flow, net], dim=1)
129
+
130
+ out = self.gru(inp)
131
+ delta_net = self.feat_head(out)
132
+ delta_flow = self.flow_head(out)
133
+
134
+ if self.scale_factor is not None:
135
+ delta_net = resize(delta_net, scale_factor=self.scale_factor)
136
+ delta_flow = self.scale_factor * resize(delta_flow, scale_factor=self.scale_factor)
137
+ return delta_net, delta_flow
138
+
+ class BidirCorrBlock:
+     def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
+         self.num_levels = num_levels
+         self.radius = radius
+         self.corr_pyramid = []
+         self.corr_pyramid_T = []
+
+         # All-pairs correlation volume; the transposed copy serves the
+         # reverse (fmap2 -> fmap1) direction.
+         corr = BidirCorrBlock.corr(fmap1, fmap2)
+         batch, h1, w1, dim, h2, w2 = corr.shape
+         corr_T = corr.clone().permute(0, 4, 5, 3, 1, 2)
+
+         corr = corr.reshape(batch*h1*w1, dim, h2, w2)
+         corr_T = corr_T.reshape(batch*h2*w2, dim, h1, w1)
+
+         self.corr_pyramid.append(corr)
+         self.corr_pyramid_T.append(corr_T)
+
+         # Pool the target dimensions to build the multi-scale pyramid.
+         for _ in range(self.num_levels-1):
+             corr = F.avg_pool2d(corr, 2, stride=2)
+             corr_T = F.avg_pool2d(corr_T, 2, stride=2)
+             self.corr_pyramid.append(corr)
+             self.corr_pyramid_T.append(corr_T)
+
+     def __call__(self, coords0, coords1):
+         r = self.radius
+         coords0 = coords0.permute(0, 2, 3, 1)
+         coords1 = coords1.permute(0, 2, 3, 1)
+         assert coords0.shape == coords1.shape, f"coords0 shape: [{coords0.shape}] is not equal to [{coords1.shape}]"
+         batch, h1, w1, _ = coords0.shape
+
+         out_pyramid = []
+         out_pyramid_T = []
+         for i in range(self.num_levels):
+             corr = self.corr_pyramid[i]
+             corr_T = self.corr_pyramid_T[i]
+
+             # (2r+1)x(2r+1) lookup window around each query coordinate.
+             dx = torch.linspace(-r, r, 2*r+1, device=coords0.device)
+             dy = torch.linspace(-r, r, 2*r+1, device=coords0.device)
+             delta = torch.stack(torch.meshgrid(dy, dx, indexing='ij'), dim=-1)
+             delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
+
+             centroid_lvl_0 = coords0.reshape(batch*h1*w1, 1, 1, 2) / 2**i
+             centroid_lvl_1 = coords1.reshape(batch*h1*w1, 1, 1, 2) / 2**i
+             coords_lvl_0 = centroid_lvl_0 + delta_lvl
+             coords_lvl_1 = centroid_lvl_1 + delta_lvl
+
+             corr = bilinear_sampler(corr, coords_lvl_0)
+             corr_T = bilinear_sampler(corr_T, coords_lvl_1)
+             corr = corr.view(batch, h1, w1, -1)
+             corr_T = corr_T.view(batch, h1, w1, -1)
+             out_pyramid.append(corr)
+             out_pyramid_T.append(corr_T)
+
+         out = torch.cat(out_pyramid, dim=-1)
+         out_T = torch.cat(out_pyramid_T, dim=-1)
+         return out.permute(0, 3, 1, 2).contiguous().float(), out_T.permute(0, 3, 1, 2).contiguous().float()
+
+     @staticmethod
+     def corr(fmap1, fmap2):
+         batch, dim, ht, wd = fmap1.shape
+         fmap1 = fmap1.view(batch, dim, ht*wd)
+         fmap2 = fmap2.view(batch, dim, ht*wd)
+
+         corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
+         corr = corr.view(batch, ht, wd, 1, ht, wd)
+         return corr / torch.sqrt(torch.tensor(dim).float())
+
+
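To pin down the lookup contract: the all-pairs volume is built once per frame pair, and each refinement step then queries an r-neighborhood around the current flow targets at every pyramid level, for both time directions at once. A hedged, illustrative sketch (it assumes the RAFT-style bilinear_sampler referenced above is in scope; all shapes are examples only):

    fmap0 = torch.randn(1, 128, 30, 40)
    fmap1 = torch.randn(1, 128, 30, 40)
    corr_fn = BidirCorrBlock(fmap0, fmap1, num_levels=4, radius=4)

    ys, xs = torch.meshgrid(torch.arange(30.), torch.arange(40.), indexing='ij')
    coords = torch.stack([xs, ys]).unsqueeze(0)   # 1x2x30x40 pixel coordinates
    corr0, corr1 = corr_fn(coords, coords)        # each 1x(4*9*9)x30x40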
utils.py ADDED
@@ -0,0 +1,229 @@
+ '''
+ This code is partially borrowed from IFRNet (https://github.com/ltkong218/IFRNet).
+ '''
+ import re
+ import sys
+ import torch
+ import random
+ import numpy as np
+ from PIL import ImageFile
+ import torch.nn.functional as F
+ from imageio import imread, imwrite
+ ImageFile.LOAD_TRUNCATED_IMAGES = True
+
+ class InputPadder:
+     """ Pads images such that dimensions are divisible by divisor """
+     def __init__(self, dims, divisor=16):
+         self.ht, self.wd = dims[-2:]
+         pad_ht = (((self.ht // divisor) + 1) * divisor - self.ht) % divisor
+         pad_wd = (((self.wd // divisor) + 1) * divisor - self.wd) % divisor
+         # F.pad order for the last two dims: (left, right, top, bottom)
+         self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
+
+     def pad(self, *inputs):
+         if len(inputs) == 1:
+             return F.pad(inputs[0], self._pad, mode='replicate')
+         else:
+             return [F.pad(x, self._pad, mode='replicate') for x in inputs]
+
+     def unpad(self, *inputs):
+         if len(inputs) == 1:
+             return self._unpad(inputs[0])
+         else:
+             return [self._unpad(x) for x in inputs]
+
+     def _unpad(self, x):
+         ht, wd = x.shape[-2:]
+         c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
+         return x[..., c[0]:c[1], c[2]:c[3]]
+
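A hedged usage sketch for InputPadder, mirroring how the demos wrap a model call (tensor names are illustrative):

    x0 = torch.randn(1, 3, 360, 640)
    x1 = torch.randn(1, 3, 360, 640)
    padder = InputPadder(x0.shape, divisor=16)
    x0_p, x1_p = padder.pad(x0, x1)   # 360x640 -> 368x640, divisible by 16
    y = padder.unpad(x0_p)            # crops back to 360x640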
+ def img2tensor(img):
+     # HWC uint8 image -> 1xCxHxW float tensor in [0, 1]
+     return torch.tensor(img).permute(2, 0, 1).unsqueeze(0) / 255.0
+
+ def tensor2img(img_t):
+     # 1xCxHxW float tensor in [0, 1] -> HWC uint8 image
+     return (img_t * 255.).detach().squeeze(0).permute(
+         1, 2, 0).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+
+ def read(file):
+     if file.endswith('.float3'): return readFloat(file)
+     elif file.endswith('.flo'): return readFlow(file)
+     elif file.endswith('.ppm'): return readImage(file)
+     elif file.endswith('.pgm'): return readImage(file)
+     elif file.endswith('.png'): return readImage(file)
+     elif file.endswith('.jpg'): return readImage(file)
+     elif file.endswith('.pfm'): return readPFM(file)[0]
+     else: raise Exception('don\'t know how to read %s' % file)
+
+ def write(file, data):
+     if file.endswith('.float3'): return writeFloat(file, data)
+     elif file.endswith('.flo'): return writeFlow(file, data)
+     elif file.endswith('.ppm'): return writeImage(file, data)
+     elif file.endswith('.pgm'): return writeImage(file, data)
+     elif file.endswith('.png'): return writeImage(file, data)
+     elif file.endswith('.jpg'): return writeImage(file, data)
+     elif file.endswith('.pfm'): return writePFM(file, data)
+     else: raise Exception('don\'t know how to write %s' % file)
+
+ def readPFM(file):
+     file = open(file, 'rb')
+
+     color = None
+     width = None
+     height = None
+     scale = None
+     endian = None
+
+     header = file.readline().rstrip()
+     if header.decode("ascii") == 'PF':
+         color = True
+     elif header.decode("ascii") == 'Pf':
+         color = False
+     else:
+         raise Exception('Not a PFM file.')
+
+     dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode("ascii"))
+     if dim_match:
+         width, height = list(map(int, dim_match.groups()))
+     else:
+         raise Exception('Malformed PFM header.')
+
+     scale = float(file.readline().decode("ascii").rstrip())
+     if scale < 0:  # a negative scale marks little-endian data
+         endian = '<'
+         scale = -scale
+     else:
+         endian = '>'
+
+     data = np.fromfile(file, endian + 'f')
+     shape = (height, width, 3) if color else (height, width)
+
+     data = np.reshape(data, shape)
+     data = np.flipud(data)  # PFM stores rows bottom-to-top
+     return data, scale
+
+ def writePFM(file, image, scale=1):
+     file = open(file, 'wb')
+
+     color = None
+
+     if image.dtype.name != 'float32':
+         raise Exception('Image dtype must be float32.')
+
+     image = np.flipud(image)
+
+     if len(image.shape) == 3 and image.shape[2] == 3:
+         color = True
+     elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:
+         color = False
+     else:
+         raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
+
+     # note: .encode() must wrap the whole conditional expression, otherwise
+     # the 'PF' branch would write a str to a binary file
+     file.write(('PF\n' if color else 'Pf\n').encode())
+     file.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode())
+
+     endian = image.dtype.byteorder
+
+     if endian == '<' or endian == '=' and sys.byteorder == 'little':
+         scale = -scale  # negative scale marks the data as little-endian
+
+     file.write(('%f\n' % scale).encode())
+
+     image.tofile(file)
+
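A hedged sanity check for the PFM pair above (the temporary file name is illustrative; both functions also rely on CPython flushing the file object when it falls out of scope, since neither calls close()):

    img = np.random.rand(8, 8, 3).astype(np.float32)
    writePFM('tmp.pfm', img)
    restored, scale = readPFM('tmp.pfm')
    assert np.allclose(restored, img) and scale == 1.0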
+ def readFlow(name):
+     if name.endswith('.pfm') or name.endswith('.PFM'):
+         return readPFM(name)[0][:,:,0:2]
+
+     f = open(name, 'rb')
+
+     header = f.read(4)
+     if header.decode("utf-8") != 'PIEH':
+         raise Exception('Flow file header does not contain PIEH')
+
+     width = np.fromfile(f, np.int32, 1).squeeze()
+     height = np.fromfile(f, np.int32, 1).squeeze()
+
+     flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
+
+     return flow.astype(np.float32)
+
+ def readImage(name):
+     if name.endswith('.pfm') or name.endswith('.PFM'):
+         data = readPFM(name)[0]
+         if len(data.shape) == 3:
+             return data[:,:,0:3]
+         else:
+             return data
+     return imread(name)
+
+ def writeImage(name, data):
+     if name.endswith('.pfm') or name.endswith('.PFM'):
+         return writePFM(name, data, 1)
+     return imwrite(name, data)
+
+ def writeFlow(name, flow):
+     f = open(name, 'wb')
+     f.write('PIEH'.encode('utf-8'))
+     np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
+     flow = flow.astype(np.float32)
+     flow.tofile(f)
+
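And a matching hedged round-trip for the Middlebury .flo format (file name illustrative, same flush caveat as above):

    flow = np.random.randn(4, 5, 2).astype(np.float32)
    writeFlow('tmp.flo', flow)
    assert np.allclose(readFlow('tmp.flo'), flow)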
+ def readFloat(name):
+     f = open(name, 'rb')
+
+     if f.readline().decode("utf-8") != 'float\n':
+         raise Exception('float file %s did not contain <float> keyword' % name)
+
+     dim = int(f.readline())
+
+     dims = []
+     count = 1
+     for i in range(0, dim):
+         d = int(f.readline())
+         dims.append(d)
+         count *= d
+
+     dims = list(reversed(dims))
+
+     data = np.fromfile(f, np.float32, count).reshape(dims)
+     if dim > 2:
+         data = np.transpose(data, (2, 1, 0))
+         data = np.transpose(data, (1, 0, 2))
+
+     return data
+
+ def writeFloat(name, data):
+     f = open(name, 'wb')
+
+     dim = len(data.shape)
+     if dim > 3:
+         raise Exception('bad float file dimension: %d' % dim)
+
+     f.write(('float\n').encode('ascii'))
+     f.write(('%d\n' % dim).encode('ascii'))
+
+     if dim == 1:
+         f.write(('%d\n' % data.shape[0]).encode('ascii'))
+     else:
+         f.write(('%d\n' % data.shape[1]).encode('ascii'))
+         f.write(('%d\n' % data.shape[0]).encode('ascii'))
+         for i in range(2, dim):
+             f.write(('%d\n' % data.shape[i]).encode('ascii'))
+
+     data = data.astype(np.float32)
+     if dim == 2:
+         data.tofile(f)
+     else:
+         np.transpose(data, (2, 0, 1)).tofile(f)
+
+ def warp(img, flow):
+     B, _, H, W = flow.shape
+     # Base sampling grid in normalized [-1, 1] coordinates.
+     xx = torch.linspace(-1.0, 1.0, W).view(1, 1, 1, W).expand(B, -1, H, -1)
+     yy = torch.linspace(-1.0, 1.0, H).view(1, 1, H, 1).expand(B, -1, -1, W)
+     grid = torch.cat([xx, yy], 1).to(img)
+     # Convert the flow from pixel units to the same normalized coordinates.
+     flow_ = torch.cat([flow[:, 0:1, :, :] / ((W - 1.0) / 2.0), flow[:, 1:2, :, :] / ((H - 1.0) / 2.0)], 1)
+     grid_ = (grid + flow_).permute(0, 2, 3, 1)
+     output = F.grid_sample(input=img, grid=grid_, mode='bilinear', padding_mode='border', align_corners=True)
+     return output
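A closing usage note: warp is a backward warp, i.e. output[:, :, y, x] samples img at (x + flow_x, y + flow_y), so callers pass the flow from the output frame back to img. A minimal hedged check:

    img = torch.randn(1, 3, 64, 64)
    flow = torch.zeros(1, 2, 64, 64)   # zero flow: identity warp
    assert torch.allclose(warp(img, flow), img, atol=1e-5)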