7767517
9168 9167
pnnx.Input pnnx_input_0 0 1 a.1 #a.1=(1,3,384,384)f32
pnnx.Input pnnx_input_1 0 1 b.1 #b.1=(1,3,384,384)f32
pnnx.Attribute pnnx_4 0 1 137 @pnnx_4=(1,3,1,1)f32 #137=(1,3,1,1)f32
pnnx.Expression pnnx_expr_17779 0 1 106 expr=[0,int(0),0,int(0)]
pnnx.Expression pnnx_expr_17754 0 1 133 expr=[0,int(0),0,int(0)]
F.pad F.pad_0 2 1 a.1 106 a0.1 mode=reflect $input=a.1 $pad=106 #a.1=(1,3,384,384)f32 #a0.1=(1,3,384,384)f32
aten::type_as pnnx_58 2 1 137 a0.1 139 #137=(1,3,1,1)f32 #a0.1=(1,3,384,384)f32 #139=(1,3,1,1)f32
pnnx.Expression pnnx_expr_17752 2 1 a0.1 139 143 expr=sub(@0,@1) #a0.1=(1,3,384,384)f32 #139=(1,3,1,1)f32 #143=(1,3,384,384)f32
F.pad F.pad_1 2 1 b.1 133 b0.1 mode=reflect $input=b.1 $pad=133 #b.1=(1,3,384,384)f32 #b0.1=(1,3,384,384)f32
pnnx.Expression pnnx_expr_17750 2 1 b0.1 139 149 expr=sub(@0,@1) #b0.1=(1,3,384,384)f32 #139=(1,3,1,1)f32 #149=(1,3,384,384)f32
nn.Conv2d conv_first 1 1 143 154 bias=True dilation=(1,1) groups=1 in_channels=3 kernel_size=(8,8) out_channels=192 padding=(0,0) padding_mode=zeros stride=(8,8) @bias=(192)f32 @weight=(192,3,8,8)f32 #143=(1,3,384,384)f32 #154=(1,192,48,48)f32
nn.Conv2d pnnx_unique_0 1 1 149 157 bias=True dilation=(1,1) groups=1 in_channels=3 kernel_size=(8,8) out_channels=192 padding=(0,0) padding_mode=zeros stride=(8,8) @bias=(192)f32 @weight=(192,3,8,8)f32 #149=(1,3,384,384)f32 #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17585 0 1 2244 expr=-1
pnnx.Expression pnnx_expr_17584 0 1 2245 expr=2
pnnx.Expression pnnx_expr_17583 0 1 2246 expr=1
pnnx.Expression pnnx_expr_17582 0 1 20028 expr=2
torch.flatten torch.flatten_2183 3 1 154 2245 2244 2248 $input=154 $start_dim=2245 $end_dim=2244 #154=(1,192,48,48)f32 #2248=(1,192,2304)f32
torch.transpose torch.transpose_2960 3 1 2248 2246 20028 input.9 $input=2248 $dim0=2246 $dim1=20028 #2248=(1,192,2304)f32 #input.9=(1,2304,192)f32
nn.LayerNorm patch_embed_dfe.norm 1 1 input.9 2250 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.9=(1,2304,192)f32 #2250=(1,2304,192)f32
nn.Dropout pos_drop_dfe 1 1 2250 642 #2250=(1,2304,192)f32 #642=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.0.norm1 1 1 642 2294 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #642=(1,2304,192)f32 #2294=(1,2304,192)f32
pnnx.Expression pnnx_expr_17559 2 1 642 154 2295 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #642=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_974 2 1 2294 2295 x.3 $input=2294 $shape=2295 #2294=(1,2304,192)f32 #x.3=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17538 1 1 x.3 2312 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.3=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17533 0 1 2314 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_975 2 1 x.3 2312 x0.3 $input=x.3 $shape=2312 #x.3=(1,48,48,192)f32 #x0.3=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17530 1 1 x.3 2317 expr=[-1,8,8,int(size(@0,3))] #x.3=(1,48,48,192)f32
torch.permute torch.permute_2528 2 1 x0.3 2314 2315 $input=x0.3 $dims=2314 #x0.3=(1,6,8,6,8,192)f32 #2315=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_2 1 1 2315 2316 memory_format=torch.contiguous_format $input=2315 #2315=(1,6,6,8,8,192)f32 #2316=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17528 1 1 642 2319 expr=[-1,64,int(size(@0,2))] #642=(1,2304,192)f32
pnnx.Expression pnnx_expr_17518 0 1 2330 expr=-2
pnnx.Expression pnnx_expr_17517 0 1 2331 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.0.attn 0 1 relative_position_bias_table.3 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.3=(225,6)f32
pnnx.Attribute pnnx_unique_1 0 1 relative_position_index.3 @relative_position_index=(64,64)i64 #relative_position_index.3=(64,64)i64
Tensor.view Tensor.view_976 2 1 2316 2317 x_windows.3 $input=2316 $shape=2317 #2316=(1,6,6,8,8,192)f32 #x_windows.3=(36,8,8,192)f32
Tensor.view Tensor.view_977 2 1 x_windows.3 2319 x1.3 $input=x_windows.3 $shape=2319 #x_windows.3=(36,8,8,192)f32 #x1.3=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.0.attn.qkv 1 1 x1.3 2351 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.3=(36,64,192)f32 #2351=(36,64,576)f32
pnnx.Expression pnnx_expr_17502 1 1 x1.3 2354 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.3=(36,64,192)f32
pnnx.Expression pnnx_expr_17497 0 1 2356 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_434 2 1 2351 2354 2355 $input=2351 $shape=2354 #2351=(36,64,576)f32 #2355=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_17496 0 1 20046 expr=0
pnnx.Expression pnnx_expr_17495 0 1 20047 expr=0
pnnx.Expression pnnx_expr_17494 0 1 20048 expr=0
pnnx.Expression pnnx_expr_17493 0 1 20049 expr=1
pnnx.Expression pnnx_expr_17492 0 1 20050 expr=0
pnnx.Expression pnnx_expr_17491 0 1 20051 expr=2
torch.permute torch.permute_2529 2 1 2355 2356 qkv0.3 $input=2355 $dims=2356 #2355=(36,64,3,6,32)f32 #qkv0.3=(3,36,6,64,32)f32
Tensor.select Tensor.select_650 3 1 qkv0.3 20046 20047 q.3 $input=qkv0.3 $dim=20046 $index=20047 #qkv0.3=(3,36,6,64,32)f32 #q.3=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17490 1 1 q.3 q0.3 expr=mul(@0,1.767767e-01) #q.3=(36,6,64,32)f32 #q0.3=(36,6,64,32)f32
Tensor.select Tensor.select_651 3 1 qkv0.3 20048 20049 k.3 $input=qkv0.3 $dim=20048 $index=20049 #qkv0.3=(3,36,6,64,32)f32 #k.3=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17488 0 1 2364 expr=[-1]
Tensor.view Tensor.view_978 2 1 relative_position_index.3 2364 2365 $input=relative_position_index.3 $shape=2364 #relative_position_index.3=(64,64)i64 #2365=(4096)i64
pnnx.Expression pnnx_expr_17487 1 1 2365 2366 expr=[@0] #2365=(4096)i64
pnnx.Expression pnnx_expr_17484 0 1 2368 expr=[64,64,-1]
Tensor.index Tensor.index_326 2 1 relative_position_bias_table.3 2366 2367 $input=relative_position_bias_table.3 $expr=2366 #relative_position_bias_table.3=(225,6)f32 #2367=(4096,6)f32
pnnx.Expression pnnx_expr_17480 0 1 2370 expr=[2,0,1]
Tensor.view Tensor.view_979 2 1 2367 2368 relative_position_bias.3 $input=2367 $shape=2368 #2367=(4096,6)f32 #relative_position_bias.3=(64,64,6)f32
pnnx.Expression pnnx_expr_17479 0 1 20059 expr=0
torch.permute torch.permute_2530 2 1 relative_position_bias.3 2370 2371 $input=relative_position_bias.3 $dims=2370 #relative_position_bias.3=(64,64,6)f32 #2371=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_3 1 1 2371 relative_position_bias0.3 memory_format=torch.contiguous_format $input=2371 #2371=(6,64,64)f32 #relative_position_bias0.3=(6,64,64)f32
torch.transpose torch.transpose_2961 3 1 k.3 2330 2331 2362 $input=k.3 $dim0=2330 $dim1=2331 #k.3=(36,6,64,32)f32 #2362=(36,6,32,64)f32
torch.matmul torch.matmul_2204 2 1 q0.3 2362 attn.7 $input=q0.3 $other=2362 #q0.3=(36,6,64,32)f32 #2362=(36,6,32,64)f32 #attn.7=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3218 2 1 relative_position_bias0.3 20059 2373 $input=relative_position_bias0.3 $dim=20059 #relative_position_bias0.3=(6,64,64)f32 #2373=(1,6,64,64)f32
pnnx.Expression pnnx_expr_17477 2 1 attn.7 2373 input.11 expr=add(@0,@1) #attn.7=(36,6,64,64)f32 #2373=(1,6,64,64)f32 #input.11=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.0.attn.softmax 1 1 input.11 2375 dim=-1 #input.11=(36,6,64,64)f32 #2375=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.0.attn.attn_drop 1 1 2375 2376 #2375=(36,6,64,64)f32 #2376=(36,6,64,64)f32
Tensor.select Tensor.select_652 3 1 qkv0.3 20050 20051 v.3 $input=qkv0.3 $dim=20050 $index=20051 #qkv0.3=(3,36,6,64,32)f32 #v.3=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17476 0 1 20061 expr=1
pnnx.Expression pnnx_expr_17475 0 1 20062 expr=2
torch.matmul torch.matmul_2205 2 1 2376 v.3 2377 $input=2376 $other=v.3 #2376=(36,6,64,64)f32 #v.3=(36,6,64,32)f32 #2377=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17474 1 1 x1.3 2379 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.3=(36,64,192)f32
torch.transpose torch.transpose_2962 3 1 2377 20061 20062 2378 $input=2377 $dim0=20061 $dim1=20062 #2377=(36,6,64,32)f32 #2378=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_435 2 1 2378 2379 input0.5 $input=2378 $shape=2379 #2378=(36,64,6,32)f32 #input0.5=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.0.attn.proj 1 1 input0.5 2381 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.5=(36,64,192)f32 #2381=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.0.attn.proj_drop 1 1 2381 2382 #2381=(36,64,192)f32 #2382=(36,64,192)f32
pnnx.Expression pnnx_expr_17470 1 1 642 2383 expr=[-1,8,8,int(size(@0,2))] #642=(1,2304,192)f32
pnnx.Expression pnnx_expr_17457 1 1 154 2389 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17450 0 1 2391 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_980 2 1 2382 2383 windows.3 $input=2382 $shape=2383 #2382=(36,64,192)f32 #windows.3=(36,8,8,192)f32
Tensor.view Tensor.view_981 2 1 windows.3 2389 x2.3 $input=windows.3 $shape=2389 #windows.3=(36,8,8,192)f32 #x2.3=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17447 1 1 154 2394 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2531 2 1 x2.3 2391 2392 $input=x2.3 $dims=2391 #x2.3=(1,6,6,8,8,192)f32 #2392=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_4 1 1 2392 2393 memory_format=torch.contiguous_format $input=2392 #2392=(1,6,8,6,8,192)f32 #2393=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17444 2 1 642 154 2398 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #642=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_982 2 1 2393 2394 x3.3 $input=2393 $shape=2394 #2393=(1,6,8,6,8,192)f32 #x3.3=(1,48,48,192)f32
Tensor.view Tensor.view_983 2 1 x3.3 2398 x4.3 $input=x3.3 $shape=2398 #x3.3=(1,48,48,192)f32 #x4.3=(1,2304,192)f32
pnnx.Expression pnnx_expr_17442 2 1 642 x4.3 input.13 expr=add(@0,@1) #642=(1,2304,192)f32 #x4.3=(1,2304,192)f32 #input.13=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.0.norm2 1 1 input.13 2402 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.13=(1,2304,192)f32 #2402=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.0.mlp.fc1 1 1 2402 2407 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #2402=(1,2304,192)f32 #2407=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.0.mlp.act 1 1 2407 2408 #2407=(1,2304,384)f32 #2408=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.0.mlp.drop 1 1 2408 2409 #2408=(1,2304,384)f32 #2409=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.0.mlp.fc2 1 1 2409 2410 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #2409=(1,2304,384)f32 #2410=(1,2304,192)f32
nn.Dropout pnnx_unique_2 1 1 2410 2411 #2410=(1,2304,192)f32 #2411=(1,2304,192)f32
pnnx.Expression pnnx_expr_17440 2 1 input.13 2411 2413 expr=add(@0,@1) #input.13=(1,2304,192)f32 #2411=(1,2304,192)f32 #2413=(1,2304,192)f32
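The operators above trace one complete window-attention block: LayerNorm, an 8x8 window partition (Tensor.view_975 / torch.permute_2528 / Tensor.view_976), a fused qkv projection split apart by Tensor.select, q scaled by 1.767767e-01 = 1/sqrt(32) (head_dim = 192 / 6 heads = 32), q@k^T plus a relative position bias gathered from the (225,6) table, softmax, attention @ v, output projection, window merge, and the residual add (pnnx_expr_17440). Each param line reads: operator type, name, input count, output count, operand names, then key=value attributes; the #name=(shape)dtype entries are shape annotations. A minimal PyTorch sketch of what this subgraph computes (dropouts omitted; qkv and proj stand for the traced nn.Linear layers):

import torch

def window_partition(x, ws=8):
    # (B, H, W, C) -> (B * H/ws * W/ws, ws*ws, C), as in Tensor.view_975 .. Tensor.view_977
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws * ws, C)

def window_attention(x, qkv, proj, bias_table, rel_index, num_heads=6):
    # x: (nW, 64, 192); bias_table: (225, num_heads); rel_index: (64, 64) int64
    nW, N, C = x.shape
    hd = C // num_heads
    q, k, v = qkv(x).reshape(nW, N, 3, num_heads, hd).permute(2, 0, 3, 1, 4)
    attn = (q * hd ** -0.5) @ k.transpose(-2, -1)          # torch.matmul_2204, scale = 0.1767767
    bias = bias_table[rel_index.view(-1)].view(N, N, -1)   # Tensor.index_326 / Tensor.view_979
    attn = attn + bias.permute(2, 0, 1).contiguous().unsqueeze(0)
    attn = attn.softmax(dim=-1)                            # nn.Softmax dim=-1
    return proj((attn @ v).transpose(1, 2).reshape(nW, N, C))

# e.g. windows = window_partition(torch.randn(1, 48, 48, 192))   # -> (36, 64, 192)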
pnnx.Attribute layers_dfe.0.residual_group.blocks.1 0 1 attn_mask.3 @attn_mask=(36,64,64)f32 #attn_mask.3=(36,64,64)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.1.norm1 1 1 2413 2442 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #2413=(1,2304,192)f32 #2442=(1,2304,192)f32
pnnx.Expression pnnx_expr_17417 2 1 2413 154 2443 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #2413=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17415 0 1 2445 expr=[-4,-4]
pnnx.Expression pnnx_expr_17413 0 1 2446 expr=[1,2]
Tensor.view Tensor.view_984 2 1 2442 2443 x.5 $input=2442 $shape=2443 #2442=(1,2304,192)f32 #x.5=(1,48,48,192)f32
torch.roll torch.roll_2420 3 1 x.5 2445 2446 x0.5 $input=x.5 $shifts=2445 $dims=2446 #x.5=(1,48,48,192)f32 #x0.5=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17391 1 1 x0.5 2463 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.5=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17386 0 1 2465 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_985 2 1 x0.5 2463 x1.5 $input=x0.5 $shape=2463 #x0.5=(1,48,48,192)f32 #x1.5=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17383 1 1 x0.5 2468 expr=[-1,8,8,int(size(@0,3))] #x0.5=(1,48,48,192)f32
torch.permute torch.permute_2532 2 1 x1.5 2465 2466 $input=x1.5 $dims=2465 #x1.5=(1,6,8,6,8,192)f32 #2466=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_5 1 1 2466 2467 memory_format=torch.contiguous_format $input=2466 #2466=(1,6,6,8,8,192)f32 #2467=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17381 1 1 2413 2470 expr=[-1,64,int(size(@0,2))] #2413=(1,2304,192)f32
pnnx.Expression pnnx_expr_17371 0 1 2481 expr=-2
pnnx.Expression pnnx_expr_17370 0 1 2482 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.1.attn 0 1 relative_position_bias_table.5 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.5=(225,6)f32
pnnx.Attribute pnnx_unique_3 0 1 relative_position_index.5 @relative_position_index=(64,64)i64 #relative_position_index.5=(64,64)i64
Tensor.view Tensor.view_986 2 1 2467 2468 x_windows.5 $input=2467 $shape=2468 #2467=(1,6,6,8,8,192)f32 #x_windows.5=(36,8,8,192)f32
Tensor.view Tensor.view_987 2 1 x_windows.5 2470 x2.5 $input=x_windows.5 $shape=2470 #x_windows.5=(36,8,8,192)f32 #x2.5=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.1.attn.qkv 1 1 x2.5 2506 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.5=(36,64,192)f32 #2506=(36,64,576)f32
pnnx.Expression pnnx_expr_17351 1 1 x2.5 2509 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.5=(36,64,192)f32
pnnx.Expression pnnx_expr_17346 0 1 2511 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_436 2 1 2506 2509 2510 $input=2506 $shape=2509 #2506=(36,64,576)f32 #2510=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_17345 0 1 20105 expr=0
pnnx.Expression pnnx_expr_17344 0 1 20106 expr=0
pnnx.Expression pnnx_expr_17343 0 1 20107 expr=0
pnnx.Expression pnnx_expr_17342 0 1 20108 expr=1
pnnx.Expression pnnx_expr_17341 0 1 20109 expr=0
pnnx.Expression pnnx_expr_17340 0 1 20110 expr=2
torch.permute torch.permute_2533 2 1 2510 2511 qkv0.5 $input=2510 $dims=2511 #2510=(36,64,3,6,32)f32 #qkv0.5=(3,36,6,64,32)f32
Tensor.select Tensor.select_653 3 1 qkv0.5 20105 20106 q.5 $input=qkv0.5 $dim=20105 $index=20106 #qkv0.5=(3,36,6,64,32)f32 #q.5=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17339 1 1 q.5 q0.5 expr=mul(@0,1.767767e-01) #q.5=(36,6,64,32)f32 #q0.5=(36,6,64,32)f32
Tensor.select Tensor.select_654 3 1 qkv0.5 20107 20108 k.5 $input=qkv0.5 $dim=20107 $index=20108 #qkv0.5=(3,36,6,64,32)f32 #k.5=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17337 0 1 2519 expr=[-1]
Tensor.view Tensor.view_988 2 1 relative_position_index.5 2519 2520 $input=relative_position_index.5 $shape=2519 #relative_position_index.5=(64,64)i64 #2520=(4096)i64
pnnx.Expression pnnx_expr_17336 1 1 2520 2521 expr=[@0] #2520=(4096)i64
pnnx.Expression pnnx_expr_17333 0 1 2523 expr=[64,64,-1]
Tensor.index Tensor.index_327 2 1 relative_position_bias_table.5 2521 2522 $input=relative_position_bias_table.5 $expr=2521 #relative_position_bias_table.5=(225,6)f32 #2522=(4096,6)f32
pnnx.Expression pnnx_expr_17329 0 1 2525 expr=[2,0,1]
Tensor.view Tensor.view_989 2 1 2522 2523 relative_position_bias.5 $input=2522 $shape=2523 #2522=(4096,6)f32 #relative_position_bias.5=(64,64,6)f32
pnnx.Expression pnnx_expr_17328 0 1 20118 expr=0
torch.permute torch.permute_2534 2 1 relative_position_bias.5 2525 2526 $input=relative_position_bias.5 $dims=2525 #relative_position_bias.5=(64,64,6)f32 #2526=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_6 1 1 2526 relative_position_bias0.5 memory_format=torch.contiguous_format $input=2526 #2526=(6,64,64)f32 #relative_position_bias0.5=(6,64,64)f32
torch.transpose torch.transpose_2963 3 1 k.5 2481 2482 2517 $input=k.5 $dim0=2481 $dim1=2482 #k.5=(36,6,64,32)f32 #2517=(36,6,32,64)f32
torch.matmul torch.matmul_2206 2 1 q0.5 2517 attn.11 $input=q0.5 $other=2517 #q0.5=(36,6,64,32)f32 #2517=(36,6,32,64)f32 #attn.11=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3219 2 1 relative_position_bias0.5 20118 2528 $input=relative_position_bias0.5 $dim=20118 #relative_position_bias0.5=(6,64,64)f32 #2528=(1,6,64,64)f32
pnnx.Expression pnnx_expr_17326 2 1 attn.11 2528 attn0.3 expr=add(@0,@1) #attn.11=(36,6,64,64)f32 #2528=(1,6,64,64)f32 #attn0.3=(36,6,64,64)f32
pnnx.Expression pnnx_expr_17317 2 1 x2.5 attn_mask.3 2535 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.5=(36,64,192)f32 #attn_mask.3=(36,64,64)f32
pnnx.Expression pnnx_expr_17316 0 1 20123 expr=1
pnnx.Expression pnnx_expr_17315 0 1 20124 expr=0
Tensor.view Tensor.view_990 2 1 attn0.3 2535 2536 $input=attn0.3 $shape=2535 #attn0.3=(36,6,64,64)f32 #2536=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3220 2 1 attn_mask.3 20123 2537 $input=attn_mask.3 $dim=20123 #attn_mask.3=(36,64,64)f32 #2537=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3221 2 1 2537 20124 2538 $input=2537 $dim=20124 #2537=(36,1,64,64)f32 #2538=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_17313 2 1 2536 2538 attn1.3 expr=add(@0,@1) #2536=(1,36,6,64,64)f32 #2538=(1,36,1,64,64)f32 #attn1.3=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_17310 1 1 x2.5 2540 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.5=(36,64,192)f32
Tensor.view Tensor.view_991 2 1 attn1.3 2540 input.15 $input=attn1.3 $shape=2540 #attn1.3=(1,36,6,64,64)f32 #input.15=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.1.attn.softmax 1 1 input.15 2542 dim=-1 #input.15=(36,6,64,64)f32 #2542=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.1.attn.attn_drop 1 1 2542 2543 #2542=(36,6,64,64)f32 #2543=(36,6,64,64)f32
Tensor.select Tensor.select_655 3 1 qkv0.5 20109 20110 v.5 $input=qkv0.5 $dim=20109 $index=20110 #qkv0.5=(3,36,6,64,32)f32 #v.5=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17309 0 1 20128 expr=1
pnnx.Expression pnnx_expr_17308 0 1 20129 expr=2
torch.matmul torch.matmul_2207 2 1 2543 v.5 2544 $input=2543 $other=v.5 #2543=(36,6,64,64)f32 #v.5=(36,6,64,32)f32 #2544=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17307 1 1 x2.5 2546 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.5=(36,64,192)f32
torch.transpose torch.transpose_2964 3 1 2544 20128 20129 2545 $input=2544 $dim0=20128 $dim1=20129 #2544=(36,6,64,32)f32 #2545=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_437 2 1 2545 2546 input0.7 $input=2545 $shape=2546 #2545=(36,64,6,32)f32 #input0.7=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.1.attn.proj 1 1 input0.7 2548 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.7=(36,64,192)f32 #2548=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.1.attn.proj_drop 1 1 2548 2549 #2548=(36,64,192)f32 #2549=(36,64,192)f32
pnnx.Expression pnnx_expr_17303 1 1 2413 2550 expr=[-1,8,8,int(size(@0,2))] #2413=(1,2304,192)f32
pnnx.Expression pnnx_expr_17290 1 1 154 2556 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17283 0 1 2558 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_992 2 1 2549 2550 windows.5 $input=2549 $shape=2550 #2549=(36,64,192)f32 #windows.5=(36,8,8,192)f32
Tensor.view Tensor.view_993 2 1 windows.5 2556 x3.5 $input=windows.5 $shape=2556 #windows.5=(36,8,8,192)f32 #x3.5=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17280 1 1 154 2561 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2535 2 1 x3.5 2558 2559 $input=x3.5 $dims=2558 #x3.5=(1,6,6,8,8,192)f32 #2559=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_7 1 1 2559 2560 memory_format=torch.contiguous_format $input=2559 #2559=(1,6,8,6,8,192)f32 #2560=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17277 0 1 2563 expr=[4,4]
pnnx.Expression pnnx_expr_17274 0 1 2564 expr=[1,2]
Tensor.view Tensor.view_994 2 1 2560 2561 shifted_x.3 $input=2560 $shape=2561 #2560=(1,6,8,6,8,192)f32 #shifted_x.3=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17271 2 1 2413 154 2568 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #2413=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2421 3 1 shifted_x.3 2563 2564 x4.5 $input=shifted_x.3 $shifts=2563 $dims=2564 #shifted_x.3=(1,48,48,192)f32 #x4.5=(1,48,48,192)f32
Tensor.view Tensor.view_995 2 1 x4.5 2568 x5.3 $input=x4.5 $shape=2568 #x4.5=(1,48,48,192)f32 #x5.3=(1,2304,192)f32
pnnx.Expression pnnx_expr_17269 2 1 2413 x5.3 input.17 expr=add(@0,@1) #2413=(1,2304,192)f32 #x5.3=(1,2304,192)f32 #input.17=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.1.norm2 1 1 input.17 2572 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.17=(1,2304,192)f32 #2572=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.1.mlp.fc1 1 1 2572 2577 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #2572=(1,2304,192)f32 #2577=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.1.mlp.act 1 1 2577 2578 #2577=(1,2304,384)f32 #2578=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.1.mlp.drop 1 1 2578 2579 #2578=(1,2304,384)f32 #2579=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.1.mlp.fc2 1 1 2579 2580 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #2579=(1,2304,384)f32 #2580=(1,2304,192)f32
nn.Dropout pnnx_unique_4 1 1 2580 2581 #2580=(1,2304,192)f32 #2581=(1,2304,192)f32
pnnx.Expression pnnx_expr_17267 2 1 input.17 2581 2583 expr=add(@0,@1) #input.17=(1,2304,192)f32 #2581=(1,2304,192)f32 #2583=(1,2304,192)f32
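Block 1 is the shifted (SW-MSA) variant of the same pattern: the 48x48 feature map is cyclically rolled by (-4,-4) before partitioning (torch.roll_2420), the precomputed (36,64,64) attn_mask attribute is broadcast-added to the per-window scores so that tokens wrapped together by the roll cannot attend to one another, and the shift is undone after the windows are merged back (torch.roll_2421 with shifts (4,4)). A sketch of the mask application step (Tensor.view_990 through Tensor.view_991), with shapes as annotated above:

import torch

def apply_window_mask(attn, mask):
    # attn: (B*nW, heads, N, N) scores; mask: (nW, N, N) stored attn_mask attribute
    nW, N, _ = mask.shape
    B, heads = attn.shape[0] // nW, attn.shape[1]
    attn = attn.view(B, nW, heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
    return attn.view(-1, heads, N, N)

# e.g. scores = apply_window_mask(torch.randn(36, 6, 64, 64), torch.zeros(36, 64, 64))
# surrounding shift/unshift pair, as traced:
# x = torch.roll(x, shifts=(-4, -4), dims=(1, 2))   # before window partition
# x = torch.roll(x, shifts=(4, 4), dims=(1, 2))     # after merging windows back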
nn.LayerNorm layers_dfe.0.residual_group.blocks.2.norm1 1 1 2583 2610 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #2583=(1,2304,192)f32 #2610=(1,2304,192)f32
pnnx.Expression pnnx_expr_17245 2 1 2583 154 2611 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #2583=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_996 2 1 2610 2611 x.7 $input=2610 $shape=2611 #2610=(1,2304,192)f32 #x.7=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17224 1 1 x.7 2628 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.7=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17219 0 1 2630 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_997 2 1 x.7 2628 x0.7 $input=x.7 $shape=2628 #x.7=(1,48,48,192)f32 #x0.7=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17216 1 1 x.7 2633 expr=[-1,8,8,int(size(@0,3))] #x.7=(1,48,48,192)f32
torch.permute torch.permute_2536 2 1 x0.7 2630 2631 $input=x0.7 $dims=2630 #x0.7=(1,6,8,6,8,192)f32 #2631=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_8 1 1 2631 2632 memory_format=torch.contiguous_format $input=2631 #2631=(1,6,6,8,8,192)f32 #2632=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17214 1 1 2583 2635 expr=[-1,64,int(size(@0,2))] #2583=(1,2304,192)f32
pnnx.Expression pnnx_expr_17204 0 1 2646 expr=-2
pnnx.Expression pnnx_expr_17203 0 1 2647 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.2.attn 0 1 relative_position_bias_table.7 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.7=(225,6)f32
pnnx.Attribute pnnx_unique_5 0 1 relative_position_index.7 @relative_position_index=(64,64)i64 #relative_position_index.7=(64,64)i64
Tensor.view Tensor.view_998 2 1 2632 2633 x_windows.7 $input=2632 $shape=2633 #2632=(1,6,6,8,8,192)f32 #x_windows.7=(36,8,8,192)f32
Tensor.view Tensor.view_999 2 1 x_windows.7 2635 x1.7 $input=x_windows.7 $shape=2635 #x_windows.7=(36,8,8,192)f32 #x1.7=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.2.attn.qkv 1 1 x1.7 2667 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.7=(36,64,192)f32 #2667=(36,64,576)f32
pnnx.Expression pnnx_expr_17188 1 1 x1.7 2670 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.7=(36,64,192)f32
pnnx.Expression pnnx_expr_17183 0 1 2672 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_438 2 1 2667 2670 2671 $input=2667 $shape=2670 #2667=(36,64,576)f32 #2671=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_17182 0 1 20173 expr=0
pnnx.Expression pnnx_expr_17181 0 1 20174 expr=0
pnnx.Expression pnnx_expr_17180 0 1 20175 expr=0
pnnx.Expression pnnx_expr_17179 0 1 20176 expr=1
pnnx.Expression pnnx_expr_17178 0 1 20177 expr=0
pnnx.Expression pnnx_expr_17177 0 1 20178 expr=2
torch.permute torch.permute_2537 2 1 2671 2672 qkv0.7 $input=2671 $dims=2672 #2671=(36,64,3,6,32)f32 #qkv0.7=(3,36,6,64,32)f32
Tensor.select Tensor.select_656 3 1 qkv0.7 20173 20174 q.7 $input=qkv0.7 $dim=20173 $index=20174 #qkv0.7=(3,36,6,64,32)f32 #q.7=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17176 1 1 q.7 q0.7 expr=mul(@0,1.767767e-01) #q.7=(36,6,64,32)f32 #q0.7=(36,6,64,32)f32
Tensor.select Tensor.select_657 3 1 qkv0.7 20175 20176 k.7 $input=qkv0.7 $dim=20175 $index=20176 #qkv0.7=(3,36,6,64,32)f32 #k.7=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17174 0 1 2680 expr=[-1]
Tensor.view Tensor.view_1000 2 1 relative_position_index.7 2680 2681 $input=relative_position_index.7 $shape=2680 #relative_position_index.7=(64,64)i64 #2681=(4096)i64
pnnx.Expression pnnx_expr_17173 1 1 2681 2682 expr=[@0] #2681=(4096)i64
pnnx.Expression pnnx_expr_17170 0 1 2684 expr=[64,64,-1]
Tensor.index Tensor.index_328 2 1 relative_position_bias_table.7 2682 2683 $input=relative_position_bias_table.7 $expr=2682 #relative_position_bias_table.7=(225,6)f32 #2683=(4096,6)f32
pnnx.Expression pnnx_expr_17166 0 1 2686 expr=[2,0,1]
Tensor.view Tensor.view_1001 2 1 2683 2684 relative_position_bias.7 $input=2683 $shape=2684 #2683=(4096,6)f32 #relative_position_bias.7=(64,64,6)f32
pnnx.Expression pnnx_expr_17165 0 1 20186 expr=0
torch.permute torch.permute_2538 2 1 relative_position_bias.7 2686 2687 $input=relative_position_bias.7 $dims=2686 #relative_position_bias.7=(64,64,6)f32 #2687=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_9 1 1 2687 relative_position_bias0.7 memory_format=torch.contiguous_format $input=2687 #2687=(6,64,64)f32 #relative_position_bias0.7=(6,64,64)f32
torch.transpose torch.transpose_2965 3 1 k.7 2646 2647 2678 $input=k.7 $dim0=2646 $dim1=2647 #k.7=(36,6,64,32)f32 #2678=(36,6,32,64)f32
torch.matmul torch.matmul_2208 2 1 q0.7 2678 attn.15 $input=q0.7 $other=2678 #q0.7=(36,6,64,32)f32 #2678=(36,6,32,64)f32 #attn.15=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3222 2 1 relative_position_bias0.7 20186 2689 $input=relative_position_bias0.7 $dim=20186 #relative_position_bias0.7=(6,64,64)f32 #2689=(1,6,64,64)f32
pnnx.Expression pnnx_expr_17163 2 1 attn.15 2689 input.19 expr=add(@0,@1) #attn.15=(36,6,64,64)f32 #2689=(1,6,64,64)f32 #input.19=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.2.attn.softmax 1 1 input.19 2691 dim=-1 #input.19=(36,6,64,64)f32 #2691=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.2.attn.attn_drop 1 1 2691 2692 #2691=(36,6,64,64)f32 #2692=(36,6,64,64)f32
Tensor.select Tensor.select_658 3 1 qkv0.7 20177 20178 v.7 $input=qkv0.7 $dim=20177 $index=20178 #qkv0.7=(3,36,6,64,32)f32 #v.7=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17162 0 1 20188 expr=1
pnnx.Expression pnnx_expr_17161 0 1 20189 expr=2
torch.matmul torch.matmul_2209 2 1 2692 v.7 2693 $input=2692 $other=v.7 #2692=(36,6,64,64)f32 #v.7=(36,6,64,32)f32 #2693=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17160 1 1 x1.7 2695 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.7=(36,64,192)f32
torch.transpose torch.transpose_2966 3 1 2693 20188 20189 2694 $input=2693 $dim0=20188 $dim1=20189 #2693=(36,6,64,32)f32 #2694=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_439 2 1 2694 2695 input0.9 $input=2694 $shape=2695 #2694=(36,64,6,32)f32 #input0.9=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.2.attn.proj 1 1 input0.9 2697 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.9=(36,64,192)f32 #2697=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.2.attn.proj_drop 1 1 2697 2698 #2697=(36,64,192)f32 #2698=(36,64,192)f32
pnnx.Expression pnnx_expr_17156 1 1 2583 2699 expr=[-1,8,8,int(size(@0,2))] #2583=(1,2304,192)f32
pnnx.Expression pnnx_expr_17143 1 1 154 2705 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17136 0 1 2707 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1002 2 1 2698 2699 windows.7 $input=2698 $shape=2699 #2698=(36,64,192)f32 #windows.7=(36,8,8,192)f32
Tensor.view Tensor.view_1003 2 1 windows.7 2705 x2.7 $input=windows.7 $shape=2705 #windows.7=(36,8,8,192)f32 #x2.7=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17133 1 1 154 2710 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2539 2 1 x2.7 2707 2708 $input=x2.7 $dims=2707 #x2.7=(1,6,6,8,8,192)f32 #2708=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_10 1 1 2708 2709 memory_format=torch.contiguous_format $input=2708 #2708=(1,6,8,6,8,192)f32 #2709=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17130 2 1 2583 154 2714 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #2583=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1004 2 1 2709 2710 x3.7 $input=2709 $shape=2710 #2709=(1,6,8,6,8,192)f32 #x3.7=(1,48,48,192)f32
Tensor.view Tensor.view_1005 2 1 x3.7 2714 x4.7 $input=x3.7 $shape=2714 #x3.7=(1,48,48,192)f32 #x4.7=(1,2304,192)f32
pnnx.Expression pnnx_expr_17128 2 1 2583 x4.7 input.21 expr=add(@0,@1) #2583=(1,2304,192)f32 #x4.7=(1,2304,192)f32 #input.21=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.2.norm2 1 1 input.21 2718 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.21=(1,2304,192)f32 #2718=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.2.mlp.fc1 1 1 2718 2723 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #2718=(1,2304,192)f32 #2723=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.2.mlp.act 1 1 2723 2724 #2723=(1,2304,384)f32 #2724=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.2.mlp.drop 1 1 2724 2725 #2724=(1,2304,384)f32 #2725=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.2.mlp.fc2 1 1 2725 2726 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #2725=(1,2304,384)f32 #2726=(1,2304,192)f32
nn.Dropout pnnx_unique_6 1 1 2726 2727 #2726=(1,2304,192)f32 #2727=(1,2304,192)f32
pnnx.Expression pnnx_expr_17126 2 1 input.21 2727 2729 expr=add(@0,@1) #input.21=(1,2304,192)f32 #2727=(1,2304,192)f32 #2729=(1,2304,192)f32
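Every attention block carries two constants: a (225,6) relative_position_bias_table and a (64,64) relative_position_index. 225 = (2*8-1)^2 is the number of distinct relative offsets between two positions of an 8x8 window, and the index maps each query/key pair to a table row (gathered by Tensor.index_328, then reshaped to (64,64,6)). The index itself is baked into the file; a sketch of how Swin-style models typically construct it (requires torch >= 1.10 for indexing="ij"):

import torch

def make_relative_position_index(ws=8):
    # (64, 64) int64 tensor whose entries select rows of the (2*ws-1)**2 = 225-row bias table
    coords = torch.stack(torch.meshgrid(torch.arange(ws), torch.arange(ws), indexing="ij"))
    coords = coords.flatten(1)                       # (2, ws*ws)
    rel = coords[:, :, None] - coords[:, None, :]    # (2, ws*ws, ws*ws) relative offsets
    rel = rel.permute(1, 2, 0) + (ws - 1)            # shift offsets to start at 0
    return rel[..., 0] * (2 * ws - 1) + rel[..., 1]  # flatten (dy, dx) into one index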
pnnx.Attribute layers_dfe.0.residual_group.blocks.3 0 1 attn_mask.5 @attn_mask=(36,64,64)f32 #attn_mask.5=(36,64,64)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.3.norm1 1 1 2729 2758 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #2729=(1,2304,192)f32 #2758=(1,2304,192)f32
pnnx.Expression pnnx_expr_17103 2 1 2729 154 2759 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #2729=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_17101 0 1 2761 expr=[-4,-4]
pnnx.Expression pnnx_expr_17099 0 1 2762 expr=[1,2]
Tensor.view Tensor.view_1006 2 1 2758 2759 x.9 $input=2758 $shape=2759 #2758=(1,2304,192)f32 #x.9=(1,48,48,192)f32
torch.roll torch.roll_2422 3 1 x.9 2761 2762 x0.9 $input=x.9 $shifts=2761 $dims=2762 #x.9=(1,48,48,192)f32 #x0.9=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17077 1 1 x0.9 2779 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.9=(1,48,48,192)f32
pnnx.Expression pnnx_expr_17072 0 1 2781 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1007 2 1 x0.9 2779 x1.9 $input=x0.9 $shape=2779 #x0.9=(1,48,48,192)f32 #x1.9=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_17069 1 1 x0.9 2784 expr=[-1,8,8,int(size(@0,3))] #x0.9=(1,48,48,192)f32
torch.permute torch.permute_2540 2 1 x1.9 2781 2782 $input=x1.9 $dims=2781 #x1.9=(1,6,8,6,8,192)f32 #2782=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_11 1 1 2782 2783 memory_format=torch.contiguous_format $input=2782 #2782=(1,6,6,8,8,192)f32 #2783=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_17067 1 1 2729 2786 expr=[-1,64,int(size(@0,2))] #2729=(1,2304,192)f32
pnnx.Expression pnnx_expr_17057 0 1 2797 expr=-2
pnnx.Expression pnnx_expr_17056 0 1 2798 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.3.attn 0 1 relative_position_bias_table.9 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.9=(225,6)f32
pnnx.Attribute pnnx_unique_7 0 1 relative_position_index.9 @relative_position_index=(64,64)i64 #relative_position_index.9=(64,64)i64
Tensor.view Tensor.view_1008 2 1 2783 2784 x_windows.9 $input=2783 $shape=2784 #2783=(1,6,6,8,8,192)f32 #x_windows.9=(36,8,8,192)f32
Tensor.view Tensor.view_1009 2 1 x_windows.9 2786 x2.9 $input=x_windows.9 $shape=2786 #x_windows.9=(36,8,8,192)f32 #x2.9=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.3.attn.qkv 1 1 x2.9 2822 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.9=(36,64,192)f32 #2822=(36,64,576)f32
pnnx.Expression pnnx_expr_17037 1 1 x2.9 2825 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.9=(36,64,192)f32
pnnx.Expression pnnx_expr_17032 0 1 2827 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_440 2 1 2822 2825 2826 $input=2822 $shape=2825 #2822=(36,64,576)f32 #2826=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_17031 0 1 20232 expr=0
pnnx.Expression pnnx_expr_17030 0 1 20233 expr=0
pnnx.Expression pnnx_expr_17029 0 1 20234 expr=0
pnnx.Expression pnnx_expr_17028 0 1 20235 expr=1
pnnx.Expression pnnx_expr_17027 0 1 20236 expr=0
pnnx.Expression pnnx_expr_17026 0 1 20237 expr=2
torch.permute torch.permute_2541 2 1 2826 2827 qkv0.9 $input=2826 $dims=2827 #2826=(36,64,3,6,32)f32 #qkv0.9=(3,36,6,64,32)f32
Tensor.select Tensor.select_659 3 1 qkv0.9 20232 20233 q.9 $input=qkv0.9 $dim=20232 $index=20233 #qkv0.9=(3,36,6,64,32)f32 #q.9=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17025 1 1 q.9 q0.9 expr=mul(@0,1.767767e-01) #q.9=(36,6,64,32)f32 #q0.9=(36,6,64,32)f32
Tensor.select Tensor.select_660 3 1 qkv0.9 20234 20235 k.9 $input=qkv0.9 $dim=20234 $index=20235 #qkv0.9=(3,36,6,64,32)f32 #k.9=(36,6,64,32)f32
pnnx.Expression pnnx_expr_17023 0 1 2835 expr=[-1]
Tensor.view Tensor.view_1010 2 1 relative_position_index.9 2835 2836 $input=relative_position_index.9 $shape=2835 #relative_position_index.9=(64,64)i64 #2836=(4096)i64
pnnx.Expression pnnx_expr_17022 1 1 2836 2837 expr=[@0] #2836=(4096)i64
pnnx.Expression pnnx_expr_17019 0 1 2839 expr=[64,64,-1]
Tensor.index Tensor.index_329 2 1 relative_position_bias_table.9 2837 2838 $input=relative_position_bias_table.9 $expr=2837 #relative_position_bias_table.9=(225,6)f32 #2838=(4096,6)f32
pnnx.Expression pnnx_expr_17015 0 1 2841 expr=[2,0,1]
Tensor.view Tensor.view_1011 2 1 2838 2839 relative_position_bias.9 $input=2838 $shape=2839 #2838=(4096,6)f32 #relative_position_bias.9=(64,64,6)f32
pnnx.Expression pnnx_expr_17014 0 1 20245 expr=0
torch.permute torch.permute_2542 2 1 relative_position_bias.9 2841 2842 $input=relative_position_bias.9 $dims=2841 #relative_position_bias.9=(64,64,6)f32 #2842=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_12 1 1 2842 relative_position_bias0.9 memory_format=torch.contiguous_format $input=2842 #2842=(6,64,64)f32 #relative_position_bias0.9=(6,64,64)f32
torch.transpose torch.transpose_2967 3 1 k.9 2797 2798 2833 $input=k.9 $dim0=2797 $dim1=2798 #k.9=(36,6,64,32)f32 #2833=(36,6,32,64)f32
torch.matmul torch.matmul_2210 2 1 q0.9 2833 attn.19 $input=q0.9 $other=2833 #q0.9=(36,6,64,32)f32 #2833=(36,6,32,64)f32 #attn.19=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3223 2 1 relative_position_bias0.9 20245 2844 $input=relative_position_bias0.9 $dim=20245 #relative_position_bias0.9=(6,64,64)f32 #2844=(1,6,64,64)f32
pnnx.Expression pnnx_expr_17012 2 1 attn.19 2844 attn0.5 expr=add(@0,@1) #attn.19=(36,6,64,64)f32 #2844=(1,6,64,64)f32 #attn0.5=(36,6,64,64)f32
pnnx.Expression pnnx_expr_17003 2 1 x2.9 attn_mask.5 2851 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.9=(36,64,192)f32 #attn_mask.5=(36,64,64)f32
pnnx.Expression pnnx_expr_17002 0 1 20250 expr=1
pnnx.Expression pnnx_expr_17001 0 1 20251 expr=0
Tensor.view Tensor.view_1012 2 1 attn0.5 2851 2852 $input=attn0.5 $shape=2851 #attn0.5=(36,6,64,64)f32 #2852=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3224 2 1 attn_mask.5 20250 2853 $input=attn_mask.5 $dim=20250 #attn_mask.5=(36,64,64)f32 #2853=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3225 2 1 2853 20251 2854 $input=2853 $dim=20251 #2853=(36,1,64,64)f32 #2854=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_16999 2 1 2852 2854 attn1.5 expr=add(@0,@1) #2852=(1,36,6,64,64)f32 #2854=(1,36,1,64,64)f32 #attn1.5=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_16996 1 1 x2.9 2856 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.9=(36,64,192)f32
Tensor.view Tensor.view_1013 2 1 attn1.5 2856 input.23 $input=attn1.5 $shape=2856 #attn1.5=(1,36,6,64,64)f32 #input.23=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.3.attn.softmax 1 1 input.23 2858 dim=-1 #input.23=(36,6,64,64)f32 #2858=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.3.attn.attn_drop 1 1 2858 2859 #2858=(36,6,64,64)f32 #2859=(36,6,64,64)f32
Tensor.select Tensor.select_661 3 1 qkv0.9 20236 20237 v.9 $input=qkv0.9 $dim=20236 $index=20237 #qkv0.9=(3,36,6,64,32)f32 #v.9=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16995 0 1 20255 expr=1
pnnx.Expression pnnx_expr_16994 0 1 20256 expr=2
torch.matmul torch.matmul_2211 2 1 2859 v.9 2860 $input=2859 $other=v.9 #2859=(36,6,64,64)f32 #v.9=(36,6,64,32)f32 #2860=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16993 1 1 x2.9 2862 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.9=(36,64,192)f32
torch.transpose torch.transpose_2968 3 1 2860 20255 20256 2861 $input=2860 $dim0=20255 $dim1=20256 #2860=(36,6,64,32)f32 #2861=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_441 2 1 2861 2862 input0.11 $input=2861 $shape=2862 #2861=(36,64,6,32)f32 #input0.11=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.3.attn.proj 1 1 input0.11 2864 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.11=(36,64,192)f32 #2864=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.3.attn.proj_drop 1 1 2864 2865 #2864=(36,64,192)f32 #2865=(36,64,192)f32
pnnx.Expression pnnx_expr_16989 1 1 2729 2866 expr=[-1,8,8,int(size(@0,2))] #2729=(1,2304,192)f32
pnnx.Expression pnnx_expr_16976 1 1 154 2872 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_16969 0 1 2874 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1014 2 1 2865 2866 windows.9 $input=2865 $shape=2866 #2865=(36,64,192)f32 #windows.9=(36,8,8,192)f32
Tensor.view Tensor.view_1015 2 1 windows.9 2872 x3.9 $input=windows.9 $shape=2872 #windows.9=(36,8,8,192)f32 #x3.9=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_16966 1 1 154 2877 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2543 2 1 x3.9 2874 2875 $input=x3.9 $dims=2874 #x3.9=(1,6,6,8,8,192)f32 #2875=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_13 1 1 2875 2876 memory_format=torch.contiguous_format $input=2875 #2875=(1,6,8,6,8,192)f32 #2876=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_16963 0 1 2879 expr=[4,4]
pnnx.Expression pnnx_expr_16960 0 1 2880 expr=[1,2]
Tensor.view Tensor.view_1016 2 1 2876 2877 shifted_x.5 $input=2876 $shape=2877 #2876=(1,6,8,6,8,192)f32 #shifted_x.5=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16957 2 1 2729 154 2884 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #2729=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2423 3 1 shifted_x.5 2879 2880 x4.9 $input=shifted_x.5 $shifts=2879 $dims=2880 #shifted_x.5=(1,48,48,192)f32 #x4.9=(1,48,48,192)f32
Tensor.view Tensor.view_1017 2 1 x4.9 2884 x5.5 $input=x4.9 $shape=2884 #x4.9=(1,48,48,192)f32 #x5.5=(1,2304,192)f32
pnnx.Expression pnnx_expr_16955 2 1 2729 x5.5 input.25 expr=add(@0,@1) #2729=(1,2304,192)f32 #x5.5=(1,2304,192)f32 #input.25=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.3.norm2 1 1 input.25 2888 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.25=(1,2304,192)f32 #2888=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.3.mlp.fc1 1 1 2888 2893 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #2888=(1,2304,192)f32 #2893=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.3.mlp.act 1 1 2893 2894 #2893=(1,2304,384)f32 #2894=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.3.mlp.drop 1 1 2894 2895 #2894=(1,2304,384)f32 #2895=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.3.mlp.fc2 1 1 2895 2896 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #2895=(1,2304,384)f32 #2896=(1,2304,192)f32
nn.Dropout pnnx_unique_8 1 1 2896 2897 #2896=(1,2304,192)f32 #2897=(1,2304,192)f32
pnnx.Expression pnnx_expr_16953 2 1 input.25 2897 2899 expr=add(@0,@1) #input.25=(1,2304,192)f32 #2897=(1,2304,192)f32 #2899=(1,2304,192)f32
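The (36,64,64) attn_mask attribute on each shifted block is not computed inside the graph; it is stored as a constant. For reference, this is how SwinIR builds such a mask for a 48x48 map with 8x8 windows and shift 4 (a sketch following the Swin convention of -100 on forbidden pairs; the dump only records the stored tensor's shape, so the exact values here are assumed):

import torch

def make_attn_mask(H=48, W=48, ws=8, shift=4):
    # label the nine shifted regions, window-partition the labels, and forbid
    # attention between positions whose labels differ
    img = torch.zeros(1, H, W, 1)
    cnt = 0
    for h in (slice(0, -ws), slice(-ws, -shift), slice(-shift, None)):
        for w in (slice(0, -ws), slice(-ws, -shift), slice(-shift, None)):
            img[:, h, w, :] = cnt
            cnt += 1
    win = img.view(1, H // ws, ws, W // ws, ws, 1)
    win = win.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws * ws)   # (36, 64) window labels
    mask = win.unsqueeze(1) - win.unsqueeze(2)                 # (36, 64, 64)
    return mask.masked_fill(mask != 0, -100.0)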
nn.LayerNorm layers_dfe.0.residual_group.blocks.4.norm1 1 1 2899 2926 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #2899=(1,2304,192)f32 #2926=(1,2304,192)f32
pnnx.Expression pnnx_expr_16931 2 1 2899 154 2927 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #2899=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1018 2 1 2926 2927 x.11 $input=2926 $shape=2927 #2926=(1,2304,192)f32 #x.11=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16910 1 1 x.11 2944 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.11=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16905 0 1 2946 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1019 2 1 x.11 2944 x0.11 $input=x.11 $shape=2944 #x.11=(1,48,48,192)f32 #x0.11=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_16902 1 1 x.11 2949 expr=[-1,8,8,int(size(@0,3))] #x.11=(1,48,48,192)f32
torch.permute torch.permute_2544 2 1 x0.11 2946 2947 $input=x0.11 $dims=2946 #x0.11=(1,6,8,6,8,192)f32 #2947=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_14 1 1 2947 2948 memory_format=torch.contiguous_format $input=2947 #2947=(1,6,6,8,8,192)f32 #2948=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_16900 1 1 2899 2951 expr=[-1,64,int(size(@0,2))] #2899=(1,2304,192)f32
pnnx.Expression pnnx_expr_16890 0 1 2962 expr=-2
pnnx.Expression pnnx_expr_16889 0 1 2963 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.4.attn 0 1 relative_position_bias_table.11 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.11=(225,6)f32
pnnx.Attribute pnnx_unique_9 0 1 relative_position_index.11 @relative_position_index=(64,64)i64 #relative_position_index.11=(64,64)i64
Tensor.view Tensor.view_1020 2 1 2948 2949 x_windows.11 $input=2948 $shape=2949 #2948=(1,6,6,8,8,192)f32 #x_windows.11=(36,8,8,192)f32
Tensor.view Tensor.view_1021 2 1 x_windows.11 2951 x1.11 $input=x_windows.11 $shape=2951 #x_windows.11=(36,8,8,192)f32 #x1.11=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.4.attn.qkv 1 1 x1.11 2983 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.11=(36,64,192)f32 #2983=(36,64,576)f32
pnnx.Expression pnnx_expr_16874 1 1 x1.11 2986 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.11=(36,64,192)f32
pnnx.Expression pnnx_expr_16869 0 1 2988 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_442 2 1 2983 2986 2987 $input=2983 $shape=2986 #2983=(36,64,576)f32 #2987=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_16868 0 1 20300 expr=0
pnnx.Expression pnnx_expr_16867 0 1 20301 expr=0
pnnx.Expression pnnx_expr_16866 0 1 20302 expr=0
pnnx.Expression pnnx_expr_16865 0 1 20303 expr=1
pnnx.Expression pnnx_expr_16864 0 1 20304 expr=0
pnnx.Expression pnnx_expr_16863 0 1 20305 expr=2
torch.permute torch.permute_2545 2 1 2987 2988 qkv0.11 $input=2987 $dims=2988 #2987=(36,64,3,6,32)f32 #qkv0.11=(3,36,6,64,32)f32
Tensor.select Tensor.select_662 3 1 qkv0.11 20300 20301 q.11 $input=qkv0.11 $dim=20300 $index=20301 #qkv0.11=(3,36,6,64,32)f32 #q.11=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16862 1 1 q.11 q0.11 expr=mul(@0,1.767767e-01) #q.11=(36,6,64,32)f32 #q0.11=(36,6,64,32)f32
Tensor.select Tensor.select_663 3 1 qkv0.11 20302 20303 k.11 $input=qkv0.11 $dim=20302 $index=20303 #qkv0.11=(3,36,6,64,32)f32 #k.11=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16860 0 1 2996 expr=[-1]
Tensor.view Tensor.view_1022 2 1 relative_position_index.11 2996 2997 $input=relative_position_index.11 $shape=2996 #relative_position_index.11=(64,64)i64 #2997=(4096)i64
pnnx.Expression pnnx_expr_16859 1 1 2997 2998 expr=[@0] #2997=(4096)i64
pnnx.Expression pnnx_expr_16856 0 1 3000 expr=[64,64,-1]
Tensor.index Tensor.index_330 2 1 relative_position_bias_table.11 2998 2999 $input=relative_position_bias_table.11 $expr=2998 #relative_position_bias_table.11=(225,6)f32 #2999=(4096,6)f32
pnnx.Expression pnnx_expr_16852 0 1 3002 expr=[2,0,1]
Tensor.view Tensor.view_1023 2 1 2999 3000 relative_position_bias.11 $input=2999 $shape=3000 #2999=(4096,6)f32 #relative_position_bias.11=(64,64,6)f32
pnnx.Expression pnnx_expr_16851 0 1 20313 expr=0
torch.permute torch.permute_2546 2 1 relative_position_bias.11 3002 3003 $input=relative_position_bias.11 $dims=3002 #relative_position_bias.11=(64,64,6)f32 #3003=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_15 1 1 3003 relative_position_bias0.11 memory_format=torch.contiguous_format $input=3003 #3003=(6,64,64)f32 #relative_position_bias0.11=(6,64,64)f32
torch.transpose torch.transpose_2969 3 1 k.11 2962 2963 2994 $input=k.11 $dim0=2962 $dim1=2963 #k.11=(36,6,64,32)f32 #2994=(36,6,32,64)f32
torch.matmul torch.matmul_2212 2 1 q0.11 2994 attn.23 $input=q0.11 $other=2994 #q0.11=(36,6,64,32)f32 #2994=(36,6,32,64)f32 #attn.23=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3226 2 1 relative_position_bias0.11 20313 3005 $input=relative_position_bias0.11 $dim=20313 #relative_position_bias0.11=(6,64,64)f32 #3005=(1,6,64,64)f32
pnnx.Expression pnnx_expr_16849 2 1 attn.23 3005 input.27 expr=add(@0,@1) #attn.23=(36,6,64,64)f32 #3005=(1,6,64,64)f32 #input.27=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.4.attn.softmax 1 1 input.27 3007 dim=-1 #input.27=(36,6,64,64)f32 #3007=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.4.attn.attn_drop 1 1 3007 3008 #3007=(36,6,64,64)f32 #3008=(36,6,64,64)f32
Tensor.select Tensor.select_664 3 1 qkv0.11 20304 20305 v.11 $input=qkv0.11 $dim=20304 $index=20305 #qkv0.11=(3,36,6,64,32)f32 #v.11=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16848 0 1 20315 expr=1
pnnx.Expression pnnx_expr_16847 0 1 20316 expr=2
torch.matmul torch.matmul_2213 2 1 3008 v.11 3009 $input=3008 $other=v.11 #3008=(36,6,64,64)f32 #v.11=(36,6,64,32)f32 #3009=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16846 1 1 x1.11 3011 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.11=(36,64,192)f32
torch.transpose torch.transpose_2970 3 1 3009 20315 20316 3010 $input=3009 $dim0=20315 $dim1=20316 #3009=(36,6,64,32)f32 #3010=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_443 2 1 3010 3011 input0.13 $input=3010 $shape=3011 #3010=(36,64,6,32)f32 #input0.13=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.4.attn.proj 1 1 input0.13 3013 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.13=(36,64,192)f32 #3013=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.4.attn.proj_drop 1 1 3013 3014 #3013=(36,64,192)f32 #3014=(36,64,192)f32
pnnx.Expression pnnx_expr_16842 1 1 2899 3015 expr=[-1,8,8,int(size(@0,2))] #2899=(1,2304,192)f32
pnnx.Expression pnnx_expr_16829 1 1 154 3021 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_16822 0 1 3023 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1024 2 1 3014 3015 windows.11 $input=3014 $shape=3015 #3014=(36,64,192)f32 #windows.11=(36,8,8,192)f32
Tensor.view Tensor.view_1025 2 1 windows.11 3021 x2.11 $input=windows.11 $shape=3021 #windows.11=(36,8,8,192)f32 #x2.11=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_16819 1 1 154 3026 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2547 2 1 x2.11 3023 3024 $input=x2.11 $dims=3023 #x2.11=(1,6,6,8,8,192)f32 #3024=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_16 1 1 3024 3025 memory_format=torch.contiguous_format $input=3024 #3024=(1,6,8,6,8,192)f32 #3025=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_16816 2 1 2899 154 3030 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #2899=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1026 2 1 3025 3026 x3.11 $input=3025 $shape=3026 #3025=(1,6,8,6,8,192)f32 #x3.11=(1,48,48,192)f32
Tensor.view Tensor.view_1027 2 1 x3.11 3030 x4.11 $input=x3.11 $shape=3030 #x3.11=(1,48,48,192)f32 #x4.11=(1,2304,192)f32
pnnx.Expression pnnx_expr_16814 2 1 2899 x4.11 input.29 expr=add(@0,@1) #2899=(1,2304,192)f32 #x4.11=(1,2304,192)f32 #input.29=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.4.norm2 1 1 input.29 3034 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.29=(1,2304,192)f32 #3034=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.4.mlp.fc1 1 1 3034 3039 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3034=(1,2304,192)f32 #3039=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.4.mlp.act 1 1 3039 3040 #3039=(1,2304,384)f32 #3040=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.4.mlp.drop 1 1 3040 3041 #3040=(1,2304,384)f32 #3041=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.4.mlp.fc2 1 1 3041 3042 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3041=(1,2304,384)f32 #3042=(1,2304,192)f32
nn.Dropout pnnx_unique_10 1 1 3042 3043 #3042=(1,2304,192)f32 #3043=(1,2304,192)f32
pnnx.Expression pnnx_expr_16812 2 1 input.29 3043 3045 expr=add(@0,@1) #input.29=(1,2304,192)f32 #3043=(1,2304,192)f32 #3045=(1,2304,192)f32
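Each block ends with the identical feed-forward tail traced above: a pre-norm MLP with a 192 -> 384 -> 192 bottleneck (ratio 2) and GELU, wrapped in a residual (pnnx_expr_16812). As a sketch (dropout probabilities are not recorded in these lines, so 0.0 is assumed):

import torch.nn as nn

class Mlp(nn.Module):
    # mirrors e.g. layers_dfe.0.residual_group.blocks.4.mlp.{fc1,act,drop,fc2}
    def __init__(self, dim=192, hidden=384, drop=0.0):
        super().__init__()
        self.fc1, self.act = nn.Linear(dim, hidden), nn.GELU()
        self.fc2, self.drop = nn.Linear(hidden, dim), nn.Dropout(drop)

    def forward(self, x):
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))

# used as x = x + Mlp(...)(norm2(x)), matching pnnx_expr_16812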
pnnx.Attribute layers_dfe.0.residual_group.blocks.5 0 1 attn_mask.7 @attn_mask=(36,64,64)f32 #attn_mask.7=(36,64,64)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.5.norm1 1 1 3045 3074 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #3045=(1,2304,192)f32 #3074=(1,2304,192)f32
pnnx.Expression pnnx_expr_16789 2 1 3045 154 3075 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #3045=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_16787 0 1 3077 expr=[-4,-4]
pnnx.Expression pnnx_expr_16785 0 1 3078 expr=[1,2]
Tensor.view Tensor.view_1028 2 1 3074 3075 x.13 $input=3074 $shape=3075 #3074=(1,2304,192)f32 #x.13=(1,48,48,192)f32
torch.roll torch.roll_2424 3 1 x.13 3077 3078 x0.13 $input=x.13 $shifts=3077 $dims=3078 #x.13=(1,48,48,192)f32 #x0.13=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16763 1 1 x0.13 3095 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.13=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16758 0 1 3097 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1029 2 1 x0.13 3095 x1.13 $input=x0.13 $shape=3095 #x0.13=(1,48,48,192)f32 #x1.13=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_16755 1 1 x0.13 3100 expr=[-1,8,8,int(size(@0,3))] #x0.13=(1,48,48,192)f32
torch.permute torch.permute_2548 2 1 x1.13 3097 3098 $input=x1.13 $dims=3097 #x1.13=(1,6,8,6,8,192)f32 #3098=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_17 1 1 3098 3099 memory_format=torch.contiguous_format $input=3098 #3098=(1,6,6,8,8,192)f32 #3099=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_16753 1 1 3045 3102 expr=[-1,64,int(size(@0,2))] #3045=(1,2304,192)f32
pnnx.Expression pnnx_expr_16743 0 1 3113 expr=-2
pnnx.Expression pnnx_expr_16742 0 1 3114 expr=-1
pnnx.Attribute layers_dfe.0.residual_group.blocks.5.attn 0 1 relative_position_bias_table.13 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.13=(225,6)f32
pnnx.Attribute pnnx_unique_11 0 1 relative_position_index.13 @relative_position_index=(64,64)i64 #relative_position_index.13=(64,64)i64
Tensor.view Tensor.view_1030 2 1 3099 3100 x_windows.13 $input=3099 $shape=3100 #3099=(1,6,6,8,8,192)f32 #x_windows.13=(36,8,8,192)f32
Tensor.view Tensor.view_1031 2 1 x_windows.13 3102 x2.13 $input=x_windows.13 $shape=3102 #x_windows.13=(36,8,8,192)f32 #x2.13=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.5.attn.qkv 1 1 x2.13 3138 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.13=(36,64,192)f32 #3138=(36,64,576)f32
pnnx.Expression pnnx_expr_16723 1 1 x2.13 3141 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.13=(36,64,192)f32
pnnx.Expression pnnx_expr_16718 0 1 3143 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_444 2 1 3138 3141 3142 $input=3138 $shape=3141 #3138=(36,64,576)f32 #3142=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_16717 0 1 20359 expr=0
pnnx.Expression pnnx_expr_16716 0 1 20360 expr=0
pnnx.Expression pnnx_expr_16715 0 1 20361 expr=0
pnnx.Expression pnnx_expr_16714 0 1 20362 expr=1
pnnx.Expression pnnx_expr_16713 0 1 20363 expr=0
pnnx.Expression pnnx_expr_16712 0 1 20364 expr=2
torch.permute torch.permute_2549 2 1 3142 3143 qkv0.13 $input=3142 $dims=3143 #3142=(36,64,3,6,32)f32 #qkv0.13=(3,36,6,64,32)f32
Tensor.select Tensor.select_665 3 1 qkv0.13 20359 20360 q.13 $input=qkv0.13 $dim=20359 $index=20360 #qkv0.13=(3,36,6,64,32)f32 #q.13=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16711 1 1 q.13 q0.13 expr=mul(@0,1.767767e-01) #q.13=(36,6,64,32)f32 #q0.13=(36,6,64,32)f32
Tensor.select Tensor.select_666 3 1 qkv0.13 20361 20362 k.13 $input=qkv0.13 $dim=20361 $index=20362 #qkv0.13=(3,36,6,64,32)f32 #k.13=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16709 0 1 3151 expr=[-1]
Tensor.view Tensor.view_1032 2 1 relative_position_index.13 3151 3152 $input=relative_position_index.13 $shape=3151 #relative_position_index.13=(64,64)i64 #3152=(4096)i64
pnnx.Expression pnnx_expr_16708 1 1 3152 3153 expr=[@0] #3152=(4096)i64
pnnx.Expression pnnx_expr_16705 0 1 3155 expr=[64,64,-1]
Tensor.index Tensor.index_331 2 1 relative_position_bias_table.13 3153 3154 $input=relative_position_bias_table.13 $expr=3153 #relative_position_bias_table.13=(225,6)f32 #3154=(4096,6)f32
pnnx.Expression pnnx_expr_16701 0 1 3157 expr=[2,0,1]
Tensor.view Tensor.view_1033 2 1 3154 3155 relative_position_bias.13 $input=3154 $shape=3155 #3154=(4096,6)f32 #relative_position_bias.13=(64,64,6)f32
pnnx.Expression pnnx_expr_16700 0 1 20372 expr=0
torch.permute torch.permute_2550 2 1 relative_position_bias.13 3157 3158 $input=relative_position_bias.13 $dims=3157 #relative_position_bias.13=(64,64,6)f32 #3158=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_18 1 1 3158 relative_position_bias0.13 memory_format=torch.contiguous_format $input=3158 #3158=(6,64,64)f32 #relative_position_bias0.13=(6,64,64)f32
torch.transpose torch.transpose_2971 3 1 k.13 3113 3114 3149 $input=k.13 $dim0=3113 $dim1=3114 #k.13=(36,6,64,32)f32 #3149=(36,6,32,64)f32
torch.matmul torch.matmul_2214 2 1 q0.13 3149 attn.27 $input=q0.13 $other=3149 #q0.13=(36,6,64,32)f32 #3149=(36,6,32,64)f32 #attn.27=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3227 2 1 relative_position_bias0.13 20372 3160 $input=relative_position_bias0.13 $dim=20372 #relative_position_bias0.13=(6,64,64)f32 #3160=(1,6,64,64)f32
pnnx.Expression pnnx_expr_16698 2 1 attn.27 3160 attn0.7 expr=add(@0,@1) #attn.27=(36,6,64,64)f32 #3160=(1,6,64,64)f32 #attn0.7=(36,6,64,64)f32
pnnx.Expression pnnx_expr_16689 2 1 x2.13 attn_mask.7 3167 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.13=(36,64,192)f32 #attn_mask.7=(36,64,64)f32
pnnx.Expression pnnx_expr_16688 0 1 20377 expr=1
pnnx.Expression pnnx_expr_16687 0 1 20378 expr=0
Tensor.view Tensor.view_1034 2 1 attn0.7 3167 3168 $input=attn0.7 $shape=3167 #attn0.7=(36,6,64,64)f32 #3168=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3228 2 1 attn_mask.7 20377 3169 $input=attn_mask.7 $dim=20377 #attn_mask.7=(36,64,64)f32 #3169=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3229 2 1 3169 20378 3170 $input=3169 $dim=20378 #3169=(36,1,64,64)f32 #3170=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_16685 2 1 3168 3170 attn1.7 expr=add(@0,@1) #3168=(1,36,6,64,64)f32 #3170=(1,36,1,64,64)f32 #attn1.7=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_16682 1 1 x2.13 3172 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.13=(36,64,192)f32
Tensor.view Tensor.view_1035 2 1 attn1.7 3172 input.31 $input=attn1.7 $shape=3172 #attn1.7=(1,36,6,64,64)f32 #input.31=(36,6,64,64)f32
nn.Softmax layers_dfe.0.residual_group.blocks.5.attn.softmax 1 1 input.31 3174 dim=-1 #input.31=(36,6,64,64)f32 #3174=(36,6,64,64)f32
nn.Dropout layers_dfe.0.residual_group.blocks.5.attn.attn_drop 1 1 3174 3175 #3174=(36,6,64,64)f32 #3175=(36,6,64,64)f32
Tensor.select Tensor.select_667 3 1 qkv0.13 20363 20364 v.13 $input=qkv0.13 $dim=20363 $index=20364 #qkv0.13=(3,36,6,64,32)f32 #v.13=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16681 0 1 20382 expr=1
pnnx.Expression pnnx_expr_16680 0 1 20383 expr=2
torch.matmul torch.matmul_2215 2 1 3175 v.13 3176 $input=3175 $other=v.13 #3175=(36,6,64,64)f32 #v.13=(36,6,64,32)f32 #3176=(36,6,64,32)f32
pnnx.Expression pnnx_expr_16679 1 1 x2.13 3178 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.13=(36,64,192)f32
torch.transpose torch.transpose_2972 3 1 3176 20382 20383 3177 $input=3176 $dim0=20382 $dim1=20383 #3176=(36,6,64,32)f32 #3177=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_445 2 1 3177 3178 input0.15 $input=3177 $shape=3178 #3177=(36,64,6,32)f32 #input0.15=(36,64,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.5.attn.proj 1 1 input0.15 3180 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.15=(36,64,192)f32 #3180=(36,64,192)f32
nn.Dropout layers_dfe.0.residual_group.blocks.5.attn.proj_drop 1 1 3180 3181 #3180=(36,64,192)f32 #3181=(36,64,192)f32
pnnx.Expression pnnx_expr_16675 1 1 3045 3182 expr=[-1,8,8,int(size(@0,2))] #3045=(1,2304,192)f32
pnnx.Expression pnnx_expr_16662 1 1 154 3188 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_16655 0 1 3190 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1036 2 1 3181 3182 windows.13 $input=3181 $shape=3182 #3181=(36,64,192)f32 #windows.13=(36,8,8,192)f32
Tensor.view Tensor.view_1037 2 1 windows.13 3188 x3.13 $input=windows.13 $shape=3188 #windows.13=(36,8,8,192)f32 #x3.13=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_16652 1 1 154 3193 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2551 2 1 x3.13 3190 3191 $input=x3.13 $dims=3190 #x3.13=(1,6,6,8,8,192)f32 #3191=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_19 1 1 3191 3192 memory_format=torch.contiguous_format $input=3191 #3191=(1,6,8,6,8,192)f32 #3192=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_16649 0 1 3195 expr=[4,4]
pnnx.Expression pnnx_expr_16646 0 1 3196 expr=[1,2]
Tensor.view Tensor.view_1038 2 1 3192 3193 shifted_x.7 $input=3192 $shape=3193 #3192=(1,6,8,6,8,192)f32 #shifted_x.7=(1,48,48,192)f32
pnnx.Expression pnnx_expr_16643 2 1 3045 154 3200 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3045=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2425 3 1 shifted_x.7 3195 3196 x4.13 $input=shifted_x.7 $shifts=3195 $dims=3196 #shifted_x.7=(1,48,48,192)f32 #x4.13=(1,48,48,192)f32
Tensor.view Tensor.view_1039 2 1 x4.13 3200 x5.7 $input=x4.13 $shape=3200 #x4.13=(1,48,48,192)f32 #x5.7=(1,2304,192)f32
pnnx.Expression pnnx_expr_16641 2 1 3045 x5.7 input.33 expr=add(@0,@1) #3045=(1,2304,192)f32 #x5.7=(1,2304,192)f32 #input.33=(1,2304,192)f32
nn.LayerNorm layers_dfe.0.residual_group.blocks.5.norm2 1 1 input.33 3204 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.33=(1,2304,192)f32 #3204=(1,2304,192)f32
nn.Linear layers_dfe.0.residual_group.blocks.5.mlp.fc1 1 1 3204 3209 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3204=(1,2304,192)f32 #3209=(1,2304,384)f32
nn.GELU layers_dfe.0.residual_group.blocks.5.mlp.act 1 1 3209 3210 #3209=(1,2304,384)f32 #3210=(1,2304,384)f32
nn.Dropout layers_dfe.0.residual_group.blocks.5.mlp.drop 1 1 3210 3211 #3210=(1,2304,384)f32 #3211=(1,2304,384)f32
nn.Linear layers_dfe.0.residual_group.blocks.5.mlp.fc2 1 1 3211 3212 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3211=(1,2304,384)f32 #3212=(1,2304,192)f32
nn.Dropout pnnx_unique_12 1 1 3212 3213 #3212=(1,2304,192)f32 #3213=(1,2304,192)f32
pnnx.Expression pnnx_expr_16639 2 1 input.33 3213 3215 expr=add(@0,@1) #input.33=(1,2304,192)f32 #3213=(1,2304,192)f32 #3215=(1,2304,192)f32
pnnx.Expression pnnx_expr_16637 0 1 3217 expr=1
pnnx.Expression pnnx_expr_16636 0 1 3218 expr=2
pnnx.Expression pnnx_expr_16631 2 1 3215 154 3224 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #3215=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.transpose torch.transpose_2973 3 1 3215 3217 3218 3223 $input=3215 $dim0=3217 $dim1=3218 #3215=(1,2304,192)f32 #3223=(1,192,2304)f32
Tensor.view Tensor.view_1040 2 1 3223 3224 input.35 $input=3223 $shape=3224 #3223=(1,192,2304)f32 #input.35=(1,192,48,48)f32
nn.Conv2d layers_dfe.0.conv 1 1 input.35 3226 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.35=(1,192,48,48)f32 #3226=(1,192,48,48)f32
pnnx.Expression pnnx_expr_16630 0 1 3227 expr=-1
pnnx.Expression pnnx_expr_16629 0 1 3228 expr=2
pnnx.Expression pnnx_expr_16628 0 1 3229 expr=1
pnnx.Expression pnnx_expr_16627 0 1 20410 expr=2
torch.flatten torch.flatten_2184 3 1 3226 3228 3227 3230 $input=3226 $start_dim=3228 $end_dim=3227 #3226=(1,192,48,48)f32 #3230=(1,192,2304)f32
torch.transpose torch.transpose_2974 3 1 3230 3229 20410 3231 $input=3230 $dim0=3229 $dim1=20410 #3230=(1,192,2304)f32 #3231=(1,2304,192)f32
pnnx.Expression pnnx_expr_16626 2 1 3231 642 3232 expr=add(@0,@1) #3231=(1,2304,192)f32 #642=(1,2304,192)f32 #3232=(1,2304,192)f32
layers_dfe.1.residual_group.blocks.0.attn 0 1 relative_position_bias_table.15 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.15=(225,6)f32 pnnx.Attribute pnnx_unique_13 0 1 relative_position_index.15 @relative_position_index=(64,64)i64 #relative_position_index.15=(64,64)i64 Tensor.view Tensor.view_1043 2 1 3298 3299 x_windows.15 $input=3298 $shape=3299 #3298=(1,6,6,8,8,192)f32 #x_windows.15=(36,8,8,192)f32 Tensor.view Tensor.view_1044 2 1 x_windows.15 3301 x1.15 $input=x_windows.15 $shape=3301 #x_windows.15=(36,8,8,192)f32 #x1.15=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.0.attn.qkv 1 1 x1.15 3333 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.15=(36,64,192)f32 #3333=(36,64,576)f32 pnnx.Expression pnnx_expr_16546 1 1 x1.15 3336 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.15=(36,64,192)f32 pnnx.Expression pnnx_expr_16541 0 1 3338 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_446 2 1 3333 3336 3337 $input=3333 $shape=3336 #3333=(36,64,576)f32 #3337=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_16540 0 1 20428 expr=0 pnnx.Expression pnnx_expr_16539 0 1 20429 expr=0 pnnx.Expression pnnx_expr_16538 0 1 20430 expr=0 pnnx.Expression pnnx_expr_16537 0 1 20431 expr=1 pnnx.Expression pnnx_expr_16536 0 1 20432 expr=0 pnnx.Expression pnnx_expr_16535 0 1 20433 expr=2 torch.permute torch.permute_2553 2 1 3337 3338 qkv0.15 $input=3337 $dims=3338 #3337=(36,64,3,6,32)f32 #qkv0.15=(3,36,6,64,32)f32 Tensor.select Tensor.select_668 3 1 qkv0.15 20428 20429 q.15 $input=qkv0.15 $dim=20428 $index=20429 #qkv0.15=(3,36,6,64,32)f32 #q.15=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16534 1 1 q.15 q0.15 expr=mul(@0,1.767767e-01) #q.15=(36,6,64,32)f32 #q0.15=(36,6,64,32)f32 Tensor.select Tensor.select_669 3 1 qkv0.15 20430 20431 k.15 $input=qkv0.15 $dim=20430 $index=20431 #qkv0.15=(3,36,6,64,32)f32 #k.15=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16532 0 1 3346 expr=[-1] Tensor.view Tensor.view_1045 2 1 relative_position_index.15 3346 3347 $input=relative_position_index.15 $shape=3346 #relative_position_index.15=(64,64)i64 #3347=(4096)i64 pnnx.Expression pnnx_expr_16531 1 1 3347 3348 expr=[@0] #3347=(4096)i64 pnnx.Expression pnnx_expr_16528 0 1 3350 expr=[64,64,-1] Tensor.index Tensor.index_332 2 1 relative_position_bias_table.15 3348 3349 $input=relative_position_bias_table.15 $expr=3348 #relative_position_bias_table.15=(225,6)f32 #3349=(4096,6)f32 pnnx.Expression pnnx_expr_16524 0 1 3352 expr=[2,0,1] Tensor.view Tensor.view_1046 2 1 3349 3350 relative_position_bias.15 $input=3349 $shape=3350 #3349=(4096,6)f32 #relative_position_bias.15=(64,64,6)f32 pnnx.Expression pnnx_expr_16523 0 1 20441 expr=0 torch.permute torch.permute_2554 2 1 relative_position_bias.15 3352 3353 $input=relative_position_bias.15 $dims=3352 #relative_position_bias.15=(64,64,6)f32 #3353=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_21 1 1 3353 relative_position_bias0.15 memory_format=torch.contiguous_format $input=3353 #3353=(6,64,64)f32 #relative_position_bias0.15=(6,64,64)f32 torch.transpose torch.transpose_2975 3 1 k.15 3312 3313 3344 $input=k.15 $dim0=3312 $dim1=3313 #k.15=(36,6,64,32)f32 #3344=(36,6,32,64)f32 torch.matmul torch.matmul_2216 2 1 q0.15 3344 attn.31 $input=q0.15 $other=3344 #q0.15=(36,6,64,32)f32 #3344=(36,6,32,64)f32 #attn.31=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3230 2 1 relative_position_bias0.15 20441 3355 $input=relative_position_bias0.15 $dim=20441 #relative_position_bias0.15=(6,64,64)f32 #3355=(1,6,64,64)f32 
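The eight-operator run that closes this stretch (view → index → view → permute → contiguous → unsqueeze, then the add on the next line) is the standard relative-position-bias lookup of Swin-style window attention; it repeats verbatim for every block in this dump with fresh numeric suffixes. A minimal PyTorch sketch of what those ops compute, with window_size=8 and num_heads=6 read off the recorded shapes ((225,6) = ((2*8-1)**2, 6), (64,64) = (8*8, 8*8)); the random tensors stand in for the learned attributes:

```python
import torch

# Shapes taken from the dump: table=(225,6), index=(64,64) int64.
window_size, num_heads = 8, 6
table = torch.randn((2 * window_size - 1) ** 2, num_heads)        # relative_position_bias_table
index = torch.randint(0, 225, (window_size**2, window_size**2))   # relative_position_index

# Tensor.view_1045 / Tensor.index_332 / Tensor.view_1046:
bias = table[index.view(-1)]                          # (4096, 6)
bias = bias.view(window_size**2, window_size**2, -1)  # (64, 64, 6)

# torch.permute_2554 / Tensor.contiguous_21 / torch.unsqueeze_3230:
bias = bias.permute(2, 0, 1).contiguous().unsqueeze(0)  # (1, 6, 64, 64)

# pnnx_expr_16521 (next line of the dump): broadcast-add onto the attention logits.
attn = torch.randn(36, num_heads, window_size**2, window_size**2)
attn = attn + bias
print(attn.shape)  # torch.Size([36, 6, 64, 64])
```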
pnnx.Expression pnnx_expr_16521 2 1 attn.31 3355 input.37 expr=add(@0,@1) #attn.31=(36,6,64,64)f32 #3355=(1,6,64,64)f32 #input.37=(36,6,64,64)f32 nn.Softmax layers_dfe.1.residual_group.blocks.0.attn.softmax 1 1 input.37 3357 dim=-1 #input.37=(36,6,64,64)f32 #3357=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.0.attn.attn_drop 1 1 3357 3358 #3357=(36,6,64,64)f32 #3358=(36,6,64,64)f32 Tensor.select Tensor.select_670 3 1 qkv0.15 20432 20433 v.15 $input=qkv0.15 $dim=20432 $index=20433 #qkv0.15=(3,36,6,64,32)f32 #v.15=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16520 0 1 20443 expr=1 pnnx.Expression pnnx_expr_16519 0 1 20444 expr=2 torch.matmul torch.matmul_2217 2 1 3358 v.15 3359 $input=3358 $other=v.15 #3358=(36,6,64,64)f32 #v.15=(36,6,64,32)f32 #3359=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16518 1 1 x1.15 3361 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.15=(36,64,192)f32 torch.transpose torch.transpose_2976 3 1 3359 20443 20444 3360 $input=3359 $dim0=20443 $dim1=20444 #3359=(36,6,64,32)f32 #3360=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_447 2 1 3360 3361 input0.17 $input=3360 $shape=3361 #3360=(36,64,6,32)f32 #input0.17=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.0.attn.proj 1 1 input0.17 3363 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.17=(36,64,192)f32 #3363=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.0.attn.proj_drop 1 1 3363 3364 #3363=(36,64,192)f32 #3364=(36,64,192)f32 pnnx.Expression pnnx_expr_16514 1 1 3232 3365 expr=[-1,8,8,int(size(@0,2))] #3232=(1,2304,192)f32 pnnx.Expression pnnx_expr_16501 1 1 154 3371 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16494 0 1 3373 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1047 2 1 3364 3365 windows.15 $input=3364 $shape=3365 #3364=(36,64,192)f32 #windows.15=(36,8,8,192)f32 Tensor.view Tensor.view_1048 2 1 windows.15 3371 x2.15 $input=windows.15 $shape=3371 #windows.15=(36,8,8,192)f32 #x2.15=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16491 1 1 154 3376 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2555 2 1 x2.15 3373 3374 $input=x2.15 $dims=3373 #x2.15=(1,6,6,8,8,192)f32 #3374=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_22 1 1 3374 3375 memory_format=torch.contiguous_format $input=3374 #3374=(1,6,8,6,8,192)f32 #3375=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16488 2 1 3232 154 3380 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3232=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1049 2 1 3375 3376 x3.15 $input=3375 $shape=3376 #3375=(1,6,8,6,8,192)f32 #x3.15=(1,48,48,192)f32 Tensor.view Tensor.view_1050 2 1 x3.15 3380 x4.15 $input=x3.15 $shape=3380 #x3.15=(1,48,48,192)f32 #x4.15=(1,2304,192)f32 pnnx.Expression pnnx_expr_16486 2 1 3232 x4.15 input.39 expr=add(@0,@1) #3232=(1,2304,192)f32 #x4.15=(1,2304,192)f32 #input.39=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.0.norm2 1 1 input.39 3384 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.39=(1,2304,192)f32 #3384=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.0.mlp.fc1 1 1 3384 3389 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3384=(1,2304,192)f32 #3389=(1,2304,384)f32 nn.GELU layers_dfe.1.residual_group.blocks.0.mlp.act 1 1 3389 3390 #3389=(1,2304,384)f32 #3390=(1,2304,384)f32 nn.Dropout 
layers_dfe.1.residual_group.blocks.0.mlp.drop 1 1 3390 3391 #3390=(1,2304,384)f32 #3391=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.0.mlp.fc2 1 1 3391 3392 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3391=(1,2304,384)f32 #3392=(1,2304,192)f32 nn.Dropout pnnx_unique_14 1 1 3392 3393 #3392=(1,2304,192)f32 #3393=(1,2304,192)f32 pnnx.Expression pnnx_expr_16484 2 1 input.39 3393 3395 expr=add(@0,@1) #input.39=(1,2304,192)f32 #3393=(1,2304,192)f32 #3395=(1,2304,192)f32 pnnx.Attribute layers_dfe.1.residual_group.blocks.1 0 1 attn_mask.9 @attn_mask=(36,64,64)f32 #attn_mask.9=(36,64,64)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.1.norm1 1 1 3395 3424 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #3395=(1,2304,192)f32 #3424=(1,2304,192)f32 pnnx.Expression pnnx_expr_16461 2 1 3395 154 3425 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #3395=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16459 0 1 3427 expr=[-4,-4] pnnx.Expression pnnx_expr_16457 0 1 3428 expr=[1,2] Tensor.view Tensor.view_1051 2 1 3424 3425 x.17 $input=3424 $shape=3425 #3424=(1,2304,192)f32 #x.17=(1,48,48,192)f32 torch.roll torch.roll_2426 3 1 x.17 3427 3428 x0.17 $input=x.17 $shifts=3427 $dims=3428 #x.17=(1,48,48,192)f32 #x0.17=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16435 1 1 x0.17 3445 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.17=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16430 0 1 3447 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1052 2 1 x0.17 3445 x1.17 $input=x0.17 $shape=3445 #x0.17=(1,48,48,192)f32 #x1.17=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16427 1 1 x0.17 3450 expr=[-1,8,8,int(size(@0,3))] #x0.17=(1,48,48,192)f32 torch.permute torch.permute_2556 2 1 x1.17 3447 3448 $input=x1.17 $dims=3447 #x1.17=(1,6,8,6,8,192)f32 #3448=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_23 1 1 3448 3449 memory_format=torch.contiguous_format $input=3448 #3448=(1,6,6,8,8,192)f32 #3449=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16425 1 1 3395 3452 expr=[-1,64,int(size(@0,2))] #3395=(1,2304,192)f32 pnnx.Expression pnnx_expr_16415 0 1 3463 expr=-2 pnnx.Expression pnnx_expr_16414 0 1 3464 expr=-1 pnnx.Attribute layers_dfe.1.residual_group.blocks.1.attn 0 1 relative_position_bias_table.17 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.17=(225,6)f32 pnnx.Attribute pnnx_unique_15 0 1 relative_position_index.17 @relative_position_index=(64,64)i64 #relative_position_index.17=(64,64)i64 Tensor.view Tensor.view_1053 2 1 3449 3450 x_windows.17 $input=3449 $shape=3450 #3449=(1,6,6,8,8,192)f32 #x_windows.17=(36,8,8,192)f32 Tensor.view Tensor.view_1054 2 1 x_windows.17 3452 x2.17 $input=x_windows.17 $shape=3452 #x_windows.17=(36,8,8,192)f32 #x2.17=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.1.attn.qkv 1 1 x2.17 3488 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.17=(36,64,192)f32 #3488=(36,64,576)f32 pnnx.Expression pnnx_expr_16395 1 1 x2.17 3491 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.17=(36,64,192)f32 pnnx.Expression pnnx_expr_16390 0 1 3493 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_448 2 1 3488 3491 3492 $input=3488 $shape=3491 #3488=(36,64,576)f32 #3492=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_16389 0 1 20487 expr=0 pnnx.Expression pnnx_expr_16388 0 1 20488 expr=0 pnnx.Expression pnnx_expr_16387 0 1 20489 expr=0 
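blocks.1 above is a shifted-window block: torch.roll_2426 displaces the (1,48,48,192) map by (-4,-4) — half the window size — before the view/permute/contiguous/view cascade cuts it into 36 non-overlapping 8x8 windows. A sketch of that partition under those shapes; the window_partition helper is illustrative, not an op recorded in the graph:

```python
import torch

def window_partition(x, ws):
    # Mirrors Tensor.view_1052 -> torch.permute_2556 -> contiguous -> view_1053/1054.
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, ws * ws, C)  # (num_windows * B, ws*ws, C)

x = torch.randn(1, 48, 48, 192)                  # x.17 in the dump
x = torch.roll(x, shifts=(-4, -4), dims=(1, 2))  # torch.roll_2426, shift = 8 // 2
windows = window_partition(x, 8)
print(windows.shape)  # torch.Size([36, 64, 192]) -- matches x2.17
```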
pnnx.Expression pnnx_expr_16386 0 1 20490 expr=1 pnnx.Expression pnnx_expr_16385 0 1 20491 expr=0 pnnx.Expression pnnx_expr_16384 0 1 20492 expr=2 torch.permute torch.permute_2557 2 1 3492 3493 qkv0.17 $input=3492 $dims=3493 #3492=(36,64,3,6,32)f32 #qkv0.17=(3,36,6,64,32)f32 Tensor.select Tensor.select_671 3 1 qkv0.17 20487 20488 q.17 $input=qkv0.17 $dim=20487 $index=20488 #qkv0.17=(3,36,6,64,32)f32 #q.17=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16383 1 1 q.17 q0.17 expr=mul(@0,1.767767e-01) #q.17=(36,6,64,32)f32 #q0.17=(36,6,64,32)f32 Tensor.select Tensor.select_672 3 1 qkv0.17 20489 20490 k.17 $input=qkv0.17 $dim=20489 $index=20490 #qkv0.17=(3,36,6,64,32)f32 #k.17=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16381 0 1 3501 expr=[-1] Tensor.view Tensor.view_1055 2 1 relative_position_index.17 3501 3502 $input=relative_position_index.17 $shape=3501 #relative_position_index.17=(64,64)i64 #3502=(4096)i64 pnnx.Expression pnnx_expr_16380 1 1 3502 3503 expr=[@0] #3502=(4096)i64 pnnx.Expression pnnx_expr_16377 0 1 3505 expr=[64,64,-1] Tensor.index Tensor.index_333 2 1 relative_position_bias_table.17 3503 3504 $input=relative_position_bias_table.17 $expr=3503 #relative_position_bias_table.17=(225,6)f32 #3504=(4096,6)f32 pnnx.Expression pnnx_expr_16373 0 1 3507 expr=[2,0,1] Tensor.view Tensor.view_1056 2 1 3504 3505 relative_position_bias.17 $input=3504 $shape=3505 #3504=(4096,6)f32 #relative_position_bias.17=(64,64,6)f32 pnnx.Expression pnnx_expr_16372 0 1 20500 expr=0 torch.permute torch.permute_2558 2 1 relative_position_bias.17 3507 3508 $input=relative_position_bias.17 $dims=3507 #relative_position_bias.17=(64,64,6)f32 #3508=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_24 1 1 3508 relative_position_bias0.17 memory_format=torch.contiguous_format $input=3508 #3508=(6,64,64)f32 #relative_position_bias0.17=(6,64,64)f32 torch.transpose torch.transpose_2977 3 1 k.17 3463 3464 3499 $input=k.17 $dim0=3463 $dim1=3464 #k.17=(36,6,64,32)f32 #3499=(36,6,32,64)f32 torch.matmul torch.matmul_2218 2 1 q0.17 3499 attn.35 $input=q0.17 $other=3499 #q0.17=(36,6,64,32)f32 #3499=(36,6,32,64)f32 #attn.35=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3231 2 1 relative_position_bias0.17 20500 3510 $input=relative_position_bias0.17 $dim=20500 #relative_position_bias0.17=(6,64,64)f32 #3510=(1,6,64,64)f32 pnnx.Expression pnnx_expr_16370 2 1 attn.35 3510 attn0.9 expr=add(@0,@1) #attn.35=(36,6,64,64)f32 #3510=(1,6,64,64)f32 #attn0.9=(36,6,64,64)f32 pnnx.Expression pnnx_expr_16361 2 1 x2.17 attn_mask.9 3517 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.17=(36,64,192)f32 #attn_mask.9=(36,64,64)f32 pnnx.Expression pnnx_expr_16360 0 1 20505 expr=1 pnnx.Expression pnnx_expr_16359 0 1 20506 expr=0 Tensor.view Tensor.view_1057 2 1 attn0.9 3517 3518 $input=attn0.9 $shape=3517 #attn0.9=(36,6,64,64)f32 #3518=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3232 2 1 attn_mask.9 20505 3519 $input=attn_mask.9 $dim=20505 #attn_mask.9=(36,64,64)f32 #3519=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3233 2 1 3519 20506 3520 $input=3519 $dim=20506 #3519=(36,1,64,64)f32 #3520=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_16357 2 1 3518 3520 attn1.9 expr=add(@0,@1) #3518=(1,36,6,64,64)f32 #3520=(1,36,1,64,64)f32 #attn1.9=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_16354 1 1 x2.17 3522 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.17=(36,64,192)f32 Tensor.view Tensor.view_1058 2 1 attn1.9 3522 input.41 $input=attn1.9 $shape=3522 #attn1.9=(1,36,6,64,64)f32 #input.41=(36,6,64,64)f32 
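The mask arithmetic just above is the shifted-window trick: the (36,6,64,64) logits are viewed as (1,36,6,64,64), attn_mask.9 of shape (36,64,64) is unsqueezed twice to (1,36,1,64,64) so it broadcasts across the 6 heads, the two are added, and the sum is flattened back to (36,6,64,64) for the softmax on the next line. In plain PyTorch, a sketch assuming batch size 1 and 36 windows (random values stand in for the real mask, which holds 0 / large-negative entries):

```python
import torch

nW, heads, N = 36, 6, 64
attn = torch.randn(nW, heads, N, N)   # attn0.9
mask = torch.randn(nW, N, N)          # attn_mask.9

# Tensor.view_1057 + torch.unsqueeze_3232/3233 + add + Tensor.view_1058:
attn = attn.view(1, nW, heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, heads, N, N)     # input.41, fed into nn.Softmax(dim=-1)
print(attn.shape)  # torch.Size([36, 6, 64, 64])
```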
nn.Softmax layers_dfe.1.residual_group.blocks.1.attn.softmax 1 1 input.41 3524 dim=-1 #input.41=(36,6,64,64)f32 #3524=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.1.attn.attn_drop 1 1 3524 3525 #3524=(36,6,64,64)f32 #3525=(36,6,64,64)f32 Tensor.select Tensor.select_673 3 1 qkv0.17 20491 20492 v.17 $input=qkv0.17 $dim=20491 $index=20492 #qkv0.17=(3,36,6,64,32)f32 #v.17=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16353 0 1 20510 expr=1 pnnx.Expression pnnx_expr_16352 0 1 20511 expr=2 torch.matmul torch.matmul_2219 2 1 3525 v.17 3526 $input=3525 $other=v.17 #3525=(36,6,64,64)f32 #v.17=(36,6,64,32)f32 #3526=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16351 1 1 x2.17 3528 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.17=(36,64,192)f32 torch.transpose torch.transpose_2978 3 1 3526 20510 20511 3527 $input=3526 $dim0=20510 $dim1=20511 #3526=(36,6,64,32)f32 #3527=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_449 2 1 3527 3528 input0.19 $input=3527 $shape=3528 #3527=(36,64,6,32)f32 #input0.19=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.1.attn.proj 1 1 input0.19 3530 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.19=(36,64,192)f32 #3530=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.1.attn.proj_drop 1 1 3530 3531 #3530=(36,64,192)f32 #3531=(36,64,192)f32 pnnx.Expression pnnx_expr_16347 1 1 3395 3532 expr=[-1,8,8,int(size(@0,2))] #3395=(1,2304,192)f32 pnnx.Expression pnnx_expr_16334 1 1 154 3538 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16327 0 1 3540 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1059 2 1 3531 3532 windows.17 $input=3531 $shape=3532 #3531=(36,64,192)f32 #windows.17=(36,8,8,192)f32 Tensor.view Tensor.view_1060 2 1 windows.17 3538 x3.17 $input=windows.17 $shape=3538 #windows.17=(36,8,8,192)f32 #x3.17=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16324 1 1 154 3543 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2559 2 1 x3.17 3540 3541 $input=x3.17 $dims=3540 #x3.17=(1,6,6,8,8,192)f32 #3541=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_25 1 1 3541 3542 memory_format=torch.contiguous_format $input=3541 #3541=(1,6,8,6,8,192)f32 #3542=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16321 0 1 3545 expr=[4,4] pnnx.Expression pnnx_expr_16318 0 1 3546 expr=[1,2] Tensor.view Tensor.view_1061 2 1 3542 3543 shifted_x.9 $input=3542 $shape=3543 #3542=(1,6,8,6,8,192)f32 #shifted_x.9=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16315 2 1 3395 154 3550 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3395=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2427 3 1 shifted_x.9 3545 3546 x4.17 $input=shifted_x.9 $shifts=3545 $dims=3546 #shifted_x.9=(1,48,48,192)f32 #x4.17=(1,48,48,192)f32 Tensor.view Tensor.view_1062 2 1 x4.17 3550 x5.9 $input=x4.17 $shape=3550 #x4.17=(1,48,48,192)f32 #x5.9=(1,2304,192)f32 pnnx.Expression pnnx_expr_16313 2 1 3395 x5.9 input.43 expr=add(@0,@1) #3395=(1,2304,192)f32 #x5.9=(1,2304,192)f32 #input.43=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.1.norm2 1 1 input.43 3554 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.43=(1,2304,192)f32 #3554=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.1.mlp.fc1 1 1 3554 3559 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3554=(1,2304,192)f32 #3559=(1,2304,384)f32 nn.GELU 
layers_dfe.1.residual_group.blocks.1.mlp.act 1 1 3559 3560 #3559=(1,2304,384)f32 #3560=(1,2304,384)f32 nn.Dropout layers_dfe.1.residual_group.blocks.1.mlp.drop 1 1 3560 3561 #3560=(1,2304,384)f32 #3561=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.1.mlp.fc2 1 1 3561 3562 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3561=(1,2304,384)f32 #3562=(1,2304,192)f32 nn.Dropout pnnx_unique_16 1 1 3562 3563 #3562=(1,2304,192)f32 #3563=(1,2304,192)f32 pnnx.Expression pnnx_expr_16311 2 1 input.43 3563 3565 expr=add(@0,@1) #input.43=(1,2304,192)f32 #3563=(1,2304,192)f32 #3565=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.2.norm1 1 1 3565 3592 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #3565=(1,2304,192)f32 #3592=(1,2304,192)f32 pnnx.Expression pnnx_expr_16289 2 1 3565 154 3593 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #3565=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1063 2 1 3592 3593 x.19 $input=3592 $shape=3593 #3592=(1,2304,192)f32 #x.19=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16268 1 1 x.19 3610 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.19=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16263 0 1 3612 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1064 2 1 x.19 3610 x0.19 $input=x.19 $shape=3610 #x.19=(1,48,48,192)f32 #x0.19=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16260 1 1 x.19 3615 expr=[-1,8,8,int(size(@0,3))] #x.19=(1,48,48,192)f32 torch.permute torch.permute_2560 2 1 x0.19 3612 3613 $input=x0.19 $dims=3612 #x0.19=(1,6,8,6,8,192)f32 #3613=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_26 1 1 3613 3614 memory_format=torch.contiguous_format $input=3613 #3613=(1,6,6,8,8,192)f32 #3614=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16258 1 1 3565 3617 expr=[-1,64,int(size(@0,2))] #3565=(1,2304,192)f32 pnnx.Expression pnnx_expr_16248 0 1 3628 expr=-2 pnnx.Expression pnnx_expr_16247 0 1 3629 expr=-1 pnnx.Attribute layers_dfe.1.residual_group.blocks.2.attn 0 1 relative_position_bias_table.19 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.19=(225,6)f32 pnnx.Attribute pnnx_unique_17 0 1 relative_position_index.19 @relative_position_index=(64,64)i64 #relative_position_index.19=(64,64)i64 Tensor.view Tensor.view_1065 2 1 3614 3615 x_windows.19 $input=3614 $shape=3615 #3614=(1,6,6,8,8,192)f32 #x_windows.19=(36,8,8,192)f32 Tensor.view Tensor.view_1066 2 1 x_windows.19 3617 x1.19 $input=x_windows.19 $shape=3617 #x_windows.19=(36,8,8,192)f32 #x1.19=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.2.attn.qkv 1 1 x1.19 3649 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.19=(36,64,192)f32 #3649=(36,64,576)f32 pnnx.Expression pnnx_expr_16232 1 1 x1.19 3652 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.19=(36,64,192)f32 pnnx.Expression pnnx_expr_16227 0 1 3654 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_450 2 1 3649 3652 3653 $input=3649 $shape=3652 #3649=(36,64,576)f32 #3653=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_16226 0 1 20555 expr=0 pnnx.Expression pnnx_expr_16225 0 1 20556 expr=0 pnnx.Expression pnnx_expr_16224 0 1 20557 expr=0 pnnx.Expression pnnx_expr_16223 0 1 20558 expr=1 pnnx.Expression pnnx_expr_16222 0 1 20559 expr=0 pnnx.Expression pnnx_expr_16221 0 1 20560 expr=2 torch.permute torch.permute_2561 2 1 3653 3654 qkv0.19 $input=3653 $dims=3654 #3653=(36,64,3,6,32)f32 
#qkv0.19=(3,36,6,64,32)f32 Tensor.select Tensor.select_674 3 1 qkv0.19 20555 20556 q.19 $input=qkv0.19 $dim=20555 $index=20556 #qkv0.19=(3,36,6,64,32)f32 #q.19=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16220 1 1 q.19 q0.19 expr=mul(@0,1.767767e-01) #q.19=(36,6,64,32)f32 #q0.19=(36,6,64,32)f32 Tensor.select Tensor.select_675 3 1 qkv0.19 20557 20558 k.19 $input=qkv0.19 $dim=20557 $index=20558 #qkv0.19=(3,36,6,64,32)f32 #k.19=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16218 0 1 3662 expr=[-1] Tensor.view Tensor.view_1067 2 1 relative_position_index.19 3662 3663 $input=relative_position_index.19 $shape=3662 #relative_position_index.19=(64,64)i64 #3663=(4096)i64 pnnx.Expression pnnx_expr_16217 1 1 3663 3664 expr=[@0] #3663=(4096)i64 pnnx.Expression pnnx_expr_16214 0 1 3666 expr=[64,64,-1] Tensor.index Tensor.index_334 2 1 relative_position_bias_table.19 3664 3665 $input=relative_position_bias_table.19 $expr=3664 #relative_position_bias_table.19=(225,6)f32 #3665=(4096,6)f32 pnnx.Expression pnnx_expr_16210 0 1 3668 expr=[2,0,1] Tensor.view Tensor.view_1068 2 1 3665 3666 relative_position_bias.19 $input=3665 $shape=3666 #3665=(4096,6)f32 #relative_position_bias.19=(64,64,6)f32 pnnx.Expression pnnx_expr_16209 0 1 20568 expr=0 torch.permute torch.permute_2562 2 1 relative_position_bias.19 3668 3669 $input=relative_position_bias.19 $dims=3668 #relative_position_bias.19=(64,64,6)f32 #3669=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_27 1 1 3669 relative_position_bias0.19 memory_format=torch.contiguous_format $input=3669 #3669=(6,64,64)f32 #relative_position_bias0.19=(6,64,64)f32 torch.transpose torch.transpose_2979 3 1 k.19 3628 3629 3660 $input=k.19 $dim0=3628 $dim1=3629 #k.19=(36,6,64,32)f32 #3660=(36,6,32,64)f32 torch.matmul torch.matmul_2220 2 1 q0.19 3660 attn.39 $input=q0.19 $other=3660 #q0.19=(36,6,64,32)f32 #3660=(36,6,32,64)f32 #attn.39=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3234 2 1 relative_position_bias0.19 20568 3671 $input=relative_position_bias0.19 $dim=20568 #relative_position_bias0.19=(6,64,64)f32 #3671=(1,6,64,64)f32 pnnx.Expression pnnx_expr_16207 2 1 attn.39 3671 input.45 expr=add(@0,@1) #attn.39=(36,6,64,64)f32 #3671=(1,6,64,64)f32 #input.45=(36,6,64,64)f32 nn.Softmax layers_dfe.1.residual_group.blocks.2.attn.softmax 1 1 input.45 3673 dim=-1 #input.45=(36,6,64,64)f32 #3673=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.2.attn.attn_drop 1 1 3673 3674 #3673=(36,6,64,64)f32 #3674=(36,6,64,64)f32 Tensor.select Tensor.select_676 3 1 qkv0.19 20559 20560 v.19 $input=qkv0.19 $dim=20559 $index=20560 #qkv0.19=(3,36,6,64,32)f32 #v.19=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16206 0 1 20570 expr=1 pnnx.Expression pnnx_expr_16205 0 1 20571 expr=2 torch.matmul torch.matmul_2221 2 1 3674 v.19 3675 $input=3674 $other=v.19 #3674=(36,6,64,64)f32 #v.19=(36,6,64,32)f32 #3675=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16204 1 1 x1.19 3677 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.19=(36,64,192)f32 torch.transpose torch.transpose_2980 3 1 3675 20570 20571 3676 $input=3675 $dim0=20570 $dim1=20571 #3675=(36,6,64,32)f32 #3676=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_451 2 1 3676 3677 input0.21 $input=3676 $shape=3677 #3676=(36,64,6,32)f32 #input0.21=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.2.attn.proj 1 1 input0.21 3679 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.21=(36,64,192)f32 #3679=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.2.attn.proj_drop 1 1 3679 3680 
#3679=(36,64,192)f32 #3680=(36,64,192)f32 pnnx.Expression pnnx_expr_16200 1 1 3565 3681 expr=[-1,8,8,int(size(@0,2))] #3565=(1,2304,192)f32 pnnx.Expression pnnx_expr_16187 1 1 154 3687 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16180 0 1 3689 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1069 2 1 3680 3681 windows.19 $input=3680 $shape=3681 #3680=(36,64,192)f32 #windows.19=(36,8,8,192)f32 Tensor.view Tensor.view_1070 2 1 windows.19 3687 x2.19 $input=windows.19 $shape=3687 #windows.19=(36,8,8,192)f32 #x2.19=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16177 1 1 154 3692 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2563 2 1 x2.19 3689 3690 $input=x2.19 $dims=3689 #x2.19=(1,6,6,8,8,192)f32 #3690=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_28 1 1 3690 3691 memory_format=torch.contiguous_format $input=3690 #3690=(1,6,8,6,8,192)f32 #3691=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16174 2 1 3565 154 3696 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3565=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1071 2 1 3691 3692 x3.19 $input=3691 $shape=3692 #3691=(1,6,8,6,8,192)f32 #x3.19=(1,48,48,192)f32 Tensor.view Tensor.view_1072 2 1 x3.19 3696 x4.19 $input=x3.19 $shape=3696 #x3.19=(1,48,48,192)f32 #x4.19=(1,2304,192)f32 pnnx.Expression pnnx_expr_16172 2 1 3565 x4.19 input.47 expr=add(@0,@1) #3565=(1,2304,192)f32 #x4.19=(1,2304,192)f32 #input.47=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.2.norm2 1 1 input.47 3700 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.47=(1,2304,192)f32 #3700=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.2.mlp.fc1 1 1 3700 3705 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3700=(1,2304,192)f32 #3705=(1,2304,384)f32 nn.GELU layers_dfe.1.residual_group.blocks.2.mlp.act 1 1 3705 3706 #3705=(1,2304,384)f32 #3706=(1,2304,384)f32 nn.Dropout layers_dfe.1.residual_group.blocks.2.mlp.drop 1 1 3706 3707 #3706=(1,2304,384)f32 #3707=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.2.mlp.fc2 1 1 3707 3708 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3707=(1,2304,384)f32 #3708=(1,2304,192)f32 nn.Dropout pnnx_unique_18 1 1 3708 3709 #3708=(1,2304,192)f32 #3709=(1,2304,192)f32 pnnx.Expression pnnx_expr_16170 2 1 input.47 3709 3711 expr=add(@0,@1) #input.47=(1,2304,192)f32 #3709=(1,2304,192)f32 #3711=(1,2304,192)f32 pnnx.Attribute layers_dfe.1.residual_group.blocks.3 0 1 attn_mask.11 @attn_mask=(36,64,64)f32 #attn_mask.11=(36,64,64)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.3.norm1 1 1 3711 3740 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #3711=(1,2304,192)f32 #3740=(1,2304,192)f32 pnnx.Expression pnnx_expr_16147 2 1 3711 154 3741 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #3711=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16145 0 1 3743 expr=[-4,-4] pnnx.Expression pnnx_expr_16143 0 1 3744 expr=[1,2] Tensor.view Tensor.view_1073 2 1 3740 3741 x.21 $input=3740 $shape=3741 #3740=(1,2304,192)f32 #x.21=(1,48,48,192)f32 torch.roll torch.roll_2428 3 1 x.21 3743 3744 x0.21 $input=x.21 $shifts=3743 $dims=3744 #x.21=(1,48,48,192)f32 #x0.21=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16121 1 1 x0.21 3761 
expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.21=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16116 0 1 3763 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1074 2 1 x0.21 3761 x1.21 $input=x0.21 $shape=3761 #x0.21=(1,48,48,192)f32 #x1.21=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16113 1 1 x0.21 3766 expr=[-1,8,8,int(size(@0,3))] #x0.21=(1,48,48,192)f32 torch.permute torch.permute_2564 2 1 x1.21 3763 3764 $input=x1.21 $dims=3763 #x1.21=(1,6,8,6,8,192)f32 #3764=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_29 1 1 3764 3765 memory_format=torch.contiguous_format $input=3764 #3764=(1,6,6,8,8,192)f32 #3765=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16111 1 1 3711 3768 expr=[-1,64,int(size(@0,2))] #3711=(1,2304,192)f32 pnnx.Expression pnnx_expr_16101 0 1 3779 expr=-2 pnnx.Expression pnnx_expr_16100 0 1 3780 expr=-1 pnnx.Attribute layers_dfe.1.residual_group.blocks.3.attn 0 1 relative_position_bias_table.21 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.21=(225,6)f32 pnnx.Attribute pnnx_unique_19 0 1 relative_position_index.21 @relative_position_index=(64,64)i64 #relative_position_index.21=(64,64)i64 Tensor.view Tensor.view_1075 2 1 3765 3766 x_windows.21 $input=3765 $shape=3766 #3765=(1,6,6,8,8,192)f32 #x_windows.21=(36,8,8,192)f32 Tensor.view Tensor.view_1076 2 1 x_windows.21 3768 x2.21 $input=x_windows.21 $shape=3768 #x_windows.21=(36,8,8,192)f32 #x2.21=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.3.attn.qkv 1 1 x2.21 3804 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.21=(36,64,192)f32 #3804=(36,64,576)f32 pnnx.Expression pnnx_expr_16081 1 1 x2.21 3807 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.21=(36,64,192)f32 pnnx.Expression pnnx_expr_16076 0 1 3809 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_452 2 1 3804 3807 3808 $input=3804 $shape=3807 #3804=(36,64,576)f32 #3808=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_16075 0 1 20614 expr=0 pnnx.Expression pnnx_expr_16074 0 1 20615 expr=0 pnnx.Expression pnnx_expr_16073 0 1 20616 expr=0 pnnx.Expression pnnx_expr_16072 0 1 20617 expr=1 pnnx.Expression pnnx_expr_16071 0 1 20618 expr=0 pnnx.Expression pnnx_expr_16070 0 1 20619 expr=2 torch.permute torch.permute_2565 2 1 3808 3809 qkv0.21 $input=3808 $dims=3809 #3808=(36,64,3,6,32)f32 #qkv0.21=(3,36,6,64,32)f32 Tensor.select Tensor.select_677 3 1 qkv0.21 20614 20615 q.21 $input=qkv0.21 $dim=20614 $index=20615 #qkv0.21=(3,36,6,64,32)f32 #q.21=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16069 1 1 q.21 q0.21 expr=mul(@0,1.767767e-01) #q.21=(36,6,64,32)f32 #q0.21=(36,6,64,32)f32 Tensor.select Tensor.select_678 3 1 qkv0.21 20616 20617 k.21 $input=qkv0.21 $dim=20616 $index=20617 #qkv0.21=(3,36,6,64,32)f32 #k.21=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16067 0 1 3817 expr=[-1] Tensor.view Tensor.view_1077 2 1 relative_position_index.21 3817 3818 $input=relative_position_index.21 $shape=3817 #relative_position_index.21=(64,64)i64 #3818=(4096)i64 pnnx.Expression pnnx_expr_16066 1 1 3818 3819 expr=[@0] #3818=(4096)i64 pnnx.Expression pnnx_expr_16063 0 1 3821 expr=[64,64,-1] Tensor.index Tensor.index_335 2 1 relative_position_bias_table.21 3819 3820 $input=relative_position_bias_table.21 $expr=3819 #relative_position_bias_table.21=(225,6)f32 #3820=(4096,6)f32 pnnx.Expression pnnx_expr_16059 0 1 3823 expr=[2,0,1] Tensor.view Tensor.view_1078 2 1 3820 3821 relative_position_bias.21 $input=3820 $shape=3821 #3820=(4096,6)f32 
#relative_position_bias.21=(64,64,6)f32 pnnx.Expression pnnx_expr_16058 0 1 20627 expr=0 torch.permute torch.permute_2566 2 1 relative_position_bias.21 3823 3824 $input=relative_position_bias.21 $dims=3823 #relative_position_bias.21=(64,64,6)f32 #3824=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_30 1 1 3824 relative_position_bias0.21 memory_format=torch.contiguous_format $input=3824 #3824=(6,64,64)f32 #relative_position_bias0.21=(6,64,64)f32 torch.transpose torch.transpose_2981 3 1 k.21 3779 3780 3815 $input=k.21 $dim0=3779 $dim1=3780 #k.21=(36,6,64,32)f32 #3815=(36,6,32,64)f32 torch.matmul torch.matmul_2222 2 1 q0.21 3815 attn.43 $input=q0.21 $other=3815 #q0.21=(36,6,64,32)f32 #3815=(36,6,32,64)f32 #attn.43=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3235 2 1 relative_position_bias0.21 20627 3826 $input=relative_position_bias0.21 $dim=20627 #relative_position_bias0.21=(6,64,64)f32 #3826=(1,6,64,64)f32 pnnx.Expression pnnx_expr_16056 2 1 attn.43 3826 attn0.11 expr=add(@0,@1) #attn.43=(36,6,64,64)f32 #3826=(1,6,64,64)f32 #attn0.11=(36,6,64,64)f32 pnnx.Expression pnnx_expr_16047 2 1 x2.21 attn_mask.11 3833 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.21=(36,64,192)f32 #attn_mask.11=(36,64,64)f32 pnnx.Expression pnnx_expr_16046 0 1 20632 expr=1 pnnx.Expression pnnx_expr_16045 0 1 20633 expr=0 Tensor.view Tensor.view_1079 2 1 attn0.11 3833 3834 $input=attn0.11 $shape=3833 #attn0.11=(36,6,64,64)f32 #3834=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3236 2 1 attn_mask.11 20632 3835 $input=attn_mask.11 $dim=20632 #attn_mask.11=(36,64,64)f32 #3835=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3237 2 1 3835 20633 3836 $input=3835 $dim=20633 #3835=(36,1,64,64)f32 #3836=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_16043 2 1 3834 3836 attn1.11 expr=add(@0,@1) #3834=(1,36,6,64,64)f32 #3836=(1,36,1,64,64)f32 #attn1.11=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_16040 1 1 x2.21 3838 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.21=(36,64,192)f32 Tensor.view Tensor.view_1080 2 1 attn1.11 3838 input.49 $input=attn1.11 $shape=3838 #attn1.11=(1,36,6,64,64)f32 #input.49=(36,6,64,64)f32 nn.Softmax layers_dfe.1.residual_group.blocks.3.attn.softmax 1 1 input.49 3840 dim=-1 #input.49=(36,6,64,64)f32 #3840=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.3.attn.attn_drop 1 1 3840 3841 #3840=(36,6,64,64)f32 #3841=(36,6,64,64)f32 Tensor.select Tensor.select_679 3 1 qkv0.21 20618 20619 v.21 $input=qkv0.21 $dim=20618 $index=20619 #qkv0.21=(3,36,6,64,32)f32 #v.21=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16039 0 1 20637 expr=1 pnnx.Expression pnnx_expr_16038 0 1 20638 expr=2 torch.matmul torch.matmul_2223 2 1 3841 v.21 3842 $input=3841 $other=v.21 #3841=(36,6,64,64)f32 #v.21=(36,6,64,32)f32 #3842=(36,6,64,32)f32 pnnx.Expression pnnx_expr_16037 1 1 x2.21 3844 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.21=(36,64,192)f32 torch.transpose torch.transpose_2982 3 1 3842 20637 20638 3843 $input=3842 $dim0=20637 $dim1=20638 #3842=(36,6,64,32)f32 #3843=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_453 2 1 3843 3844 input0.23 $input=3843 $shape=3844 #3843=(36,64,6,32)f32 #input0.23=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.3.attn.proj 1 1 input0.23 3846 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.23=(36,64,192)f32 #3846=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.3.attn.proj_drop 1 1 3846 3847 #3846=(36,64,192)f32 #3847=(36,64,192)f32 pnnx.Expression 
pnnx_expr_16033 1 1 3711 3848 expr=[-1,8,8,int(size(@0,2))] #3711=(1,2304,192)f32 pnnx.Expression pnnx_expr_16020 1 1 154 3854 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_16013 0 1 3856 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1081 2 1 3847 3848 windows.21 $input=3847 $shape=3848 #3847=(36,64,192)f32 #windows.21=(36,8,8,192)f32 Tensor.view Tensor.view_1082 2 1 windows.21 3854 x3.21 $input=windows.21 $shape=3854 #windows.21=(36,8,8,192)f32 #x3.21=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_16010 1 1 154 3859 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2567 2 1 x3.21 3856 3857 $input=x3.21 $dims=3856 #x3.21=(1,6,6,8,8,192)f32 #3857=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_31 1 1 3857 3858 memory_format=torch.contiguous_format $input=3857 #3857=(1,6,8,6,8,192)f32 #3858=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_16007 0 1 3861 expr=[4,4] pnnx.Expression pnnx_expr_16004 0 1 3862 expr=[1,2] Tensor.view Tensor.view_1083 2 1 3858 3859 shifted_x.11 $input=3858 $shape=3859 #3858=(1,6,8,6,8,192)f32 #shifted_x.11=(1,48,48,192)f32 pnnx.Expression pnnx_expr_16001 2 1 3711 154 3866 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3711=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2429 3 1 shifted_x.11 3861 3862 x4.21 $input=shifted_x.11 $shifts=3861 $dims=3862 #shifted_x.11=(1,48,48,192)f32 #x4.21=(1,48,48,192)f32 Tensor.view Tensor.view_1084 2 1 x4.21 3866 x5.11 $input=x4.21 $shape=3866 #x4.21=(1,48,48,192)f32 #x5.11=(1,2304,192)f32 pnnx.Expression pnnx_expr_15999 2 1 3711 x5.11 input.51 expr=add(@0,@1) #3711=(1,2304,192)f32 #x5.11=(1,2304,192)f32 #input.51=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.3.norm2 1 1 input.51 3870 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.51=(1,2304,192)f32 #3870=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.3.mlp.fc1 1 1 3870 3875 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #3870=(1,2304,192)f32 #3875=(1,2304,384)f32 nn.GELU layers_dfe.1.residual_group.blocks.3.mlp.act 1 1 3875 3876 #3875=(1,2304,384)f32 #3876=(1,2304,384)f32 nn.Dropout layers_dfe.1.residual_group.blocks.3.mlp.drop 1 1 3876 3877 #3876=(1,2304,384)f32 #3877=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.3.mlp.fc2 1 1 3877 3878 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #3877=(1,2304,384)f32 #3878=(1,2304,192)f32 nn.Dropout pnnx_unique_20 1 1 3878 3879 #3878=(1,2304,192)f32 #3879=(1,2304,192)f32 pnnx.Expression pnnx_expr_15997 2 1 input.51 3879 3881 expr=add(@0,@1) #input.51=(1,2304,192)f32 #3879=(1,2304,192)f32 #3881=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.4.norm1 1 1 3881 3908 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #3881=(1,2304,192)f32 #3908=(1,2304,192)f32 pnnx.Expression pnnx_expr_15975 2 1 3881 154 3909 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #3881=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1085 2 1 3908 3909 x.23 $input=3908 $shape=3909 #3908=(1,2304,192)f32 #x.23=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15954 1 1 x.23 3926 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.23=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15949 0 1 3928 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1086 2 1 
x.23 3926 x0.23 $input=x.23 $shape=3926 #x.23=(1,48,48,192)f32 #x0.23=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15946 1 1 x.23 3931 expr=[-1,8,8,int(size(@0,3))] #x.23=(1,48,48,192)f32 torch.permute torch.permute_2568 2 1 x0.23 3928 3929 $input=x0.23 $dims=3928 #x0.23=(1,6,8,6,8,192)f32 #3929=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_32 1 1 3929 3930 memory_format=torch.contiguous_format $input=3929 #3929=(1,6,6,8,8,192)f32 #3930=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15944 1 1 3881 3933 expr=[-1,64,int(size(@0,2))] #3881=(1,2304,192)f32 pnnx.Expression pnnx_expr_15934 0 1 3944 expr=-2 pnnx.Expression pnnx_expr_15933 0 1 3945 expr=-1 pnnx.Attribute layers_dfe.1.residual_group.blocks.4.attn 0 1 relative_position_bias_table.23 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.23=(225,6)f32 pnnx.Attribute pnnx_unique_21 0 1 relative_position_index.23 @relative_position_index=(64,64)i64 #relative_position_index.23=(64,64)i64 Tensor.view Tensor.view_1087 2 1 3930 3931 x_windows.23 $input=3930 $shape=3931 #3930=(1,6,6,8,8,192)f32 #x_windows.23=(36,8,8,192)f32 Tensor.view Tensor.view_1088 2 1 x_windows.23 3933 x1.23 $input=x_windows.23 $shape=3933 #x_windows.23=(36,8,8,192)f32 #x1.23=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.4.attn.qkv 1 1 x1.23 3965 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.23=(36,64,192)f32 #3965=(36,64,576)f32 pnnx.Expression pnnx_expr_15918 1 1 x1.23 3968 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.23=(36,64,192)f32 pnnx.Expression pnnx_expr_15913 0 1 3970 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_454 2 1 3965 3968 3969 $input=3965 $shape=3968 #3965=(36,64,576)f32 #3969=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15912 0 1 20682 expr=0 pnnx.Expression pnnx_expr_15911 0 1 20683 expr=0 pnnx.Expression pnnx_expr_15910 0 1 20684 expr=0 pnnx.Expression pnnx_expr_15909 0 1 20685 expr=1 pnnx.Expression pnnx_expr_15908 0 1 20686 expr=0 pnnx.Expression pnnx_expr_15907 0 1 20687 expr=2 torch.permute torch.permute_2569 2 1 3969 3970 qkv0.23 $input=3969 $dims=3970 #3969=(36,64,3,6,32)f32 #qkv0.23=(3,36,6,64,32)f32 Tensor.select Tensor.select_680 3 1 qkv0.23 20682 20683 q.23 $input=qkv0.23 $dim=20682 $index=20683 #qkv0.23=(3,36,6,64,32)f32 #q.23=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15906 1 1 q.23 q0.23 expr=mul(@0,1.767767e-01) #q.23=(36,6,64,32)f32 #q0.23=(36,6,64,32)f32 Tensor.select Tensor.select_681 3 1 qkv0.23 20684 20685 k.23 $input=qkv0.23 $dim=20684 $index=20685 #qkv0.23=(3,36,6,64,32)f32 #k.23=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15904 0 1 3978 expr=[-1] Tensor.view Tensor.view_1089 2 1 relative_position_index.23 3978 3979 $input=relative_position_index.23 $shape=3978 #relative_position_index.23=(64,64)i64 #3979=(4096)i64 pnnx.Expression pnnx_expr_15903 1 1 3979 3980 expr=[@0] #3979=(4096)i64 pnnx.Expression pnnx_expr_15900 0 1 3982 expr=[64,64,-1] Tensor.index Tensor.index_336 2 1 relative_position_bias_table.23 3980 3981 $input=relative_position_bias_table.23 $expr=3980 #relative_position_bias_table.23=(225,6)f32 #3981=(4096,6)f32 pnnx.Expression pnnx_expr_15896 0 1 3984 expr=[2,0,1] Tensor.view Tensor.view_1090 2 1 3981 3982 relative_position_bias.23 $input=3981 $shape=3982 #3981=(4096,6)f32 #relative_position_bias.23=(64,64,6)f32 pnnx.Expression pnnx_expr_15895 0 1 20695 expr=0 torch.permute torch.permute_2570 2 1 relative_position_bias.23 3984 3985 $input=relative_position_bias.23 $dims=3984 #relative_position_bias.23=(64,64,6)f32 
#3985=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_33 1 1 3985 relative_position_bias0.23 memory_format=torch.contiguous_format $input=3985 #3985=(6,64,64)f32 #relative_position_bias0.23=(6,64,64)f32 torch.transpose torch.transpose_2983 3 1 k.23 3944 3945 3976 $input=k.23 $dim0=3944 $dim1=3945 #k.23=(36,6,64,32)f32 #3976=(36,6,32,64)f32 torch.matmul torch.matmul_2224 2 1 q0.23 3976 attn.47 $input=q0.23 $other=3976 #q0.23=(36,6,64,32)f32 #3976=(36,6,32,64)f32 #attn.47=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3238 2 1 relative_position_bias0.23 20695 3987 $input=relative_position_bias0.23 $dim=20695 #relative_position_bias0.23=(6,64,64)f32 #3987=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15893 2 1 attn.47 3987 input.53 expr=add(@0,@1) #attn.47=(36,6,64,64)f32 #3987=(1,6,64,64)f32 #input.53=(36,6,64,64)f32 nn.Softmax layers_dfe.1.residual_group.blocks.4.attn.softmax 1 1 input.53 3989 dim=-1 #input.53=(36,6,64,64)f32 #3989=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.4.attn.attn_drop 1 1 3989 3990 #3989=(36,6,64,64)f32 #3990=(36,6,64,64)f32 Tensor.select Tensor.select_682 3 1 qkv0.23 20686 20687 v.23 $input=qkv0.23 $dim=20686 $index=20687 #qkv0.23=(3,36,6,64,32)f32 #v.23=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15892 0 1 20697 expr=1 pnnx.Expression pnnx_expr_15891 0 1 20698 expr=2 torch.matmul torch.matmul_2225 2 1 3990 v.23 3991 $input=3990 $other=v.23 #3990=(36,6,64,64)f32 #v.23=(36,6,64,32)f32 #3991=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15890 1 1 x1.23 3993 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.23=(36,64,192)f32 torch.transpose torch.transpose_2984 3 1 3991 20697 20698 3992 $input=3991 $dim0=20697 $dim1=20698 #3991=(36,6,64,32)f32 #3992=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_455 2 1 3992 3993 input0.25 $input=3992 $shape=3993 #3992=(36,64,6,32)f32 #input0.25=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.4.attn.proj 1 1 input0.25 3995 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.25=(36,64,192)f32 #3995=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.4.attn.proj_drop 1 1 3995 3996 #3995=(36,64,192)f32 #3996=(36,64,192)f32 pnnx.Expression pnnx_expr_15886 1 1 3881 3997 expr=[-1,8,8,int(size(@0,2))] #3881=(1,2304,192)f32 pnnx.Expression pnnx_expr_15873 1 1 154 4003 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15866 0 1 4005 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1091 2 1 3996 3997 windows.23 $input=3996 $shape=3997 #3996=(36,64,192)f32 #windows.23=(36,8,8,192)f32 Tensor.view Tensor.view_1092 2 1 windows.23 4003 x2.23 $input=windows.23 $shape=4003 #windows.23=(36,8,8,192)f32 #x2.23=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15863 1 1 154 4008 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2571 2 1 x2.23 4005 4006 $input=x2.23 $dims=4005 #x2.23=(1,6,6,8,8,192)f32 #4006=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_34 1 1 4006 4007 memory_format=torch.contiguous_format $input=4006 #4006=(1,6,8,6,8,192)f32 #4007=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15860 2 1 3881 154 4012 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #3881=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1093 2 1 4007 4008 x3.23 $input=4007 $shape=4008 #4007=(1,6,8,6,8,192)f32 #x3.23=(1,48,48,192)f32 Tensor.view Tensor.view_1094 2 1 x3.23 4012 x4.23 $input=x3.23 $shape=4012 #x3.23=(1,48,48,192)f32 #x4.23=(1,2304,192)f32 pnnx.Expression 
pnnx_expr_15858 2 1 3881 x4.23 input.55 expr=add(@0,@1) #3881=(1,2304,192)f32 #x4.23=(1,2304,192)f32 #input.55=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.4.norm2 1 1 input.55 4016 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.55=(1,2304,192)f32 #4016=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.4.mlp.fc1 1 1 4016 4021 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4016=(1,2304,192)f32 #4021=(1,2304,384)f32 nn.GELU layers_dfe.1.residual_group.blocks.4.mlp.act 1 1 4021 4022 #4021=(1,2304,384)f32 #4022=(1,2304,384)f32 nn.Dropout layers_dfe.1.residual_group.blocks.4.mlp.drop 1 1 4022 4023 #4022=(1,2304,384)f32 #4023=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.4.mlp.fc2 1 1 4023 4024 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4023=(1,2304,384)f32 #4024=(1,2304,192)f32 nn.Dropout pnnx_unique_22 1 1 4024 4025 #4024=(1,2304,192)f32 #4025=(1,2304,192)f32 pnnx.Expression pnnx_expr_15856 2 1 input.55 4025 4027 expr=add(@0,@1) #input.55=(1,2304,192)f32 #4025=(1,2304,192)f32 #4027=(1,2304,192)f32 pnnx.Attribute layers_dfe.1.residual_group.blocks.5 0 1 attn_mask.13 @attn_mask=(36,64,64)f32 #attn_mask.13=(36,64,64)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.5.norm1 1 1 4027 4056 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4027=(1,2304,192)f32 #4056=(1,2304,192)f32 pnnx.Expression pnnx_expr_15833 2 1 4027 154 4057 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4027=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15831 0 1 4059 expr=[-4,-4] pnnx.Expression pnnx_expr_15829 0 1 4060 expr=[1,2] Tensor.view Tensor.view_1095 2 1 4056 4057 x.25 $input=4056 $shape=4057 #4056=(1,2304,192)f32 #x.25=(1,48,48,192)f32 torch.roll torch.roll_2430 3 1 x.25 4059 4060 x0.25 $input=x.25 $shifts=4059 $dims=4060 #x.25=(1,48,48,192)f32 #x0.25=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15807 1 1 x0.25 4077 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.25=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15802 0 1 4079 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1096 2 1 x0.25 4077 x1.25 $input=x0.25 $shape=4077 #x0.25=(1,48,48,192)f32 #x1.25=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15799 1 1 x0.25 4082 expr=[-1,8,8,int(size(@0,3))] #x0.25=(1,48,48,192)f32 torch.permute torch.permute_2572 2 1 x1.25 4079 4080 $input=x1.25 $dims=4079 #x1.25=(1,6,8,6,8,192)f32 #4080=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_35 1 1 4080 4081 memory_format=torch.contiguous_format $input=4080 #4080=(1,6,6,8,8,192)f32 #4081=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15797 1 1 4027 4084 expr=[-1,64,int(size(@0,2))] #4027=(1,2304,192)f32 pnnx.Expression pnnx_expr_15787 0 1 4095 expr=-2 pnnx.Expression pnnx_expr_15786 0 1 4096 expr=-1 pnnx.Attribute layers_dfe.1.residual_group.blocks.5.attn 0 1 relative_position_bias_table.25 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.25=(225,6)f32 pnnx.Attribute pnnx_unique_23 0 1 relative_position_index.25 @relative_position_index=(64,64)i64 #relative_position_index.25=(64,64)i64 Tensor.view Tensor.view_1097 2 1 4081 4082 x_windows.25 $input=4081 $shape=4082 #4081=(1,6,6,8,8,192)f32 #x_windows.25=(36,8,8,192)f32 Tensor.view Tensor.view_1098 2 1 x_windows.25 4084 x2.25 $input=x_windows.25 $shape=4084 #x_windows.25=(36,8,8,192)f32 #x2.25=(36,64,192)f32 
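Every block in this dump closes with the same pre-norm MLP seen for blocks.4 above: norm2 → fc1 (192→384) → GELU → dropout → fc2 (384→192) → dropout, added back onto the block input (pnnx_expr_15856). A sketch with the widths read off the dump — note the mlp ratio is 2, not the more common 4; the dropout probability is not recorded in the graph, so p=0.0 here is an assumption:

```python
import torch
import torch.nn as nn

dim, hidden = 192, 384  # in/out features of mlp.fc1 / mlp.fc2 in the dump
mlp = nn.Sequential(
    nn.LayerNorm(dim, eps=1e-5),  # norm2
    nn.Linear(dim, hidden),       # mlp.fc1
    nn.GELU(),                    # mlp.act
    nn.Dropout(p=0.0),            # p is an assumption; the dump omits it
    nn.Linear(hidden, dim),       # mlp.fc2
    nn.Dropout(p=0.0),
)

x = torch.randn(1, 2304, dim)  # input.55
out = x + mlp(x)               # pnnx_expr_15856: residual add
print(out.shape)  # torch.Size([1, 2304, 192])
```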
nn.Linear layers_dfe.1.residual_group.blocks.5.attn.qkv 1 1 x2.25 4120 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.25=(36,64,192)f32 #4120=(36,64,576)f32 pnnx.Expression pnnx_expr_15767 1 1 x2.25 4123 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.25=(36,64,192)f32 pnnx.Expression pnnx_expr_15762 0 1 4125 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_456 2 1 4120 4123 4124 $input=4120 $shape=4123 #4120=(36,64,576)f32 #4124=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15761 0 1 20741 expr=0 pnnx.Expression pnnx_expr_15760 0 1 20742 expr=0 pnnx.Expression pnnx_expr_15759 0 1 20743 expr=0 pnnx.Expression pnnx_expr_15758 0 1 20744 expr=1 pnnx.Expression pnnx_expr_15757 0 1 20745 expr=0 pnnx.Expression pnnx_expr_15756 0 1 20746 expr=2 torch.permute torch.permute_2573 2 1 4124 4125 qkv0.25 $input=4124 $dims=4125 #4124=(36,64,3,6,32)f32 #qkv0.25=(3,36,6,64,32)f32 Tensor.select Tensor.select_683 3 1 qkv0.25 20741 20742 q.25 $input=qkv0.25 $dim=20741 $index=20742 #qkv0.25=(3,36,6,64,32)f32 #q.25=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15755 1 1 q.25 q0.25 expr=mul(@0,1.767767e-01) #q.25=(36,6,64,32)f32 #q0.25=(36,6,64,32)f32 Tensor.select Tensor.select_684 3 1 qkv0.25 20743 20744 k.25 $input=qkv0.25 $dim=20743 $index=20744 #qkv0.25=(3,36,6,64,32)f32 #k.25=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15753 0 1 4133 expr=[-1] Tensor.view Tensor.view_1099 2 1 relative_position_index.25 4133 4134 $input=relative_position_index.25 $shape=4133 #relative_position_index.25=(64,64)i64 #4134=(4096)i64 pnnx.Expression pnnx_expr_15752 1 1 4134 4135 expr=[@0] #4134=(4096)i64 pnnx.Expression pnnx_expr_15749 0 1 4137 expr=[64,64,-1] Tensor.index Tensor.index_337 2 1 relative_position_bias_table.25 4135 4136 $input=relative_position_bias_table.25 $expr=4135 #relative_position_bias_table.25=(225,6)f32 #4136=(4096,6)f32 pnnx.Expression pnnx_expr_15745 0 1 4139 expr=[2,0,1] Tensor.view Tensor.view_1100 2 1 4136 4137 relative_position_bias.25 $input=4136 $shape=4137 #4136=(4096,6)f32 #relative_position_bias.25=(64,64,6)f32 pnnx.Expression pnnx_expr_15744 0 1 20754 expr=0 torch.permute torch.permute_2574 2 1 relative_position_bias.25 4139 4140 $input=relative_position_bias.25 $dims=4139 #relative_position_bias.25=(64,64,6)f32 #4140=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_36 1 1 4140 relative_position_bias0.25 memory_format=torch.contiguous_format $input=4140 #4140=(6,64,64)f32 #relative_position_bias0.25=(6,64,64)f32 torch.transpose torch.transpose_2985 3 1 k.25 4095 4096 4131 $input=k.25 $dim0=4095 $dim1=4096 #k.25=(36,6,64,32)f32 #4131=(36,6,32,64)f32 torch.matmul torch.matmul_2226 2 1 q0.25 4131 attn.51 $input=q0.25 $other=4131 #q0.25=(36,6,64,32)f32 #4131=(36,6,32,64)f32 #attn.51=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3239 2 1 relative_position_bias0.25 20754 4142 $input=relative_position_bias0.25 $dim=20754 #relative_position_bias0.25=(6,64,64)f32 #4142=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15742 2 1 attn.51 4142 attn0.13 expr=add(@0,@1) #attn.51=(36,6,64,64)f32 #4142=(1,6,64,64)f32 #attn0.13=(36,6,64,64)f32 pnnx.Expression pnnx_expr_15733 2 1 x2.25 attn_mask.13 4149 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.25=(36,64,192)f32 #attn_mask.13=(36,64,64)f32 pnnx.Expression pnnx_expr_15732 0 1 20759 expr=1 pnnx.Expression pnnx_expr_15731 0 1 20760 expr=0 Tensor.view Tensor.view_1101 2 1 attn0.13 4149 4150 $input=attn0.13 $shape=4149 #attn0.13=(36,6,64,64)f32 #4150=(1,36,6,64,64)f32 
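The head math for blocks.5 sits in the stretch above: a single 192→576 linear produces q, k, v for 6 heads of dim 32, the reshape/permute pair splits them out (Tensor.select_683/684/685), and q is pre-scaled by 1.767767e-01 = 32**-0.5 before the q @ kᵀ matmul that yields attn.51. A sketch of that path (the shifted-window mask add continues immediately below in the dump):

```python
import torch
import torch.nn as nn

B_, N, C, heads = 36, 64, 192, 6
head_dim = C // heads           # 32
qkv_proj = nn.Linear(C, 3 * C)  # blocks.5.attn.qkv: in_features=192, out_features=576

x = torch.randn(B_, N, C)       # x2.25
qkv = qkv_proj(x).reshape(B_, N, 3, heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]           # Tensor.select_683/684/685

q = q * head_dim ** -0.5                   # mul(@0, 1.767767e-01)
attn = q @ k.transpose(-2, -1)             # torch.transpose_2985 + torch.matmul_2226
print(attn.shape)  # torch.Size([36, 6, 64, 64]) -- attn.51
```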
torch.unsqueeze torch.unsqueeze_3240 2 1 attn_mask.13 20759 4151 $input=attn_mask.13 $dim=20759 #attn_mask.13=(36,64,64)f32 #4151=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3241 2 1 4151 20760 4152 $input=4151 $dim=20760 #4151=(36,1,64,64)f32 #4152=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_15729 2 1 4150 4152 attn1.13 expr=add(@0,@1) #4150=(1,36,6,64,64)f32 #4152=(1,36,1,64,64)f32 #attn1.13=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_15726 1 1 x2.25 4154 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.25=(36,64,192)f32 Tensor.view Tensor.view_1102 2 1 attn1.13 4154 input.57 $input=attn1.13 $shape=4154 #attn1.13=(1,36,6,64,64)f32 #input.57=(36,6,64,64)f32 nn.Softmax layers_dfe.1.residual_group.blocks.5.attn.softmax 1 1 input.57 4156 dim=-1 #input.57=(36,6,64,64)f32 #4156=(36,6,64,64)f32 nn.Dropout layers_dfe.1.residual_group.blocks.5.attn.attn_drop 1 1 4156 4157 #4156=(36,6,64,64)f32 #4157=(36,6,64,64)f32 Tensor.select Tensor.select_685 3 1 qkv0.25 20745 20746 v.25 $input=qkv0.25 $dim=20745 $index=20746 #qkv0.25=(3,36,6,64,32)f32 #v.25=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15725 0 1 20764 expr=1 pnnx.Expression pnnx_expr_15724 0 1 20765 expr=2 torch.matmul torch.matmul_2227 2 1 4157 v.25 4158 $input=4157 $other=v.25 #4157=(36,6,64,64)f32 #v.25=(36,6,64,32)f32 #4158=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15723 1 1 x2.25 4160 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.25=(36,64,192)f32 torch.transpose torch.transpose_2986 3 1 4158 20764 20765 4159 $input=4158 $dim0=20764 $dim1=20765 #4158=(36,6,64,32)f32 #4159=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_457 2 1 4159 4160 input0.27 $input=4159 $shape=4160 #4159=(36,64,6,32)f32 #input0.27=(36,64,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.5.attn.proj 1 1 input0.27 4162 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.27=(36,64,192)f32 #4162=(36,64,192)f32 nn.Dropout layers_dfe.1.residual_group.blocks.5.attn.proj_drop 1 1 4162 4163 #4162=(36,64,192)f32 #4163=(36,64,192)f32 pnnx.Expression pnnx_expr_15719 1 1 4027 4164 expr=[-1,8,8,int(size(@0,2))] #4027=(1,2304,192)f32 pnnx.Expression pnnx_expr_15706 1 1 154 4170 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15699 0 1 4172 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1103 2 1 4163 4164 windows.25 $input=4163 $shape=4164 #4163=(36,64,192)f32 #windows.25=(36,8,8,192)f32 Tensor.view Tensor.view_1104 2 1 windows.25 4170 x3.25 $input=windows.25 $shape=4170 #windows.25=(36,8,8,192)f32 #x3.25=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15696 1 1 154 4175 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2575 2 1 x3.25 4172 4173 $input=x3.25 $dims=4172 #x3.25=(1,6,6,8,8,192)f32 #4173=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_37 1 1 4173 4174 memory_format=torch.contiguous_format $input=4173 #4173=(1,6,8,6,8,192)f32 #4174=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15693 0 1 4177 expr=[4,4] pnnx.Expression pnnx_expr_15690 0 1 4178 expr=[1,2] Tensor.view Tensor.view_1105 2 1 4174 4175 shifted_x.13 $input=4174 $shape=4175 #4174=(1,6,8,6,8,192)f32 #shifted_x.13=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15687 2 1 4027 154 4182 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4027=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2431 3 1 shifted_x.13 4177 4178 x4.25 $input=shifted_x.13 $shifts=4177 $dims=4178 #shifted_x.13=(1,48,48,192)f32 #x4.25=(1,48,48,192)f32 Tensor.view 
Tensor.view_1106 2 1 x4.25 4182 x5.13 $input=x4.25 $shape=4182 #x4.25=(1,48,48,192)f32 #x5.13=(1,2304,192)f32 pnnx.Expression pnnx_expr_15685 2 1 4027 x5.13 input.59 expr=add(@0,@1) #4027=(1,2304,192)f32 #x5.13=(1,2304,192)f32 #input.59=(1,2304,192)f32 nn.LayerNorm layers_dfe.1.residual_group.blocks.5.norm2 1 1 input.59 4186 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.59=(1,2304,192)f32 #4186=(1,2304,192)f32 nn.Linear layers_dfe.1.residual_group.blocks.5.mlp.fc1 1 1 4186 4191 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4186=(1,2304,192)f32 #4191=(1,2304,384)f32 nn.GELU layers_dfe.1.residual_group.blocks.5.mlp.act 1 1 4191 4192 #4191=(1,2304,384)f32 #4192=(1,2304,384)f32 nn.Dropout layers_dfe.1.residual_group.blocks.5.mlp.drop 1 1 4192 4193 #4192=(1,2304,384)f32 #4193=(1,2304,384)f32 nn.Linear layers_dfe.1.residual_group.blocks.5.mlp.fc2 1 1 4193 4194 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4193=(1,2304,384)f32 #4194=(1,2304,192)f32 nn.Dropout pnnx_unique_24 1 1 4194 4195 #4194=(1,2304,192)f32 #4195=(1,2304,192)f32 pnnx.Expression pnnx_expr_15683 2 1 input.59 4195 4197 expr=add(@0,@1) #input.59=(1,2304,192)f32 #4195=(1,2304,192)f32 #4197=(1,2304,192)f32 pnnx.Expression pnnx_expr_15681 0 1 4199 expr=1 pnnx.Expression pnnx_expr_15680 0 1 4200 expr=2 pnnx.Expression pnnx_expr_15675 2 1 4197 154 4206 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #4197=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.transpose torch.transpose_2987 3 1 4197 4199 4200 4205 $input=4197 $dim0=4199 $dim1=4200 #4197=(1,2304,192)f32 #4205=(1,192,2304)f32 Tensor.view Tensor.view_1107 2 1 4205 4206 input.61 $input=4205 $shape=4206 #4205=(1,192,2304)f32 #input.61=(1,192,48,48)f32 nn.Conv2d layers_dfe.1.conv 1 1 input.61 4208 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.61=(1,192,48,48)f32 #4208=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15674 0 1 4209 expr=-1 pnnx.Expression pnnx_expr_15673 0 1 4210 expr=2 pnnx.Expression pnnx_expr_15672 0 1 4211 expr=1 pnnx.Expression pnnx_expr_15671 0 1 20792 expr=2 torch.flatten torch.flatten_2185 3 1 4208 4210 4209 4212 $input=4208 $start_dim=4210 $end_dim=4209 #4208=(1,192,48,48)f32 #4212=(1,192,2304)f32 torch.transpose torch.transpose_2988 3 1 4212 4211 20792 4213 $input=4212 $dim0=4211 $dim1=20792 #4212=(1,192,2304)f32 #4213=(1,2304,192)f32 pnnx.Expression pnnx_expr_15670 2 1 4213 3232 4214 expr=add(@0,@1) #4213=(1,2304,192)f32 #3232=(1,2304,192)f32 #4214=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.0.norm1 1 1 4214 4258 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4214=(1,2304,192)f32 #4258=(1,2304,192)f32 pnnx.Expression pnnx_expr_15647 2 1 4214 154 4259 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4214=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1108 2 1 4258 4259 x.27 $input=4258 $shape=4259 #4258=(1,2304,192)f32 #x.27=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15626 1 1 x.27 4276 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.27=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15621 0 1 4278 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1109 2 1 x.27 4276 x0.27 $input=x.27 $shape=4276 #x.27=(1,48,48,192)f32 #x0.27=(1,6,8,6,8,192)f32 pnnx.Expression 
pnnx_expr_15618 1 1 x.27 4281 expr=[-1,8,8,int(size(@0,3))] #x.27=(1,48,48,192)f32 torch.permute torch.permute_2576 2 1 x0.27 4278 4279 $input=x0.27 $dims=4278 #x0.27=(1,6,8,6,8,192)f32 #4279=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_38 1 1 4279 4280 memory_format=torch.contiguous_format $input=4279 #4279=(1,6,6,8,8,192)f32 #4280=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15616 1 1 4214 4283 expr=[-1,64,int(size(@0,2))] #4214=(1,2304,192)f32 pnnx.Expression pnnx_expr_15606 0 1 4294 expr=-2 pnnx.Expression pnnx_expr_15605 0 1 4295 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.0.attn 0 1 relative_position_bias_table.27 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.27=(225,6)f32 pnnx.Attribute pnnx_unique_25 0 1 relative_position_index.27 @relative_position_index=(64,64)i64 #relative_position_index.27=(64,64)i64 Tensor.view Tensor.view_1110 2 1 4280 4281 x_windows.27 $input=4280 $shape=4281 #4280=(1,6,6,8,8,192)f32 #x_windows.27=(36,8,8,192)f32 Tensor.view Tensor.view_1111 2 1 x_windows.27 4283 x1.27 $input=x_windows.27 $shape=4283 #x_windows.27=(36,8,8,192)f32 #x1.27=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.0.attn.qkv 1 1 x1.27 4315 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.27=(36,64,192)f32 #4315=(36,64,576)f32 pnnx.Expression pnnx_expr_15590 1 1 x1.27 4318 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.27=(36,64,192)f32 pnnx.Expression pnnx_expr_15585 0 1 4320 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_458 2 1 4315 4318 4319 $input=4315 $shape=4318 #4315=(36,64,576)f32 #4319=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15584 0 1 20810 expr=0 pnnx.Expression pnnx_expr_15583 0 1 20811 expr=0 pnnx.Expression pnnx_expr_15582 0 1 20812 expr=0 pnnx.Expression pnnx_expr_15581 0 1 20813 expr=1 pnnx.Expression pnnx_expr_15580 0 1 20814 expr=0 pnnx.Expression pnnx_expr_15579 0 1 20815 expr=2 torch.permute torch.permute_2577 2 1 4319 4320 qkv0.27 $input=4319 $dims=4320 #4319=(36,64,3,6,32)f32 #qkv0.27=(3,36,6,64,32)f32 Tensor.select Tensor.select_686 3 1 qkv0.27 20810 20811 q.27 $input=qkv0.27 $dim=20810 $index=20811 #qkv0.27=(3,36,6,64,32)f32 #q.27=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15578 1 1 q.27 q0.27 expr=mul(@0,1.767767e-01) #q.27=(36,6,64,32)f32 #q0.27=(36,6,64,32)f32 Tensor.select Tensor.select_687 3 1 qkv0.27 20812 20813 k.27 $input=qkv0.27 $dim=20812 $index=20813 #qkv0.27=(3,36,6,64,32)f32 #k.27=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15576 0 1 4328 expr=[-1] Tensor.view Tensor.view_1112 2 1 relative_position_index.27 4328 4329 $input=relative_position_index.27 $shape=4328 #relative_position_index.27=(64,64)i64 #4329=(4096)i64 pnnx.Expression pnnx_expr_15575 1 1 4329 4330 expr=[@0] #4329=(4096)i64 pnnx.Expression pnnx_expr_15572 0 1 4332 expr=[64,64,-1] Tensor.index Tensor.index_338 2 1 relative_position_bias_table.27 4330 4331 $input=relative_position_bias_table.27 $expr=4330 #relative_position_bias_table.27=(225,6)f32 #4331=(4096,6)f32 pnnx.Expression pnnx_expr_15568 0 1 4334 expr=[2,0,1] Tensor.view Tensor.view_1113 2 1 4331 4332 relative_position_bias.27 $input=4331 $shape=4332 #4331=(4096,6)f32 #relative_position_bias.27=(64,64,6)f32 pnnx.Expression pnnx_expr_15567 0 1 20823 expr=0 torch.permute torch.permute_2578 2 1 relative_position_bias.27 4334 4335 $input=relative_position_bias.27 $dims=4334 #relative_position_bias.27=(64,64,6)f32 #4335=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_39 1 1 4335 relative_position_bias0.27 
memory_format=torch.contiguous_format $input=4335 #4335=(6,64,64)f32 #relative_position_bias0.27=(6,64,64)f32 torch.transpose torch.transpose_2989 3 1 k.27 4294 4295 4326 $input=k.27 $dim0=4294 $dim1=4295 #k.27=(36,6,64,32)f32 #4326=(36,6,32,64)f32 torch.matmul torch.matmul_2228 2 1 q0.27 4326 attn.55 $input=q0.27 $other=4326 #q0.27=(36,6,64,32)f32 #4326=(36,6,32,64)f32 #attn.55=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3242 2 1 relative_position_bias0.27 20823 4337 $input=relative_position_bias0.27 $dim=20823 #relative_position_bias0.27=(6,64,64)f32 #4337=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15565 2 1 attn.55 4337 input.63 expr=add(@0,@1) #attn.55=(36,6,64,64)f32 #4337=(1,6,64,64)f32 #input.63=(36,6,64,64)f32 nn.Softmax layers_dfe.2.residual_group.blocks.0.attn.softmax 1 1 input.63 4339 dim=-1 #input.63=(36,6,64,64)f32 #4339=(36,6,64,64)f32 nn.Dropout layers_dfe.2.residual_group.blocks.0.attn.attn_drop 1 1 4339 4340 #4339=(36,6,64,64)f32 #4340=(36,6,64,64)f32 Tensor.select Tensor.select_688 3 1 qkv0.27 20814 20815 v.27 $input=qkv0.27 $dim=20814 $index=20815 #qkv0.27=(3,36,6,64,32)f32 #v.27=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15564 0 1 20825 expr=1 pnnx.Expression pnnx_expr_15563 0 1 20826 expr=2 torch.matmul torch.matmul_2229 2 1 4340 v.27 4341 $input=4340 $other=v.27 #4340=(36,6,64,64)f32 #v.27=(36,6,64,32)f32 #4341=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15562 1 1 x1.27 4343 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.27=(36,64,192)f32 torch.transpose torch.transpose_2990 3 1 4341 20825 20826 4342 $input=4341 $dim0=20825 $dim1=20826 #4341=(36,6,64,32)f32 #4342=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_459 2 1 4342 4343 input0.29 $input=4342 $shape=4343 #4342=(36,64,6,32)f32 #input0.29=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.0.attn.proj 1 1 input0.29 4345 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.29=(36,64,192)f32 #4345=(36,64,192)f32 nn.Dropout layers_dfe.2.residual_group.blocks.0.attn.proj_drop 1 1 4345 4346 #4345=(36,64,192)f32 #4346=(36,64,192)f32 pnnx.Expression pnnx_expr_15558 1 1 4214 4347 expr=[-1,8,8,int(size(@0,2))] #4214=(1,2304,192)f32 pnnx.Expression pnnx_expr_15545 1 1 154 4353 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15538 0 1 4355 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1114 2 1 4346 4347 windows.27 $input=4346 $shape=4347 #4346=(36,64,192)f32 #windows.27=(36,8,8,192)f32 Tensor.view Tensor.view_1115 2 1 windows.27 4353 x2.27 $input=windows.27 $shape=4353 #windows.27=(36,8,8,192)f32 #x2.27=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15535 1 1 154 4358 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2579 2 1 x2.27 4355 4356 $input=x2.27 $dims=4355 #x2.27=(1,6,6,8,8,192)f32 #4356=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_40 1 1 4356 4357 memory_format=torch.contiguous_format $input=4356 #4356=(1,6,8,6,8,192)f32 #4357=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15532 2 1 4214 154 4362 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4214=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1116 2 1 4357 4358 x3.27 $input=4357 $shape=4358 #4357=(1,6,8,6,8,192)f32 #x3.27=(1,48,48,192)f32 Tensor.view Tensor.view_1117 2 1 x3.27 4362 x4.27 $input=x3.27 $shape=4362 #x3.27=(1,48,48,192)f32 #x4.27=(1,2304,192)f32 pnnx.Expression pnnx_expr_15530 2 1 4214 x4.27 input.65 expr=add(@0,@1) #4214=(1,2304,192)f32 
#x4.27=(1,2304,192)f32 #input.65=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.0.norm2 1 1 input.65 4366 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.65=(1,2304,192)f32 #4366=(1,2304,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.0.mlp.fc1 1 1 4366 4371 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4366=(1,2304,192)f32 #4371=(1,2304,384)f32 nn.GELU layers_dfe.2.residual_group.blocks.0.mlp.act 1 1 4371 4372 #4371=(1,2304,384)f32 #4372=(1,2304,384)f32 nn.Dropout layers_dfe.2.residual_group.blocks.0.mlp.drop 1 1 4372 4373 #4372=(1,2304,384)f32 #4373=(1,2304,384)f32 nn.Linear layers_dfe.2.residual_group.blocks.0.mlp.fc2 1 1 4373 4374 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4373=(1,2304,384)f32 #4374=(1,2304,192)f32 nn.Dropout pnnx_unique_26 1 1 4374 4375 #4374=(1,2304,192)f32 #4375=(1,2304,192)f32 pnnx.Expression pnnx_expr_15528 2 1 input.65 4375 4377 expr=add(@0,@1) #input.65=(1,2304,192)f32 #4375=(1,2304,192)f32 #4377=(1,2304,192)f32 pnnx.Attribute layers_dfe.2.residual_group.blocks.1 0 1 attn_mask.15 @attn_mask=(36,64,64)f32 #attn_mask.15=(36,64,64)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.1.norm1 1 1 4377 4406 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4377=(1,2304,192)f32 #4406=(1,2304,192)f32 pnnx.Expression pnnx_expr_15505 2 1 4377 154 4407 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4377=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15503 0 1 4409 expr=[-4,-4] pnnx.Expression pnnx_expr_15501 0 1 4410 expr=[1,2] Tensor.view Tensor.view_1118 2 1 4406 4407 x.29 $input=4406 $shape=4407 #4406=(1,2304,192)f32 #x.29=(1,48,48,192)f32 torch.roll torch.roll_2432 3 1 x.29 4409 4410 x0.29 $input=x.29 $shifts=4409 $dims=4410 #x.29=(1,48,48,192)f32 #x0.29=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15479 1 1 x0.29 4427 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.29=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15474 0 1 4429 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1119 2 1 x0.29 4427 x1.29 $input=x0.29 $shape=4427 #x0.29=(1,48,48,192)f32 #x1.29=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15471 1 1 x0.29 4432 expr=[-1,8,8,int(size(@0,3))] #x0.29=(1,48,48,192)f32 torch.permute torch.permute_2580 2 1 x1.29 4429 4430 $input=x1.29 $dims=4429 #x1.29=(1,6,8,6,8,192)f32 #4430=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_41 1 1 4430 4431 memory_format=torch.contiguous_format $input=4430 #4430=(1,6,6,8,8,192)f32 #4431=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15469 1 1 4377 4434 expr=[-1,64,int(size(@0,2))] #4377=(1,2304,192)f32 pnnx.Expression pnnx_expr_15459 0 1 4445 expr=-2 pnnx.Expression pnnx_expr_15458 0 1 4446 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.1.attn 0 1 relative_position_bias_table.29 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.29=(225,6)f32 pnnx.Attribute pnnx_unique_27 0 1 relative_position_index.29 @relative_position_index=(64,64)i64 #relative_position_index.29=(64,64)i64 Tensor.view Tensor.view_1120 2 1 4431 4432 x_windows.29 $input=4431 $shape=4432 #4431=(1,6,6,8,8,192)f32 #x_windows.29=(36,8,8,192)f32 Tensor.view Tensor.view_1121 2 1 x_windows.29 4434 x2.29 $input=x_windows.29 $shape=4434 #x_windows.29=(36,8,8,192)f32 #x2.29=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.1.attn.qkv 1 1 x2.29 4470 bias=True 
in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.29=(36,64,192)f32 #4470=(36,64,576)f32 pnnx.Expression pnnx_expr_15439 1 1 x2.29 4473 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.29=(36,64,192)f32 pnnx.Expression pnnx_expr_15434 0 1 4475 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_460 2 1 4470 4473 4474 $input=4470 $shape=4473 #4470=(36,64,576)f32 #4474=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15433 0 1 20869 expr=0 pnnx.Expression pnnx_expr_15432 0 1 20870 expr=0 pnnx.Expression pnnx_expr_15431 0 1 20871 expr=0 pnnx.Expression pnnx_expr_15430 0 1 20872 expr=1 pnnx.Expression pnnx_expr_15429 0 1 20873 expr=0 pnnx.Expression pnnx_expr_15428 0 1 20874 expr=2 torch.permute torch.permute_2581 2 1 4474 4475 qkv0.29 $input=4474 $dims=4475 #4474=(36,64,3,6,32)f32 #qkv0.29=(3,36,6,64,32)f32 Tensor.select Tensor.select_689 3 1 qkv0.29 20869 20870 q.29 $input=qkv0.29 $dim=20869 $index=20870 #qkv0.29=(3,36,6,64,32)f32 #q.29=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15427 1 1 q.29 q0.29 expr=mul(@0,1.767767e-01) #q.29=(36,6,64,32)f32 #q0.29=(36,6,64,32)f32 Tensor.select Tensor.select_690 3 1 qkv0.29 20871 20872 k.29 $input=qkv0.29 $dim=20871 $index=20872 #qkv0.29=(3,36,6,64,32)f32 #k.29=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15425 0 1 4483 expr=[-1] Tensor.view Tensor.view_1122 2 1 relative_position_index.29 4483 4484 $input=relative_position_index.29 $shape=4483 #relative_position_index.29=(64,64)i64 #4484=(4096)i64 pnnx.Expression pnnx_expr_15424 1 1 4484 4485 expr=[@0] #4484=(4096)i64 pnnx.Expression pnnx_expr_15421 0 1 4487 expr=[64,64,-1] Tensor.index Tensor.index_339 2 1 relative_position_bias_table.29 4485 4486 $input=relative_position_bias_table.29 $expr=4485 #relative_position_bias_table.29=(225,6)f32 #4486=(4096,6)f32 pnnx.Expression pnnx_expr_15417 0 1 4489 expr=[2,0,1] Tensor.view Tensor.view_1123 2 1 4486 4487 relative_position_bias.29 $input=4486 $shape=4487 #4486=(4096,6)f32 #relative_position_bias.29=(64,64,6)f32 pnnx.Expression pnnx_expr_15416 0 1 20882 expr=0 torch.permute torch.permute_2582 2 1 relative_position_bias.29 4489 4490 $input=relative_position_bias.29 $dims=4489 #relative_position_bias.29=(64,64,6)f32 #4490=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_42 1 1 4490 relative_position_bias0.29 memory_format=torch.contiguous_format $input=4490 #4490=(6,64,64)f32 #relative_position_bias0.29=(6,64,64)f32 torch.transpose torch.transpose_2991 3 1 k.29 4445 4446 4481 $input=k.29 $dim0=4445 $dim1=4446 #k.29=(36,6,64,32)f32 #4481=(36,6,32,64)f32 torch.matmul torch.matmul_2230 2 1 q0.29 4481 attn.59 $input=q0.29 $other=4481 #q0.29=(36,6,64,32)f32 #4481=(36,6,32,64)f32 #attn.59=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3243 2 1 relative_position_bias0.29 20882 4492 $input=relative_position_bias0.29 $dim=20882 #relative_position_bias0.29=(6,64,64)f32 #4492=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15414 2 1 attn.59 4492 attn0.15 expr=add(@0,@1) #attn.59=(36,6,64,64)f32 #4492=(1,6,64,64)f32 #attn0.15=(36,6,64,64)f32 pnnx.Expression pnnx_expr_15405 2 1 x2.29 attn_mask.15 4499 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.29=(36,64,192)f32 #attn_mask.15=(36,64,64)f32 pnnx.Expression pnnx_expr_15404 0 1 20887 expr=1 pnnx.Expression pnnx_expr_15403 0 1 20888 expr=0 Tensor.view Tensor.view_1124 2 1 attn0.15 4499 4500 $input=attn0.15 $shape=4499 #attn0.15=(36,6,64,64)f32 #4500=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3244 2 1 attn_mask.15 20887 4501 $input=attn_mask.15 
$dim=20887 #attn_mask.15=(36,64,64)f32 #4501=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3245 2 1 4501 20888 4502 $input=4501 $dim=20888 #4501=(36,1,64,64)f32 #4502=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_15401 2 1 4500 4502 attn1.15 expr=add(@0,@1) #4500=(1,36,6,64,64)f32 #4502=(1,36,1,64,64)f32 #attn1.15=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_15398 1 1 x2.29 4504 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.29=(36,64,192)f32 Tensor.view Tensor.view_1125 2 1 attn1.15 4504 input.67 $input=attn1.15 $shape=4504 #attn1.15=(1,36,6,64,64)f32 #input.67=(36,6,64,64)f32 nn.Softmax layers_dfe.2.residual_group.blocks.1.attn.softmax 1 1 input.67 4506 dim=-1 #input.67=(36,6,64,64)f32 #4506=(36,6,64,64)f32 nn.Dropout layers_dfe.2.residual_group.blocks.1.attn.attn_drop 1 1 4506 4507 #4506=(36,6,64,64)f32 #4507=(36,6,64,64)f32 Tensor.select Tensor.select_691 3 1 qkv0.29 20873 20874 v.29 $input=qkv0.29 $dim=20873 $index=20874 #qkv0.29=(3,36,6,64,32)f32 #v.29=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15397 0 1 20892 expr=1 pnnx.Expression pnnx_expr_15396 0 1 20893 expr=2 torch.matmul torch.matmul_2231 2 1 4507 v.29 4508 $input=4507 $other=v.29 #4507=(36,6,64,64)f32 #v.29=(36,6,64,32)f32 #4508=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15395 1 1 x2.29 4510 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.29=(36,64,192)f32 torch.transpose torch.transpose_2992 3 1 4508 20892 20893 4509 $input=4508 $dim0=20892 $dim1=20893 #4508=(36,6,64,32)f32 #4509=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_461 2 1 4509 4510 input0.31 $input=4509 $shape=4510 #4509=(36,64,6,32)f32 #input0.31=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.1.attn.proj 1 1 input0.31 4512 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.31=(36,64,192)f32 #4512=(36,64,192)f32 nn.Dropout layers_dfe.2.residual_group.blocks.1.attn.proj_drop 1 1 4512 4513 #4512=(36,64,192)f32 #4513=(36,64,192)f32 pnnx.Expression pnnx_expr_15391 1 1 4377 4514 expr=[-1,8,8,int(size(@0,2))] #4377=(1,2304,192)f32 pnnx.Expression pnnx_expr_15378 1 1 154 4520 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15371 0 1 4522 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1126 2 1 4513 4514 windows.29 $input=4513 $shape=4514 #4513=(36,64,192)f32 #windows.29=(36,8,8,192)f32 Tensor.view Tensor.view_1127 2 1 windows.29 4520 x3.29 $input=windows.29 $shape=4520 #windows.29=(36,8,8,192)f32 #x3.29=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15368 1 1 154 4525 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2583 2 1 x3.29 4522 4523 $input=x3.29 $dims=4522 #x3.29=(1,6,6,8,8,192)f32 #4523=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_43 1 1 4523 4524 memory_format=torch.contiguous_format $input=4523 #4523=(1,6,8,6,8,192)f32 #4524=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15365 0 1 4527 expr=[4,4] pnnx.Expression pnnx_expr_15362 0 1 4528 expr=[1,2] Tensor.view Tensor.view_1128 2 1 4524 4525 shifted_x.15 $input=4524 $shape=4525 #4524=(1,6,8,6,8,192)f32 #shifted_x.15=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15359 2 1 4377 154 4532 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4377=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2433 3 1 shifted_x.15 4527 4528 x4.29 $input=shifted_x.15 $shifts=4527 $dims=4528 #shifted_x.15=(1,48,48,192)f32 #x4.29=(1,48,48,192)f32 Tensor.view Tensor.view_1129 2 1 x4.29 4532 x5.15 $input=x4.29 $shape=4532 #x4.29=(1,48,48,192)f32 
#x5.15=(1,2304,192)f32 pnnx.Expression pnnx_expr_15357 2 1 4377 x5.15 input.69 expr=add(@0,@1) #4377=(1,2304,192)f32 #x5.15=(1,2304,192)f32 #input.69=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.1.norm2 1 1 input.69 4536 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.69=(1,2304,192)f32 #4536=(1,2304,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.1.mlp.fc1 1 1 4536 4541 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4536=(1,2304,192)f32 #4541=(1,2304,384)f32 nn.GELU layers_dfe.2.residual_group.blocks.1.mlp.act 1 1 4541 4542 #4541=(1,2304,384)f32 #4542=(1,2304,384)f32 nn.Dropout layers_dfe.2.residual_group.blocks.1.mlp.drop 1 1 4542 4543 #4542=(1,2304,384)f32 #4543=(1,2304,384)f32 nn.Linear layers_dfe.2.residual_group.blocks.1.mlp.fc2 1 1 4543 4544 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4543=(1,2304,384)f32 #4544=(1,2304,192)f32 nn.Dropout pnnx_unique_28 1 1 4544 4545 #4544=(1,2304,192)f32 #4545=(1,2304,192)f32 pnnx.Expression pnnx_expr_15355 2 1 input.69 4545 4547 expr=add(@0,@1) #input.69=(1,2304,192)f32 #4545=(1,2304,192)f32 #4547=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.2.norm1 1 1 4547 4574 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4547=(1,2304,192)f32 #4574=(1,2304,192)f32 pnnx.Expression pnnx_expr_15333 2 1 4547 154 4575 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4547=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1130 2 1 4574 4575 x.31 $input=4574 $shape=4575 #4574=(1,2304,192)f32 #x.31=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15312 1 1 x.31 4592 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.31=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15307 0 1 4594 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1131 2 1 x.31 4592 x0.31 $input=x.31 $shape=4592 #x.31=(1,48,48,192)f32 #x0.31=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15304 1 1 x.31 4597 expr=[-1,8,8,int(size(@0,3))] #x.31=(1,48,48,192)f32 torch.permute torch.permute_2584 2 1 x0.31 4594 4595 $input=x0.31 $dims=4594 #x0.31=(1,6,8,6,8,192)f32 #4595=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_44 1 1 4595 4596 memory_format=torch.contiguous_format $input=4595 #4595=(1,6,6,8,8,192)f32 #4596=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15302 1 1 4547 4599 expr=[-1,64,int(size(@0,2))] #4547=(1,2304,192)f32 pnnx.Expression pnnx_expr_15292 0 1 4610 expr=-2 pnnx.Expression pnnx_expr_15291 0 1 4611 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.2.attn 0 1 relative_position_bias_table.31 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.31=(225,6)f32 pnnx.Attribute pnnx_unique_29 0 1 relative_position_index.31 @relative_position_index=(64,64)i64 #relative_position_index.31=(64,64)i64 Tensor.view Tensor.view_1132 2 1 4596 4597 x_windows.31 $input=4596 $shape=4597 #4596=(1,6,6,8,8,192)f32 #x_windows.31=(36,8,8,192)f32 Tensor.view Tensor.view_1133 2 1 x_windows.31 4599 x1.31 $input=x_windows.31 $shape=4599 #x_windows.31=(36,8,8,192)f32 #x1.31=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.2.attn.qkv 1 1 x1.31 4631 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.31=(36,64,192)f32 #4631=(36,64,576)f32 pnnx.Expression pnnx_expr_15276 1 1 x1.31 4634 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.31=(36,64,192)f32 
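Note: every block in this section repeats the same Tensor.view / torch.permute([0,1,3,2,4,5]) / Tensor.contiguous chain to cut the (1,48,48,192) map into 36 non-overlapping 8x8 windows before attention, and the mirror chain to stitch them back afterwards. A self-contained sketch of that round trip follows; window size and shapes are read off the shape comments above, while the function names are ours, not the model's.

import torch

def window_partition(x, w=8):
    # (B, H, W, C) -> (B * H/w * W/w, w*w, C); the graph's
    # view -> permute [0,1,3,2,4,5] -> contiguous -> view chain
    B, H, W, C = x.shape
    x = x.view(B, H // w, w, W // w, w, C).permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, w * w, C)

def window_reverse(windows, H, W, w=8):
    # exact inverse: the same permutation swaps dims 2 and 3 back
    B = windows.shape[0] // ((H // w) * (W // w))
    x = windows.view(B, H // w, W // w, w, w, -1).permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)

x = torch.randn(1, 48, 48, 192)
assert torch.equal(window_reverse(window_partition(x), 48, 48), x)

In the dump the partition is split into two views, (36,8,8,192) then (36,64,192); the sketch folds them into one, which is numerically identical.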
pnnx.Expression pnnx_expr_15271 0 1 4636 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_462 2 1 4631 4634 4635 $input=4631 $shape=4634 #4631=(36,64,576)f32 #4635=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15270 0 1 20937 expr=0 pnnx.Expression pnnx_expr_15269 0 1 20938 expr=0 pnnx.Expression pnnx_expr_15268 0 1 20939 expr=0 pnnx.Expression pnnx_expr_15267 0 1 20940 expr=1 pnnx.Expression pnnx_expr_15266 0 1 20941 expr=0 pnnx.Expression pnnx_expr_15265 0 1 20942 expr=2 torch.permute torch.permute_2585 2 1 4635 4636 qkv0.31 $input=4635 $dims=4636 #4635=(36,64,3,6,32)f32 #qkv0.31=(3,36,6,64,32)f32 Tensor.select Tensor.select_692 3 1 qkv0.31 20937 20938 q.31 $input=qkv0.31 $dim=20937 $index=20938 #qkv0.31=(3,36,6,64,32)f32 #q.31=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15264 1 1 q.31 q0.31 expr=mul(@0,1.767767e-01) #q.31=(36,6,64,32)f32 #q0.31=(36,6,64,32)f32 Tensor.select Tensor.select_693 3 1 qkv0.31 20939 20940 k.31 $input=qkv0.31 $dim=20939 $index=20940 #qkv0.31=(3,36,6,64,32)f32 #k.31=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15262 0 1 4644 expr=[-1] Tensor.view Tensor.view_1134 2 1 relative_position_index.31 4644 4645 $input=relative_position_index.31 $shape=4644 #relative_position_index.31=(64,64)i64 #4645=(4096)i64 pnnx.Expression pnnx_expr_15261 1 1 4645 4646 expr=[@0] #4645=(4096)i64 pnnx.Expression pnnx_expr_15258 0 1 4648 expr=[64,64,-1] Tensor.index Tensor.index_340 2 1 relative_position_bias_table.31 4646 4647 $input=relative_position_bias_table.31 $expr=4646 #relative_position_bias_table.31=(225,6)f32 #4647=(4096,6)f32 pnnx.Expression pnnx_expr_15254 0 1 4650 expr=[2,0,1] Tensor.view Tensor.view_1135 2 1 4647 4648 relative_position_bias.31 $input=4647 $shape=4648 #4647=(4096,6)f32 #relative_position_bias.31=(64,64,6)f32 pnnx.Expression pnnx_expr_15253 0 1 20950 expr=0 torch.permute torch.permute_2586 2 1 relative_position_bias.31 4650 4651 $input=relative_position_bias.31 $dims=4650 #relative_position_bias.31=(64,64,6)f32 #4651=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_45 1 1 4651 relative_position_bias0.31 memory_format=torch.contiguous_format $input=4651 #4651=(6,64,64)f32 #relative_position_bias0.31=(6,64,64)f32 torch.transpose torch.transpose_2993 3 1 k.31 4610 4611 4642 $input=k.31 $dim0=4610 $dim1=4611 #k.31=(36,6,64,32)f32 #4642=(36,6,32,64)f32 torch.matmul torch.matmul_2232 2 1 q0.31 4642 attn.63 $input=q0.31 $other=4642 #q0.31=(36,6,64,32)f32 #4642=(36,6,32,64)f32 #attn.63=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3246 2 1 relative_position_bias0.31 20950 4653 $input=relative_position_bias0.31 $dim=20950 #relative_position_bias0.31=(6,64,64)f32 #4653=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15251 2 1 attn.63 4653 input.71 expr=add(@0,@1) #attn.63=(36,6,64,64)f32 #4653=(1,6,64,64)f32 #input.71=(36,6,64,64)f32 nn.Softmax layers_dfe.2.residual_group.blocks.2.attn.softmax 1 1 input.71 4655 dim=-1 #input.71=(36,6,64,64)f32 #4655=(36,6,64,64)f32 nn.Dropout layers_dfe.2.residual_group.blocks.2.attn.attn_drop 1 1 4655 4656 #4655=(36,6,64,64)f32 #4656=(36,6,64,64)f32 Tensor.select Tensor.select_694 3 1 qkv0.31 20941 20942 v.31 $input=qkv0.31 $dim=20941 $index=20942 #qkv0.31=(3,36,6,64,32)f32 #v.31=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15250 0 1 20952 expr=1 pnnx.Expression pnnx_expr_15249 0 1 20953 expr=2 torch.matmul torch.matmul_2233 2 1 4656 v.31 4657 $input=4656 $other=v.31 #4656=(36,6,64,64)f32 #v.31=(36,6,64,32)f32 #4657=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15248 1 1 x1.31 4659 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] 
#x1.31=(36,64,192)f32 torch.transpose torch.transpose_2994 3 1 4657 20952 20953 4658 $input=4657 $dim0=20952 $dim1=20953 #4657=(36,6,64,32)f32 #4658=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_463 2 1 4658 4659 input0.33 $input=4658 $shape=4659 #4658=(36,64,6,32)f32 #input0.33=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.2.attn.proj 1 1 input0.33 4661 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.33=(36,64,192)f32 #4661=(36,64,192)f32 nn.Dropout layers_dfe.2.residual_group.blocks.2.attn.proj_drop 1 1 4661 4662 #4661=(36,64,192)f32 #4662=(36,64,192)f32 pnnx.Expression pnnx_expr_15244 1 1 4547 4663 expr=[-1,8,8,int(size(@0,2))] #4547=(1,2304,192)f32 pnnx.Expression pnnx_expr_15231 1 1 154 4669 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15224 0 1 4671 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1136 2 1 4662 4663 windows.31 $input=4662 $shape=4663 #4662=(36,64,192)f32 #windows.31=(36,8,8,192)f32 Tensor.view Tensor.view_1137 2 1 windows.31 4669 x2.31 $input=windows.31 $shape=4669 #windows.31=(36,8,8,192)f32 #x2.31=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15221 1 1 154 4674 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2587 2 1 x2.31 4671 4672 $input=x2.31 $dims=4671 #x2.31=(1,6,6,8,8,192)f32 #4672=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_46 1 1 4672 4673 memory_format=torch.contiguous_format $input=4672 #4672=(1,6,8,6,8,192)f32 #4673=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15218 2 1 4547 154 4678 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4547=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1138 2 1 4673 4674 x3.31 $input=4673 $shape=4674 #4673=(1,6,8,6,8,192)f32 #x3.31=(1,48,48,192)f32 Tensor.view Tensor.view_1139 2 1 x3.31 4678 x4.31 $input=x3.31 $shape=4678 #x3.31=(1,48,48,192)f32 #x4.31=(1,2304,192)f32 pnnx.Expression pnnx_expr_15216 2 1 4547 x4.31 input.73 expr=add(@0,@1) #4547=(1,2304,192)f32 #x4.31=(1,2304,192)f32 #input.73=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.2.norm2 1 1 input.73 4682 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.73=(1,2304,192)f32 #4682=(1,2304,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.2.mlp.fc1 1 1 4682 4687 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4682=(1,2304,192)f32 #4687=(1,2304,384)f32 nn.GELU layers_dfe.2.residual_group.blocks.2.mlp.act 1 1 4687 4688 #4687=(1,2304,384)f32 #4688=(1,2304,384)f32 nn.Dropout layers_dfe.2.residual_group.blocks.2.mlp.drop 1 1 4688 4689 #4688=(1,2304,384)f32 #4689=(1,2304,384)f32 nn.Linear layers_dfe.2.residual_group.blocks.2.mlp.fc2 1 1 4689 4690 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4689=(1,2304,384)f32 #4690=(1,2304,192)f32 nn.Dropout pnnx_unique_30 1 1 4690 4691 #4690=(1,2304,192)f32 #4691=(1,2304,192)f32 pnnx.Expression pnnx_expr_15214 2 1 input.73 4691 4693 expr=add(@0,@1) #input.73=(1,2304,192)f32 #4691=(1,2304,192)f32 #4693=(1,2304,192)f32 pnnx.Attribute layers_dfe.2.residual_group.blocks.3 0 1 attn_mask.17 @attn_mask=(36,64,64)f32 #attn_mask.17=(36,64,64)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.3.norm1 1 1 4693 4722 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4693=(1,2304,192)f32 #4722=(1,2304,192)f32 pnnx.Expression pnnx_expr_15191 2 1 4693 154 4723 
expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4693=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15189 0 1 4725 expr=[-4,-4] pnnx.Expression pnnx_expr_15187 0 1 4726 expr=[1,2] Tensor.view Tensor.view_1140 2 1 4722 4723 x.33 $input=4722 $shape=4723 #4722=(1,2304,192)f32 #x.33=(1,48,48,192)f32 torch.roll torch.roll_2434 3 1 x.33 4725 4726 x0.33 $input=x.33 $shifts=4725 $dims=4726 #x.33=(1,48,48,192)f32 #x0.33=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15165 1 1 x0.33 4743 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.33=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15160 0 1 4745 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1141 2 1 x0.33 4743 x1.33 $input=x0.33 $shape=4743 #x0.33=(1,48,48,192)f32 #x1.33=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15157 1 1 x0.33 4748 expr=[-1,8,8,int(size(@0,3))] #x0.33=(1,48,48,192)f32 torch.permute torch.permute_2588 2 1 x1.33 4745 4746 $input=x1.33 $dims=4745 #x1.33=(1,6,8,6,8,192)f32 #4746=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_47 1 1 4746 4747 memory_format=torch.contiguous_format $input=4746 #4746=(1,6,6,8,8,192)f32 #4747=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15155 1 1 4693 4750 expr=[-1,64,int(size(@0,2))] #4693=(1,2304,192)f32 pnnx.Expression pnnx_expr_15145 0 1 4761 expr=-2 pnnx.Expression pnnx_expr_15144 0 1 4762 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.3.attn 0 1 relative_position_bias_table.33 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.33=(225,6)f32 pnnx.Attribute pnnx_unique_31 0 1 relative_position_index.33 @relative_position_index=(64,64)i64 #relative_position_index.33=(64,64)i64 Tensor.view Tensor.view_1142 2 1 4747 4748 x_windows.33 $input=4747 $shape=4748 #4747=(1,6,6,8,8,192)f32 #x_windows.33=(36,8,8,192)f32 Tensor.view Tensor.view_1143 2 1 x_windows.33 4750 x2.33 $input=x_windows.33 $shape=4750 #x_windows.33=(36,8,8,192)f32 #x2.33=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.3.attn.qkv 1 1 x2.33 4786 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.33=(36,64,192)f32 #4786=(36,64,576)f32 pnnx.Expression pnnx_expr_15125 1 1 x2.33 4789 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.33=(36,64,192)f32 pnnx.Expression pnnx_expr_15120 0 1 4791 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_464 2 1 4786 4789 4790 $input=4786 $shape=4789 #4786=(36,64,576)f32 #4790=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_15119 0 1 20996 expr=0 pnnx.Expression pnnx_expr_15118 0 1 20997 expr=0 pnnx.Expression pnnx_expr_15117 0 1 20998 expr=0 pnnx.Expression pnnx_expr_15116 0 1 20999 expr=1 pnnx.Expression pnnx_expr_15115 0 1 21000 expr=0 pnnx.Expression pnnx_expr_15114 0 1 21001 expr=2 torch.permute torch.permute_2589 2 1 4790 4791 qkv0.33 $input=4790 $dims=4791 #4790=(36,64,3,6,32)f32 #qkv0.33=(3,36,6,64,32)f32 Tensor.select Tensor.select_695 3 1 qkv0.33 20996 20997 q.33 $input=qkv0.33 $dim=20996 $index=20997 #qkv0.33=(3,36,6,64,32)f32 #q.33=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15113 1 1 q.33 q0.33 expr=mul(@0,1.767767e-01) #q.33=(36,6,64,32)f32 #q0.33=(36,6,64,32)f32 Tensor.select Tensor.select_696 3 1 qkv0.33 20998 20999 k.33 $input=qkv0.33 $dim=20998 $index=20999 #qkv0.33=(3,36,6,64,32)f32 #k.33=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15111 0 1 4799 expr=[-1] Tensor.view Tensor.view_1144 2 1 relative_position_index.33 4799 4800 $input=relative_position_index.33 $shape=4799 #relative_position_index.33=(64,64)i64 #4800=(4096)i64 
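Note: odd-numbered blocks (blocks.1, blocks.3, blocks.5 above) are the shifted variants: the map is rolled by (-4,-4) on dims (1,2) before partitioning, rolled back by (4,4) afterwards, and each attention adds the precomputed attn_mask=(36,64,64) attribute through a pair of unsqueezes. The graph only stores the finished mask as a constant; the sketch below is the conventional Swin-style construction that yields a tensor of this shape, offered as an assumption about provenance rather than something the dump states.

import torch

H = W = 48; w = 8; s = 4                          # feature size, window size, shift
img_mask = torch.zeros(1, H, W, 1)
cnt = 0
for hs in (slice(0, -w), slice(-w, -s), slice(-s, None)):
    for ws in (slice(0, -w), slice(-w, -s), slice(-s, None)):
        img_mask[:, hs, ws, :] = cnt              # label the 9 regions a cyclic shift creates
        cnt += 1
# inline window partition of the label map: (1,48,48,1) -> (36,64)
mw = img_mask.view(1, H // w, w, W // w, w, 1).permute(0, 1, 3, 2, 4, 5).reshape(-1, w * w)
attn_mask = mw.unsqueeze(1) - mw.unsqueeze(2)               # (36,64,64)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0)   # suppress cross-region pairs

# application mirroring the operators above
x = torch.randn(1, H, W, 192)
x_shifted = torch.roll(x, shifts=(-s, -s), dims=(1, 2))     # the torch.roll before norm1's view
attn = torch.randn(36, 6, 64, 64)
attn = attn.view(1, 36, 6, 64, 64) + attn_mask.unsqueeze(1).unsqueeze(0)

Adding -100 before softmax drives the masked logits to effectively zero probability, so windows assembled from wrapped-around pixels never attend across the seam.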
pnnx.Expression pnnx_expr_15110 1 1 4800 4801 expr=[@0] #4800=(4096)i64 pnnx.Expression pnnx_expr_15107 0 1 4803 expr=[64,64,-1] Tensor.index Tensor.index_341 2 1 relative_position_bias_table.33 4801 4802 $input=relative_position_bias_table.33 $expr=4801 #relative_position_bias_table.33=(225,6)f32 #4802=(4096,6)f32 pnnx.Expression pnnx_expr_15103 0 1 4805 expr=[2,0,1] Tensor.view Tensor.view_1145 2 1 4802 4803 relative_position_bias.33 $input=4802 $shape=4803 #4802=(4096,6)f32 #relative_position_bias.33=(64,64,6)f32 pnnx.Expression pnnx_expr_15102 0 1 21009 expr=0 torch.permute torch.permute_2590 2 1 relative_position_bias.33 4805 4806 $input=relative_position_bias.33 $dims=4805 #relative_position_bias.33=(64,64,6)f32 #4806=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_48 1 1 4806 relative_position_bias0.33 memory_format=torch.contiguous_format $input=4806 #4806=(6,64,64)f32 #relative_position_bias0.33=(6,64,64)f32 torch.transpose torch.transpose_2995 3 1 k.33 4761 4762 4797 $input=k.33 $dim0=4761 $dim1=4762 #k.33=(36,6,64,32)f32 #4797=(36,6,32,64)f32 torch.matmul torch.matmul_2234 2 1 q0.33 4797 attn.67 $input=q0.33 $other=4797 #q0.33=(36,6,64,32)f32 #4797=(36,6,32,64)f32 #attn.67=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3247 2 1 relative_position_bias0.33 21009 4808 $input=relative_position_bias0.33 $dim=21009 #relative_position_bias0.33=(6,64,64)f32 #4808=(1,6,64,64)f32 pnnx.Expression pnnx_expr_15100 2 1 attn.67 4808 attn0.17 expr=add(@0,@1) #attn.67=(36,6,64,64)f32 #4808=(1,6,64,64)f32 #attn0.17=(36,6,64,64)f32 pnnx.Expression pnnx_expr_15091 2 1 x2.33 attn_mask.17 4815 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.33=(36,64,192)f32 #attn_mask.17=(36,64,64)f32 pnnx.Expression pnnx_expr_15090 0 1 21014 expr=1 pnnx.Expression pnnx_expr_15089 0 1 21015 expr=0 Tensor.view Tensor.view_1146 2 1 attn0.17 4815 4816 $input=attn0.17 $shape=4815 #attn0.17=(36,6,64,64)f32 #4816=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3248 2 1 attn_mask.17 21014 4817 $input=attn_mask.17 $dim=21014 #attn_mask.17=(36,64,64)f32 #4817=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3249 2 1 4817 21015 4818 $input=4817 $dim=21015 #4817=(36,1,64,64)f32 #4818=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_15087 2 1 4816 4818 attn1.17 expr=add(@0,@1) #4816=(1,36,6,64,64)f32 #4818=(1,36,1,64,64)f32 #attn1.17=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_15084 1 1 x2.33 4820 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.33=(36,64,192)f32 Tensor.view Tensor.view_1147 2 1 attn1.17 4820 input.75 $input=attn1.17 $shape=4820 #attn1.17=(1,36,6,64,64)f32 #input.75=(36,6,64,64)f32 nn.Softmax layers_dfe.2.residual_group.blocks.3.attn.softmax 1 1 input.75 4822 dim=-1 #input.75=(36,6,64,64)f32 #4822=(36,6,64,64)f32 nn.Dropout layers_dfe.2.residual_group.blocks.3.attn.attn_drop 1 1 4822 4823 #4822=(36,6,64,64)f32 #4823=(36,6,64,64)f32 Tensor.select Tensor.select_697 3 1 qkv0.33 21000 21001 v.33 $input=qkv0.33 $dim=21000 $index=21001 #qkv0.33=(3,36,6,64,32)f32 #v.33=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15083 0 1 21019 expr=1 pnnx.Expression pnnx_expr_15082 0 1 21020 expr=2 torch.matmul torch.matmul_2235 2 1 4823 v.33 4824 $input=4823 $other=v.33 #4823=(36,6,64,64)f32 #v.33=(36,6,64,32)f32 #4824=(36,6,64,32)f32 pnnx.Expression pnnx_expr_15081 1 1 x2.33 4826 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.33=(36,64,192)f32 torch.transpose torch.transpose_2996 3 1 4824 21019 21020 4825 $input=4824 $dim0=21019 $dim1=21020 #4824=(36,6,64,32)f32 
#4825=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_465 2 1 4825 4826 input0.35 $input=4825 $shape=4826 #4825=(36,64,6,32)f32 #input0.35=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.3.attn.proj 1 1 input0.35 4828 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.35=(36,64,192)f32 #4828=(36,64,192)f32 nn.Dropout layers_dfe.2.residual_group.blocks.3.attn.proj_drop 1 1 4828 4829 #4828=(36,64,192)f32 #4829=(36,64,192)f32 pnnx.Expression pnnx_expr_15077 1 1 4693 4830 expr=[-1,8,8,int(size(@0,2))] #4693=(1,2304,192)f32 pnnx.Expression pnnx_expr_15064 1 1 154 4836 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_15057 0 1 4838 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1148 2 1 4829 4830 windows.33 $input=4829 $shape=4830 #4829=(36,64,192)f32 #windows.33=(36,8,8,192)f32 Tensor.view Tensor.view_1149 2 1 windows.33 4836 x3.33 $input=windows.33 $shape=4836 #windows.33=(36,8,8,192)f32 #x3.33=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_15054 1 1 154 4841 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2591 2 1 x3.33 4838 4839 $input=x3.33 $dims=4838 #x3.33=(1,6,6,8,8,192)f32 #4839=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_49 1 1 4839 4840 memory_format=torch.contiguous_format $input=4839 #4839=(1,6,8,6,8,192)f32 #4840=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_15051 0 1 4843 expr=[4,4] pnnx.Expression pnnx_expr_15048 0 1 4844 expr=[1,2] Tensor.view Tensor.view_1150 2 1 4840 4841 shifted_x.17 $input=4840 $shape=4841 #4840=(1,6,8,6,8,192)f32 #shifted_x.17=(1,48,48,192)f32 pnnx.Expression pnnx_expr_15045 2 1 4693 154 4848 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4693=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2435 3 1 shifted_x.17 4843 4844 x4.33 $input=shifted_x.17 $shifts=4843 $dims=4844 #shifted_x.17=(1,48,48,192)f32 #x4.33=(1,48,48,192)f32 Tensor.view Tensor.view_1151 2 1 x4.33 4848 x5.17 $input=x4.33 $shape=4848 #x4.33=(1,48,48,192)f32 #x5.17=(1,2304,192)f32 pnnx.Expression pnnx_expr_15043 2 1 4693 x5.17 input.77 expr=add(@0,@1) #4693=(1,2304,192)f32 #x5.17=(1,2304,192)f32 #input.77=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.3.norm2 1 1 input.77 4852 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.77=(1,2304,192)f32 #4852=(1,2304,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.3.mlp.fc1 1 1 4852 4857 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4852=(1,2304,192)f32 #4857=(1,2304,384)f32 nn.GELU layers_dfe.2.residual_group.blocks.3.mlp.act 1 1 4857 4858 #4857=(1,2304,384)f32 #4858=(1,2304,384)f32 nn.Dropout layers_dfe.2.residual_group.blocks.3.mlp.drop 1 1 4858 4859 #4858=(1,2304,384)f32 #4859=(1,2304,384)f32 nn.Linear layers_dfe.2.residual_group.blocks.3.mlp.fc2 1 1 4859 4860 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #4859=(1,2304,384)f32 #4860=(1,2304,192)f32 nn.Dropout pnnx_unique_32 1 1 4860 4861 #4860=(1,2304,192)f32 #4861=(1,2304,192)f32 pnnx.Expression pnnx_expr_15041 2 1 input.77 4861 4863 expr=add(@0,@1) #input.77=(1,2304,192)f32 #4861=(1,2304,192)f32 #4863=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.4.norm1 1 1 4863 4890 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #4863=(1,2304,192)f32 #4890=(1,2304,192)f32 pnnx.Expression pnnx_expr_15019 2 1 4863 154 
4891 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #4863=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1152 2 1 4890 4891 x.35 $input=4890 $shape=4891 #4890=(1,2304,192)f32 #x.35=(1,48,48,192)f32 pnnx.Expression pnnx_expr_14998 1 1 x.35 4908 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.35=(1,48,48,192)f32 pnnx.Expression pnnx_expr_14993 0 1 4910 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1153 2 1 x.35 4908 x0.35 $input=x.35 $shape=4908 #x.35=(1,48,48,192)f32 #x0.35=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_14990 1 1 x.35 4913 expr=[-1,8,8,int(size(@0,3))] #x.35=(1,48,48,192)f32 torch.permute torch.permute_2592 2 1 x0.35 4910 4911 $input=x0.35 $dims=4910 #x0.35=(1,6,8,6,8,192)f32 #4911=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_50 1 1 4911 4912 memory_format=torch.contiguous_format $input=4911 #4911=(1,6,6,8,8,192)f32 #4912=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_14988 1 1 4863 4915 expr=[-1,64,int(size(@0,2))] #4863=(1,2304,192)f32 pnnx.Expression pnnx_expr_14978 0 1 4926 expr=-2 pnnx.Expression pnnx_expr_14977 0 1 4927 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.4.attn 0 1 relative_position_bias_table.35 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.35=(225,6)f32 pnnx.Attribute pnnx_unique_33 0 1 relative_position_index.35 @relative_position_index=(64,64)i64 #relative_position_index.35=(64,64)i64 Tensor.view Tensor.view_1154 2 1 4912 4913 x_windows.35 $input=4912 $shape=4913 #4912=(1,6,6,8,8,192)f32 #x_windows.35=(36,8,8,192)f32 Tensor.view Tensor.view_1155 2 1 x_windows.35 4915 x1.35 $input=x_windows.35 $shape=4915 #x_windows.35=(36,8,8,192)f32 #x1.35=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.4.attn.qkv 1 1 x1.35 4947 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.35=(36,64,192)f32 #4947=(36,64,576)f32 pnnx.Expression pnnx_expr_14962 1 1 x1.35 4950 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.35=(36,64,192)f32 pnnx.Expression pnnx_expr_14957 0 1 4952 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_466 2 1 4947 4950 4951 $input=4947 $shape=4950 #4947=(36,64,576)f32 #4951=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_14956 0 1 21064 expr=0 pnnx.Expression pnnx_expr_14955 0 1 21065 expr=0 pnnx.Expression pnnx_expr_14954 0 1 21066 expr=0 pnnx.Expression pnnx_expr_14953 0 1 21067 expr=1 pnnx.Expression pnnx_expr_14952 0 1 21068 expr=0 pnnx.Expression pnnx_expr_14951 0 1 21069 expr=2 torch.permute torch.permute_2593 2 1 4951 4952 qkv0.35 $input=4951 $dims=4952 #4951=(36,64,3,6,32)f32 #qkv0.35=(3,36,6,64,32)f32 Tensor.select Tensor.select_698 3 1 qkv0.35 21064 21065 q.35 $input=qkv0.35 $dim=21064 $index=21065 #qkv0.35=(3,36,6,64,32)f32 #q.35=(36,6,64,32)f32 pnnx.Expression pnnx_expr_14950 1 1 q.35 q0.35 expr=mul(@0,1.767767e-01) #q.35=(36,6,64,32)f32 #q0.35=(36,6,64,32)f32 Tensor.select Tensor.select_699 3 1 qkv0.35 21066 21067 k.35 $input=qkv0.35 $dim=21066 $index=21067 #qkv0.35=(3,36,6,64,32)f32 #k.35=(36,6,64,32)f32 pnnx.Expression pnnx_expr_14948 0 1 4960 expr=[-1] Tensor.view Tensor.view_1156 2 1 relative_position_index.35 4960 4961 $input=relative_position_index.35 $shape=4960 #relative_position_index.35=(64,64)i64 #4961=(4096)i64 pnnx.Expression pnnx_expr_14947 1 1 4961 4962 expr=[@0] #4961=(4096)i64 pnnx.Expression pnnx_expr_14944 0 1 4964 expr=[64,64,-1] Tensor.index Tensor.index_342 2 1 relative_position_bias_table.35 4962 4963 $input=relative_position_bias_table.35 
$expr=4962 #relative_position_bias_table.35=(225,6)f32 #4963=(4096,6)f32 pnnx.Expression pnnx_expr_14940 0 1 4966 expr=[2,0,1] Tensor.view Tensor.view_1157 2 1 4963 4964 relative_position_bias.35 $input=4963 $shape=4964 #4963=(4096,6)f32 #relative_position_bias.35=(64,64,6)f32 pnnx.Expression pnnx_expr_14939 0 1 21077 expr=0 torch.permute torch.permute_2594 2 1 relative_position_bias.35 4966 4967 $input=relative_position_bias.35 $dims=4966 #relative_position_bias.35=(64,64,6)f32 #4967=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_51 1 1 4967 relative_position_bias0.35 memory_format=torch.contiguous_format $input=4967 #4967=(6,64,64)f32 #relative_position_bias0.35=(6,64,64)f32 torch.transpose torch.transpose_2997 3 1 k.35 4926 4927 4958 $input=k.35 $dim0=4926 $dim1=4927 #k.35=(36,6,64,32)f32 #4958=(36,6,32,64)f32 torch.matmul torch.matmul_2236 2 1 q0.35 4958 attn.71 $input=q0.35 $other=4958 #q0.35=(36,6,64,32)f32 #4958=(36,6,32,64)f32 #attn.71=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3250 2 1 relative_position_bias0.35 21077 4969 $input=relative_position_bias0.35 $dim=21077 #relative_position_bias0.35=(6,64,64)f32 #4969=(1,6,64,64)f32 pnnx.Expression pnnx_expr_14937 2 1 attn.71 4969 input.79 expr=add(@0,@1) #attn.71=(36,6,64,64)f32 #4969=(1,6,64,64)f32 #input.79=(36,6,64,64)f32 nn.Softmax layers_dfe.2.residual_group.blocks.4.attn.softmax 1 1 input.79 4971 dim=-1 #input.79=(36,6,64,64)f32 #4971=(36,6,64,64)f32 nn.Dropout layers_dfe.2.residual_group.blocks.4.attn.attn_drop 1 1 4971 4972 #4971=(36,6,64,64)f32 #4972=(36,6,64,64)f32 Tensor.select Tensor.select_700 3 1 qkv0.35 21068 21069 v.35 $input=qkv0.35 $dim=21068 $index=21069 #qkv0.35=(3,36,6,64,32)f32 #v.35=(36,6,64,32)f32 pnnx.Expression pnnx_expr_14936 0 1 21079 expr=1 pnnx.Expression pnnx_expr_14935 0 1 21080 expr=2 torch.matmul torch.matmul_2237 2 1 4972 v.35 4973 $input=4972 $other=v.35 #4972=(36,6,64,64)f32 #v.35=(36,6,64,32)f32 #4973=(36,6,64,32)f32 pnnx.Expression pnnx_expr_14934 1 1 x1.35 4975 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.35=(36,64,192)f32 torch.transpose torch.transpose_2998 3 1 4973 21079 21080 4974 $input=4973 $dim0=21079 $dim1=21080 #4973=(36,6,64,32)f32 #4974=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_467 2 1 4974 4975 input0.37 $input=4974 $shape=4975 #4974=(36,64,6,32)f32 #input0.37=(36,64,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.4.attn.proj 1 1 input0.37 4977 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.37=(36,64,192)f32 #4977=(36,64,192)f32 nn.Dropout layers_dfe.2.residual_group.blocks.4.attn.proj_drop 1 1 4977 4978 #4977=(36,64,192)f32 #4978=(36,64,192)f32 pnnx.Expression pnnx_expr_14930 1 1 4863 4979 expr=[-1,8,8,int(size(@0,2))] #4863=(1,2304,192)f32 pnnx.Expression pnnx_expr_14917 1 1 154 4985 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_14910 0 1 4987 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1158 2 1 4978 4979 windows.35 $input=4978 $shape=4979 #4978=(36,64,192)f32 #windows.35=(36,8,8,192)f32 Tensor.view Tensor.view_1159 2 1 windows.35 4985 x2.35 $input=windows.35 $shape=4985 #windows.35=(36,8,8,192)f32 #x2.35=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_14907 1 1 154 4990 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2595 2 1 x2.35 4987 4988 $input=x2.35 $dims=4987 #x2.35=(1,6,6,8,8,192)f32 #4988=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_52 1 1 4988 4989 memory_format=torch.contiguous_format 
$input=4988 #4988=(1,6,8,6,8,192)f32 #4989=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_14904 2 1 4863 154 4994 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #4863=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1160 2 1 4989 4990 x3.35 $input=4989 $shape=4990 #4989=(1,6,8,6,8,192)f32 #x3.35=(1,48,48,192)f32 Tensor.view Tensor.view_1161 2 1 x3.35 4994 x4.35 $input=x3.35 $shape=4994 #x3.35=(1,48,48,192)f32 #x4.35=(1,2304,192)f32 pnnx.Expression pnnx_expr_14902 2 1 4863 x4.35 input.81 expr=add(@0,@1) #4863=(1,2304,192)f32 #x4.35=(1,2304,192)f32 #input.81=(1,2304,192)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.4.norm2 1 1 input.81 4998 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.81=(1,2304,192)f32 #4998=(1,2304,192)f32 nn.Linear layers_dfe.2.residual_group.blocks.4.mlp.fc1 1 1 4998 5003 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #4998=(1,2304,192)f32 #5003=(1,2304,384)f32 nn.GELU layers_dfe.2.residual_group.blocks.4.mlp.act 1 1 5003 5004 #5003=(1,2304,384)f32 #5004=(1,2304,384)f32 nn.Dropout layers_dfe.2.residual_group.blocks.4.mlp.drop 1 1 5004 5005 #5004=(1,2304,384)f32 #5005=(1,2304,384)f32 nn.Linear layers_dfe.2.residual_group.blocks.4.mlp.fc2 1 1 5005 5006 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5005=(1,2304,384)f32 #5006=(1,2304,192)f32 nn.Dropout pnnx_unique_34 1 1 5006 5007 #5006=(1,2304,192)f32 #5007=(1,2304,192)f32 pnnx.Expression pnnx_expr_14900 2 1 input.81 5007 5009 expr=add(@0,@1) #input.81=(1,2304,192)f32 #5007=(1,2304,192)f32 #5009=(1,2304,192)f32 pnnx.Attribute layers_dfe.2.residual_group.blocks.5 0 1 attn_mask.19 @attn_mask=(36,64,64)f32 #attn_mask.19=(36,64,64)f32 nn.LayerNorm layers_dfe.2.residual_group.blocks.5.norm1 1 1 5009 5038 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5009=(1,2304,192)f32 #5038=(1,2304,192)f32 pnnx.Expression pnnx_expr_14877 2 1 5009 154 5039 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5009=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_14875 0 1 5041 expr=[-4,-4] pnnx.Expression pnnx_expr_14873 0 1 5042 expr=[1,2] Tensor.view Tensor.view_1162 2 1 5038 5039 x.37 $input=5038 $shape=5039 #5038=(1,2304,192)f32 #x.37=(1,48,48,192)f32 torch.roll torch.roll_2436 3 1 x.37 5041 5042 x0.37 $input=x.37 $shifts=5041 $dims=5042 #x.37=(1,48,48,192)f32 #x0.37=(1,48,48,192)f32 pnnx.Expression pnnx_expr_14851 1 1 x0.37 5059 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.37=(1,48,48,192)f32 pnnx.Expression pnnx_expr_14846 0 1 5061 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1163 2 1 x0.37 5059 x1.37 $input=x0.37 $shape=5059 #x0.37=(1,48,48,192)f32 #x1.37=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_14843 1 1 x0.37 5064 expr=[-1,8,8,int(size(@0,3))] #x0.37=(1,48,48,192)f32 torch.permute torch.permute_2596 2 1 x1.37 5061 5062 $input=x1.37 $dims=5061 #x1.37=(1,6,8,6,8,192)f32 #5062=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_53 1 1 5062 5063 memory_format=torch.contiguous_format $input=5062 #5062=(1,6,6,8,8,192)f32 #5063=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_14841 1 1 5009 5066 expr=[-1,64,int(size(@0,2))] #5009=(1,2304,192)f32 pnnx.Expression pnnx_expr_14831 0 1 5077 expr=-2 pnnx.Expression pnnx_expr_14830 0 1 5078 expr=-1 pnnx.Attribute layers_dfe.2.residual_group.blocks.5.attn 0 1 relative_position_bias_table.37 
@relative_position_bias_table=(225,6)f32 #relative_position_bias_table.37=(225,6)f32
pnnx.Attribute pnnx_unique_35 0 1 relative_position_index.37 @relative_position_index=(64,64)i64 #relative_position_index.37=(64,64)i64
Tensor.view Tensor.view_1164 2 1 5063 5064 x_windows.37 $input=5063 $shape=5064 #5063=(1,6,6,8,8,192)f32 #x_windows.37=(36,8,8,192)f32
Tensor.view Tensor.view_1165 2 1 x_windows.37 5066 x2.37 $input=x_windows.37 $shape=5066 #x_windows.37=(36,8,8,192)f32 #x2.37=(36,64,192)f32
nn.Linear layers_dfe.2.residual_group.blocks.5.attn.qkv 1 1 x2.37 5102 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.37=(36,64,192)f32 #5102=(36,64,576)f32
pnnx.Expression pnnx_expr_14811 1 1 x2.37 5105 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.37=(36,64,192)f32
pnnx.Expression pnnx_expr_14806 0 1 5107 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_468 2 1 5102 5105 5106 $input=5102 $shape=5105 #5102=(36,64,576)f32 #5106=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14805 0 1 21123 expr=0
pnnx.Expression pnnx_expr_14804 0 1 21124 expr=0
pnnx.Expression pnnx_expr_14803 0 1 21125 expr=0
pnnx.Expression pnnx_expr_14802 0 1 21126 expr=1
pnnx.Expression pnnx_expr_14801 0 1 21127 expr=0
pnnx.Expression pnnx_expr_14800 0 1 21128 expr=2
torch.permute torch.permute_2597 2 1 5106 5107 qkv0.37 $input=5106 $dims=5107 #5106=(36,64,3,6,32)f32 #qkv0.37=(3,36,6,64,32)f32
Tensor.select Tensor.select_701 3 1 qkv0.37 21123 21124 q.37 $input=qkv0.37 $dim=21123 $index=21124 #qkv0.37=(3,36,6,64,32)f32 #q.37=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14799 1 1 q.37 q0.37 expr=mul(@0,1.767767e-01) #q.37=(36,6,64,32)f32 #q0.37=(36,6,64,32)f32
Tensor.select Tensor.select_702 3 1 qkv0.37 21125 21126 k.37 $input=qkv0.37 $dim=21125 $index=21126 #qkv0.37=(3,36,6,64,32)f32 #k.37=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14797 0 1 5115 expr=[-1]
Tensor.view Tensor.view_1166 2 1 relative_position_index.37 5115 5116 $input=relative_position_index.37 $shape=5115 #relative_position_index.37=(64,64)i64 #5116=(4096)i64
pnnx.Expression pnnx_expr_14796 1 1 5116 5117 expr=[@0] #5116=(4096)i64
pnnx.Expression pnnx_expr_14793 0 1 5119 expr=[64,64,-1]
Tensor.index Tensor.index_343 2 1 relative_position_bias_table.37 5117 5118 $input=relative_position_bias_table.37 $expr=5117 #relative_position_bias_table.37=(225,6)f32 #5118=(4096,6)f32
pnnx.Expression pnnx_expr_14789 0 1 5121 expr=[2,0,1]
Tensor.view Tensor.view_1167 2 1 5118 5119 relative_position_bias.37 $input=5118 $shape=5119 #5118=(4096,6)f32 #relative_position_bias.37=(64,64,6)f32
pnnx.Expression pnnx_expr_14788 0 1 21136 expr=0
torch.permute torch.permute_2598 2 1 relative_position_bias.37 5121 5122 $input=relative_position_bias.37 $dims=5121 #relative_position_bias.37=(64,64,6)f32 #5122=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_54 1 1 5122 relative_position_bias0.37 memory_format=torch.contiguous_format $input=5122 #5122=(6,64,64)f32 #relative_position_bias0.37=(6,64,64)f32
torch.transpose torch.transpose_2999 3 1 k.37 5077 5078 5113 $input=k.37 $dim0=5077 $dim1=5078 #k.37=(36,6,64,32)f32 #5113=(36,6,32,64)f32
torch.matmul torch.matmul_2238 2 1 q0.37 5113 attn.75 $input=q0.37 $other=5113 #q0.37=(36,6,64,32)f32 #5113=(36,6,32,64)f32 #attn.75=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3251 2 1 relative_position_bias0.37 21136 5124 $input=relative_position_bias0.37 $dim=21136 #relative_position_bias0.37=(6,64,64)f32 #5124=(1,6,64,64)f32
pnnx.Expression pnnx_expr_14786 2 1 attn.75 5124 attn0.19 expr=add(@0,@1) #attn.75=(36,6,64,64)f32 #5124=(1,6,64,64)f32 #attn0.19=(36,6,64,64)f32
pnnx.Expression pnnx_expr_14777 2 1 x2.37 attn_mask.19 5131 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.37=(36,64,192)f32 #attn_mask.19=(36,64,64)f32
pnnx.Expression pnnx_expr_14776 0 1 21141 expr=1
pnnx.Expression pnnx_expr_14775 0 1 21142 expr=0
Tensor.view Tensor.view_1168 2 1 attn0.19 5131 5132 $input=attn0.19 $shape=5131 #attn0.19=(36,6,64,64)f32 #5132=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3252 2 1 attn_mask.19 21141 5133 $input=attn_mask.19 $dim=21141 #attn_mask.19=(36,64,64)f32 #5133=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3253 2 1 5133 21142 5134 $input=5133 $dim=21142 #5133=(36,1,64,64)f32 #5134=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_14773 2 1 5132 5134 attn1.19 expr=add(@0,@1) #5132=(1,36,6,64,64)f32 #5134=(1,36,1,64,64)f32 #attn1.19=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_14770 1 1 x2.37 5136 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.37=(36,64,192)f32
Tensor.view Tensor.view_1169 2 1 attn1.19 5136 input.83 $input=attn1.19 $shape=5136 #attn1.19=(1,36,6,64,64)f32 #input.83=(36,6,64,64)f32
nn.Softmax layers_dfe.2.residual_group.blocks.5.attn.softmax 1 1 input.83 5138 dim=-1 #input.83=(36,6,64,64)f32 #5138=(36,6,64,64)f32
nn.Dropout layers_dfe.2.residual_group.blocks.5.attn.attn_drop 1 1 5138 5139 #5138=(36,6,64,64)f32 #5139=(36,6,64,64)f32
Tensor.select Tensor.select_703 3 1 qkv0.37 21127 21128 v.37 $input=qkv0.37 $dim=21127 $index=21128 #qkv0.37=(3,36,6,64,32)f32 #v.37=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14769 0 1 21146 expr=1
pnnx.Expression pnnx_expr_14768 0 1 21147 expr=2
torch.matmul torch.matmul_2239 2 1 5139 v.37 5140 $input=5139 $other=v.37 #5139=(36,6,64,64)f32 #v.37=(36,6,64,32)f32 #5140=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14767 1 1 x2.37 5142 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.37=(36,64,192)f32
torch.transpose torch.transpose_3000 3 1 5140 21146 21147 5141 $input=5140 $dim0=21146 $dim1=21147 #5140=(36,6,64,32)f32 #5141=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_469 2 1 5141 5142 input0.39 $input=5141 $shape=5142 #5141=(36,64,6,32)f32 #input0.39=(36,64,192)f32
nn.Linear layers_dfe.2.residual_group.blocks.5.attn.proj 1 1 input0.39 5144 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.39=(36,64,192)f32 #5144=(36,64,192)f32
nn.Dropout layers_dfe.2.residual_group.blocks.5.attn.proj_drop 1 1 5144 5145 #5144=(36,64,192)f32 #5145=(36,64,192)f32
pnnx.Expression pnnx_expr_14763 1 1 5009 5146 expr=[-1,8,8,int(size(@0,2))] #5009=(1,2304,192)f32
pnnx.Expression pnnx_expr_14750 1 1 154 5152 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14743 0 1 5154 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1170 2 1 5145 5146 windows.37 $input=5145 $shape=5146 #5145=(36,64,192)f32 #windows.37=(36,8,8,192)f32
Tensor.view Tensor.view_1171 2 1 windows.37 5152 x3.37 $input=windows.37 $shape=5152 #windows.37=(36,8,8,192)f32 #x3.37=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14740 1 1 154 5157 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2599 2 1 x3.37 5154 5155 $input=x3.37 $dims=5154 #x3.37=(1,6,6,8,8,192)f32 #5155=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_55 1 1 5155 5156 memory_format=torch.contiguous_format $input=5155 #5155=(1,6,8,6,8,192)f32 #5156=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14737 0 1 5159 expr=[4,4]
pnnx.Expression pnnx_expr_14734 0 1 5160 expr=[1,2]
Tensor.view Tensor.view_1172 2 1 5156 5157 shifted_x.19 $input=5156 $shape=5157 #5156=(1,6,8,6,8,192)f32 #shifted_x.19=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14731 2 1 5009 154 5164 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5009=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2437 3 1 shifted_x.19 5159 5160 x4.37 $input=shifted_x.19 $shifts=5159 $dims=5160 #shifted_x.19=(1,48,48,192)f32 #x4.37=(1,48,48,192)f32
Tensor.view Tensor.view_1173 2 1 x4.37 5164 x5.19 $input=x4.37 $shape=5164 #x4.37=(1,48,48,192)f32 #x5.19=(1,2304,192)f32
pnnx.Expression pnnx_expr_14729 2 1 5009 x5.19 input.85 expr=add(@0,@1) #5009=(1,2304,192)f32 #x5.19=(1,2304,192)f32 #input.85=(1,2304,192)f32
nn.LayerNorm layers_dfe.2.residual_group.blocks.5.norm2 1 1 input.85 5168 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.85=(1,2304,192)f32 #5168=(1,2304,192)f32
nn.Linear layers_dfe.2.residual_group.blocks.5.mlp.fc1 1 1 5168 5173 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5168=(1,2304,192)f32 #5173=(1,2304,384)f32
nn.GELU layers_dfe.2.residual_group.blocks.5.mlp.act 1 1 5173 5174 #5173=(1,2304,384)f32 #5174=(1,2304,384)f32
nn.Dropout layers_dfe.2.residual_group.blocks.5.mlp.drop 1 1 5174 5175 #5174=(1,2304,384)f32 #5175=(1,2304,384)f32
nn.Linear layers_dfe.2.residual_group.blocks.5.mlp.fc2 1 1 5175 5176 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5175=(1,2304,384)f32 #5176=(1,2304,192)f32
nn.Dropout pnnx_unique_36 1 1 5176 5177 #5176=(1,2304,192)f32 #5177=(1,2304,192)f32
pnnx.Expression pnnx_expr_14727 2 1 input.85 5177 5179 expr=add(@0,@1) #input.85=(1,2304,192)f32 #5177=(1,2304,192)f32 #5179=(1,2304,192)f32
pnnx.Expression pnnx_expr_14725 0 1 5181 expr=1
pnnx.Expression pnnx_expr_14724 0 1 5182 expr=2
pnnx.Expression pnnx_expr_14719 2 1 5179 154 5188 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #5179=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.transpose torch.transpose_3001 3 1 5179 5181 5182 5187 $input=5179 $dim0=5181 $dim1=5182 #5179=(1,2304,192)f32 #5187=(1,192,2304)f32
Tensor.view Tensor.view_1174 2 1 5187 5188 input.87 $input=5187 $shape=5188 #5187=(1,192,2304)f32 #input.87=(1,192,48,48)f32
nn.Conv2d layers_dfe.2.conv 1 1 input.87 5190 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.87=(1,192,48,48)f32 #5190=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14718 0 1 5191 expr=-1
pnnx.Expression pnnx_expr_14717 0 1 5192 expr=2
pnnx.Expression pnnx_expr_14716 0 1 5193 expr=1
pnnx.Expression pnnx_expr_14715 0 1 21174 expr=2
torch.flatten torch.flatten_2186 3 1 5190 5192 5191 5194 $input=5190 $start_dim=5192 $end_dim=5191 #5190=(1,192,48,48)f32 #5194=(1,192,2304)f32
torch.transpose torch.transpose_3002 3 1 5194 5193 21174 5195 $input=5194 $dim0=5193 $dim1=21174 #5194=(1,192,2304)f32 #5195=(1,2304,192)f32
pnnx.Expression pnnx_expr_14714 2 1 5195 4214 5196 expr=add(@0,@1) #5195=(1,2304,192)f32 #4214=(1,2304,192)f32 #5196=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.0.norm1 1 1 5196 5240 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5196=(1,2304,192)f32 #5240=(1,2304,192)f32
pnnx.Expression pnnx_expr_14691 2 1 5196 154 5241 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5196=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1175 2 1 5240 5241 x.39 $input=5240 $shape=5241 #5240=(1,2304,192)f32 #x.39=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14670 1 1 x.39 5258 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.39=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14665 0 1 5260 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1176 2 1 x.39 5258 x0.39 $input=x.39 $shape=5258 #x.39=(1,48,48,192)f32 #x0.39=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14662 1 1 x.39 5263 expr=[-1,8,8,int(size(@0,3))] #x.39=(1,48,48,192)f32
torch.permute torch.permute_2600 2 1 x0.39 5260 5261 $input=x0.39 $dims=5260 #x0.39=(1,6,8,6,8,192)f32 #5261=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_56 1 1 5261 5262 memory_format=torch.contiguous_format $input=5261 #5261=(1,6,6,8,8,192)f32 #5262=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14660 1 1 5196 5265 expr=[-1,64,int(size(@0,2))] #5196=(1,2304,192)f32
pnnx.Expression pnnx_expr_14650 0 1 5276 expr=-2
pnnx.Expression pnnx_expr_14649 0 1 5277 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.0.attn 0 1 relative_position_bias_table.39 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.39=(225,6)f32
pnnx.Attribute pnnx_unique_37 0 1 relative_position_index.39 @relative_position_index=(64,64)i64 #relative_position_index.39=(64,64)i64
Tensor.view Tensor.view_1177 2 1 5262 5263 x_windows.39 $input=5262 $shape=5263 #5262=(1,6,6,8,8,192)f32 #x_windows.39=(36,8,8,192)f32
Tensor.view Tensor.view_1178 2 1 x_windows.39 5265 x1.39 $input=x_windows.39 $shape=5265 #x_windows.39=(36,8,8,192)f32 #x1.39=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.0.attn.qkv 1 1 x1.39 5297 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.39=(36,64,192)f32 #5297=(36,64,576)f32
pnnx.Expression pnnx_expr_14634 1 1 x1.39 5300 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.39=(36,64,192)f32
pnnx.Expression pnnx_expr_14629 0 1 5302 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_470 2 1 5297 5300 5301 $input=5297 $shape=5300 #5297=(36,64,576)f32 #5301=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14628 0 1 21192 expr=0
pnnx.Expression pnnx_expr_14627 0 1 21193 expr=0
pnnx.Expression pnnx_expr_14626 0 1 21194 expr=0
pnnx.Expression pnnx_expr_14625 0 1 21195 expr=1
pnnx.Expression pnnx_expr_14624 0 1 21196 expr=0
pnnx.Expression pnnx_expr_14623 0 1 21197 expr=2
torch.permute torch.permute_2601 2 1 5301 5302 qkv0.39 $input=5301 $dims=5302 #5301=(36,64,3,6,32)f32 #qkv0.39=(3,36,6,64,32)f32
Tensor.select Tensor.select_704 3 1 qkv0.39 21192 21193 q.39 $input=qkv0.39 $dim=21192 $index=21193 #qkv0.39=(3,36,6,64,32)f32 #q.39=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14622 1 1 q.39 q0.39 expr=mul(@0,1.767767e-01) #q.39=(36,6,64,32)f32 #q0.39=(36,6,64,32)f32
Tensor.select Tensor.select_705 3 1 qkv0.39 21194 21195 k.39 $input=qkv0.39 $dim=21194 $index=21195 #qkv0.39=(3,36,6,64,32)f32 #k.39=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14620 0 1 5310 expr=[-1]
Tensor.view Tensor.view_1179 2 1 relative_position_index.39 5310 5311 $input=relative_position_index.39 $shape=5310 #relative_position_index.39=(64,64)i64 #5311=(4096)i64
pnnx.Expression pnnx_expr_14619 1 1 5311 5312 expr=[@0] #5311=(4096)i64
pnnx.Expression pnnx_expr_14616 0 1 5314 expr=[64,64,-1]
Tensor.index Tensor.index_344 2 1 relative_position_bias_table.39 5312 5313 $input=relative_position_bias_table.39 $expr=5312 #relative_position_bias_table.39=(225,6)f32 #5313=(4096,6)f32
pnnx.Expression pnnx_expr_14612 0 1 5316 expr=[2,0,1]
Tensor.view Tensor.view_1180 2 1 5313 5314 relative_position_bias.39 $input=5313 $shape=5314 #5313=(4096,6)f32 #relative_position_bias.39=(64,64,6)f32
pnnx.Expression pnnx_expr_14611 0 1 21205 expr=0
torch.permute torch.permute_2602 2 1 relative_position_bias.39 5316 5317 $input=relative_position_bias.39 $dims=5316 #relative_position_bias.39=(64,64,6)f32 #5317=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_57 1 1 5317 relative_position_bias0.39 memory_format=torch.contiguous_format $input=5317 #5317=(6,64,64)f32 #relative_position_bias0.39=(6,64,64)f32
torch.transpose torch.transpose_3003 3 1 k.39 5276 5277 5308 $input=k.39 $dim0=5276 $dim1=5277 #k.39=(36,6,64,32)f32 #5308=(36,6,32,64)f32
torch.matmul torch.matmul_2240 2 1 q0.39 5308 attn.79 $input=q0.39 $other=5308 #q0.39=(36,6,64,32)f32 #5308=(36,6,32,64)f32 #attn.79=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3254 2 1 relative_position_bias0.39 21205 5319 $input=relative_position_bias0.39 $dim=21205 #relative_position_bias0.39=(6,64,64)f32 #5319=(1,6,64,64)f32
pnnx.Expression pnnx_expr_14609 2 1 attn.79 5319 input.89 expr=add(@0,@1) #attn.79=(36,6,64,64)f32 #5319=(1,6,64,64)f32 #input.89=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.0.attn.softmax 1 1 input.89 5321 dim=-1 #input.89=(36,6,64,64)f32 #5321=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.0.attn.attn_drop 1 1 5321 5322 #5321=(36,6,64,64)f32 #5322=(36,6,64,64)f32
Tensor.select Tensor.select_706 3 1 qkv0.39 21196 21197 v.39 $input=qkv0.39 $dim=21196 $index=21197 #qkv0.39=(3,36,6,64,32)f32 #v.39=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14608 0 1 21207 expr=1
pnnx.Expression pnnx_expr_14607 0 1 21208 expr=2
torch.matmul torch.matmul_2241 2 1 5322 v.39 5323 $input=5322 $other=v.39 #5322=(36,6,64,64)f32 #v.39=(36,6,64,32)f32 #5323=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14606 1 1 x1.39 5325 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.39=(36,64,192)f32
torch.transpose torch.transpose_3004 3 1 5323 21207 21208 5324 $input=5323 $dim0=21207 $dim1=21208 #5323=(36,6,64,32)f32 #5324=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_471 2 1 5324 5325 input0.41 $input=5324 $shape=5325 #5324=(36,64,6,32)f32 #input0.41=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.0.attn.proj 1 1 input0.41 5327 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.41=(36,64,192)f32 #5327=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.0.attn.proj_drop 1 1 5327 5328 #5327=(36,64,192)f32 #5328=(36,64,192)f32
pnnx.Expression pnnx_expr_14602 1 1 5196 5329 expr=[-1,8,8,int(size(@0,2))] #5196=(1,2304,192)f32
pnnx.Expression pnnx_expr_14589 1 1 154 5335 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14582 0 1 5337 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1181 2 1 5328 5329 windows.39 $input=5328 $shape=5329 #5328=(36,64,192)f32 #windows.39=(36,8,8,192)f32
Tensor.view Tensor.view_1182 2 1 windows.39 5335 x2.39 $input=windows.39 $shape=5335 #windows.39=(36,8,8,192)f32 #x2.39=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14579 1 1 154 5340 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2603 2 1 x2.39 5337 5338 $input=x2.39 $dims=5337 #x2.39=(1,6,6,8,8,192)f32 #5338=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_58 1 1 5338 5339 memory_format=torch.contiguous_format $input=5338 #5338=(1,6,8,6,8,192)f32 #5339=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14576 2 1 5196 154 5344 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5196=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1183 2 1 5339 5340 x3.39 $input=5339 $shape=5340 #5339=(1,6,8,6,8,192)f32 #x3.39=(1,48,48,192)f32
Tensor.view Tensor.view_1184 2 1 x3.39 5344 x4.39 $input=x3.39 $shape=5344 #x3.39=(1,48,48,192)f32 #x4.39=(1,2304,192)f32
pnnx.Expression pnnx_expr_14574 2 1 5196 x4.39 input.91 expr=add(@0,@1) #5196=(1,2304,192)f32 #x4.39=(1,2304,192)f32 #input.91=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.0.norm2 1 1 input.91 5348 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.91=(1,2304,192)f32 #5348=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.0.mlp.fc1 1 1 5348 5353 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5348=(1,2304,192)f32 #5353=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.0.mlp.act 1 1 5353 5354 #5353=(1,2304,384)f32 #5354=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.0.mlp.drop 1 1 5354 5355 #5354=(1,2304,384)f32 #5355=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.0.mlp.fc2 1 1 5355 5356 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5355=(1,2304,384)f32 #5356=(1,2304,192)f32
nn.Dropout pnnx_unique_38 1 1 5356 5357 #5356=(1,2304,192)f32 #5357=(1,2304,192)f32
pnnx.Expression pnnx_expr_14572 2 1 input.91 5357 5359 expr=add(@0,@1) #input.91=(1,2304,192)f32 #5357=(1,2304,192)f32 #5359=(1,2304,192)f32
pnnx.Attribute layers_dfe.3.residual_group.blocks.1 0 1 attn_mask.21 @attn_mask=(36,64,64)f32 #attn_mask.21=(36,64,64)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.1.norm1 1 1 5359 5388 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5359=(1,2304,192)f32 #5388=(1,2304,192)f32
pnnx.Expression pnnx_expr_14549 2 1 5359 154 5389 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5359=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14547 0 1 5391 expr=[-4,-4]
pnnx.Expression pnnx_expr_14545 0 1 5392 expr=[1,2]
Tensor.view Tensor.view_1185 2 1 5388 5389 x.41 $input=5388 $shape=5389 #5388=(1,2304,192)f32 #x.41=(1,48,48,192)f32
torch.roll torch.roll_2438 3 1 x.41 5391 5392 x0.41 $input=x.41 $shifts=5391 $dims=5392 #x.41=(1,48,48,192)f32 #x0.41=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14523 1 1 x0.41 5409 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.41=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14518 0 1 5411 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1186 2 1 x0.41 5409 x1.41 $input=x0.41 $shape=5409 #x0.41=(1,48,48,192)f32 #x1.41=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14515 1 1 x0.41 5414 expr=[-1,8,8,int(size(@0,3))] #x0.41=(1,48,48,192)f32
torch.permute torch.permute_2604 2 1 x1.41 5411 5412 $input=x1.41 $dims=5411 #x1.41=(1,6,8,6,8,192)f32 #5412=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_59 1 1 5412 5413 memory_format=torch.contiguous_format $input=5412 #5412=(1,6,6,8,8,192)f32 #5413=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14513 1 1 5359 5416 expr=[-1,64,int(size(@0,2))] #5359=(1,2304,192)f32
pnnx.Expression pnnx_expr_14503 0 1 5427 expr=-2
pnnx.Expression pnnx_expr_14502 0 1 5428 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.1.attn 0 1 relative_position_bias_table.41 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.41=(225,6)f32
pnnx.Attribute pnnx_unique_39 0 1 relative_position_index.41 @relative_position_index=(64,64)i64 #relative_position_index.41=(64,64)i64
Tensor.view Tensor.view_1187 2 1 5413 5414 x_windows.41 $input=5413 $shape=5414 #5413=(1,6,6,8,8,192)f32 #x_windows.41=(36,8,8,192)f32
Tensor.view Tensor.view_1188 2 1 x_windows.41 5416 x2.41 $input=x_windows.41 $shape=5416 #x_windows.41=(36,8,8,192)f32 #x2.41=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.1.attn.qkv 1 1 x2.41 5452 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.41=(36,64,192)f32 #5452=(36,64,576)f32
pnnx.Expression pnnx_expr_14483 1 1 x2.41 5455 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.41=(36,64,192)f32
pnnx.Expression pnnx_expr_14478 0 1 5457 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_472 2 1 5452 5455 5456 $input=5452 $shape=5455 #5452=(36,64,576)f32 #5456=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14477 0 1 21251 expr=0
pnnx.Expression pnnx_expr_14476 0 1 21252 expr=0
pnnx.Expression pnnx_expr_14475 0 1 21253 expr=0
pnnx.Expression pnnx_expr_14474 0 1 21254 expr=1
pnnx.Expression pnnx_expr_14473 0 1 21255 expr=0
pnnx.Expression pnnx_expr_14472 0 1 21256 expr=2
torch.permute torch.permute_2605 2 1 5456 5457 qkv0.41 $input=5456 $dims=5457 #5456=(36,64,3,6,32)f32 #qkv0.41=(3,36,6,64,32)f32
Tensor.select Tensor.select_707 3 1 qkv0.41 21251 21252 q.41 $input=qkv0.41 $dim=21251 $index=21252 #qkv0.41=(3,36,6,64,32)f32 #q.41=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14471 1 1 q.41 q0.41 expr=mul(@0,1.767767e-01) #q.41=(36,6,64,32)f32 #q0.41=(36,6,64,32)f32
Tensor.select Tensor.select_708 3 1 qkv0.41 21253 21254 k.41 $input=qkv0.41 $dim=21253 $index=21254 #qkv0.41=(3,36,6,64,32)f32 #k.41=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14469 0 1 5465 expr=[-1]
Tensor.view Tensor.view_1189 2 1 relative_position_index.41 5465 5466 $input=relative_position_index.41 $shape=5465 #relative_position_index.41=(64,64)i64 #5466=(4096)i64
pnnx.Expression pnnx_expr_14468 1 1 5466 5467 expr=[@0] #5466=(4096)i64
pnnx.Expression pnnx_expr_14465 0 1 5469 expr=[64,64,-1]
Tensor.index Tensor.index_345 2 1 relative_position_bias_table.41 5467 5468 $input=relative_position_bias_table.41 $expr=5467 #relative_position_bias_table.41=(225,6)f32 #5468=(4096,6)f32
pnnx.Expression pnnx_expr_14461 0 1 5471 expr=[2,0,1]
Tensor.view Tensor.view_1190 2 1 5468 5469 relative_position_bias.41 $input=5468 $shape=5469 #5468=(4096,6)f32 #relative_position_bias.41=(64,64,6)f32
pnnx.Expression pnnx_expr_14460 0 1 21264 expr=0
torch.permute torch.permute_2606 2 1 relative_position_bias.41 5471 5472 $input=relative_position_bias.41 $dims=5471 #relative_position_bias.41=(64,64,6)f32 #5472=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_60 1 1 5472 relative_position_bias0.41 memory_format=torch.contiguous_format $input=5472 #5472=(6,64,64)f32 #relative_position_bias0.41=(6,64,64)f32
torch.transpose torch.transpose_3005 3 1 k.41 5427 5428 5463 $input=k.41 $dim0=5427 $dim1=5428 #k.41=(36,6,64,32)f32 #5463=(36,6,32,64)f32
torch.matmul torch.matmul_2242 2 1 q0.41 5463 attn.83 $input=q0.41 $other=5463 #q0.41=(36,6,64,32)f32 #5463=(36,6,32,64)f32 #attn.83=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3255 2 1 relative_position_bias0.41 21264 5474 $input=relative_position_bias0.41 $dim=21264 #relative_position_bias0.41=(6,64,64)f32 #5474=(1,6,64,64)f32
pnnx.Expression pnnx_expr_14458 2 1 attn.83 5474 attn0.21 expr=add(@0,@1) #attn.83=(36,6,64,64)f32 #5474=(1,6,64,64)f32 #attn0.21=(36,6,64,64)f32
pnnx.Expression pnnx_expr_14449 2 1 x2.41 attn_mask.21 5481 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.41=(36,64,192)f32 #attn_mask.21=(36,64,64)f32
pnnx.Expression pnnx_expr_14448 0 1 21269 expr=1
pnnx.Expression pnnx_expr_14447 0 1 21270 expr=0
Tensor.view Tensor.view_1191 2 1 attn0.21 5481 5482 $input=attn0.21 $shape=5481 #attn0.21=(36,6,64,64)f32 #5482=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3256 2 1 attn_mask.21 21269 5483 $input=attn_mask.21 $dim=21269 #attn_mask.21=(36,64,64)f32 #5483=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3257 2 1 5483 21270 5484 $input=5483 $dim=21270 #5483=(36,1,64,64)f32 #5484=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_14445 2 1 5482 5484 attn1.21 expr=add(@0,@1) #5482=(1,36,6,64,64)f32 #5484=(1,36,1,64,64)f32 #attn1.21=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_14442 1 1 x2.41 5486 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.41=(36,64,192)f32
Tensor.view Tensor.view_1192 2 1 attn1.21 5486 input.93 $input=attn1.21 $shape=5486 #attn1.21=(1,36,6,64,64)f32 #input.93=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.1.attn.softmax 1 1 input.93 5488 dim=-1 #input.93=(36,6,64,64)f32 #5488=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.1.attn.attn_drop 1 1 5488 5489 #5488=(36,6,64,64)f32 #5489=(36,6,64,64)f32
Tensor.select Tensor.select_709 3 1 qkv0.41 21255 21256 v.41 $input=qkv0.41 $dim=21255 $index=21256 #qkv0.41=(3,36,6,64,32)f32 #v.41=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14441 0 1 21274 expr=1
pnnx.Expression pnnx_expr_14440 0 1 21275 expr=2
torch.matmul torch.matmul_2243 2 1 5489 v.41 5490 $input=5489 $other=v.41 #5489=(36,6,64,64)f32 #v.41=(36,6,64,32)f32 #5490=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14439 1 1 x2.41 5492 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.41=(36,64,192)f32
torch.transpose torch.transpose_3006 3 1 5490 21274 21275 5491 $input=5490 $dim0=21274 $dim1=21275 #5490=(36,6,64,32)f32 #5491=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_473 2 1 5491 5492 input0.43 $input=5491 $shape=5492 #5491=(36,64,6,32)f32 #input0.43=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.1.attn.proj 1 1 input0.43 5494 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.43=(36,64,192)f32 #5494=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.1.attn.proj_drop 1 1 5494 5495 #5494=(36,64,192)f32 #5495=(36,64,192)f32
pnnx.Expression pnnx_expr_14435 1 1 5359 5496 expr=[-1,8,8,int(size(@0,2))] #5359=(1,2304,192)f32
pnnx.Expression pnnx_expr_14422 1 1 154 5502 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14415 0 1 5504 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1193 2 1 5495 5496 windows.41 $input=5495 $shape=5496 #5495=(36,64,192)f32 #windows.41=(36,8,8,192)f32
Tensor.view Tensor.view_1194 2 1 windows.41 5502 x3.41 $input=windows.41 $shape=5502 #windows.41=(36,8,8,192)f32 #x3.41=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14412 1 1 154 5507 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2607 2 1 x3.41 5504 5505 $input=x3.41 $dims=5504 #x3.41=(1,6,6,8,8,192)f32 #5505=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_61 1 1 5505 5506 memory_format=torch.contiguous_format $input=5505 #5505=(1,6,8,6,8,192)f32 #5506=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14409 0 1 5509 expr=[4,4]
pnnx.Expression pnnx_expr_14406 0 1 5510 expr=[1,2]
Tensor.view Tensor.view_1195 2 1 5506 5507 shifted_x.21 $input=5506 $shape=5507 #5506=(1,6,8,6,8,192)f32 #shifted_x.21=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14403 2 1 5359 154 5514 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5359=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2439 3 1 shifted_x.21 5509 5510 x4.41 $input=shifted_x.21 $shifts=5509 $dims=5510 #shifted_x.21=(1,48,48,192)f32 #x4.41=(1,48,48,192)f32
Tensor.view Tensor.view_1196 2 1 x4.41 5514 x5.21 $input=x4.41 $shape=5514 #x4.41=(1,48,48,192)f32 #x5.21=(1,2304,192)f32
pnnx.Expression pnnx_expr_14401 2 1 5359 x5.21 input.95 expr=add(@0,@1) #5359=(1,2304,192)f32 #x5.21=(1,2304,192)f32 #input.95=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.1.norm2 1 1 input.95 5518 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.95=(1,2304,192)f32 #5518=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.1.mlp.fc1 1 1 5518 5523 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5518=(1,2304,192)f32 #5523=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.1.mlp.act 1 1 5523 5524 #5523=(1,2304,384)f32 #5524=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.1.mlp.drop 1 1 5524 5525 #5524=(1,2304,384)f32 #5525=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.1.mlp.fc2 1 1 5525 5526 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5525=(1,2304,384)f32 #5526=(1,2304,192)f32
nn.Dropout pnnx_unique_40 1 1 5526 5527 #5526=(1,2304,192)f32 #5527=(1,2304,192)f32
pnnx.Expression pnnx_expr_14399 2 1 input.95 5527 5529 expr=add(@0,@1) #input.95=(1,2304,192)f32 #5527=(1,2304,192)f32 #5529=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.2.norm1 1 1 5529 5556 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5529=(1,2304,192)f32 #5556=(1,2304,192)f32
pnnx.Expression pnnx_expr_14377 2 1 5529 154 5557 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5529=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1197 2 1 5556 5557 x.43 $input=5556 $shape=5557 #5556=(1,2304,192)f32 #x.43=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14356 1 1 x.43 5574 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.43=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14351 0 1 5576 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1198 2 1 x.43 5574 x0.43 $input=x.43 $shape=5574 #x.43=(1,48,48,192)f32 #x0.43=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14348 1 1 x.43 5579 expr=[-1,8,8,int(size(@0,3))] #x.43=(1,48,48,192)f32
torch.permute torch.permute_2608 2 1 x0.43 5576 5577 $input=x0.43 $dims=5576 #x0.43=(1,6,8,6,8,192)f32 #5577=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_62 1 1 5577 5578 memory_format=torch.contiguous_format $input=5577 #5577=(1,6,6,8,8,192)f32 #5578=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14346 1 1 5529 5581 expr=[-1,64,int(size(@0,2))] #5529=(1,2304,192)f32
pnnx.Expression pnnx_expr_14336 0 1 5592 expr=-2
pnnx.Expression pnnx_expr_14335 0 1 5593 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.2.attn 0 1 relative_position_bias_table.43 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.43=(225,6)f32
pnnx.Attribute pnnx_unique_41 0 1 relative_position_index.43 @relative_position_index=(64,64)i64 #relative_position_index.43=(64,64)i64
Tensor.view Tensor.view_1199 2 1 5578 5579 x_windows.43 $input=5578 $shape=5579 #5578=(1,6,6,8,8,192)f32 #x_windows.43=(36,8,8,192)f32
Tensor.view Tensor.view_1200 2 1 x_windows.43 5581 x1.43 $input=x_windows.43 $shape=5581 #x_windows.43=(36,8,8,192)f32 #x1.43=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.2.attn.qkv 1 1 x1.43 5613 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.43=(36,64,192)f32 #5613=(36,64,576)f32
pnnx.Expression pnnx_expr_14320 1 1 x1.43 5616 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.43=(36,64,192)f32
pnnx.Expression pnnx_expr_14315 0 1 5618 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_474 2 1 5613 5616 5617 $input=5613 $shape=5616 #5613=(36,64,576)f32 #5617=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14314 0 1 21319 expr=0
pnnx.Expression pnnx_expr_14313 0 1 21320 expr=0
pnnx.Expression pnnx_expr_14312 0 1 21321 expr=0
pnnx.Expression pnnx_expr_14311 0 1 21322 expr=1
pnnx.Expression pnnx_expr_14310 0 1 21323 expr=0
pnnx.Expression pnnx_expr_14309 0 1 21324 expr=2
torch.permute torch.permute_2609 2 1 5617 5618 qkv0.43 $input=5617 $dims=5618 #5617=(36,64,3,6,32)f32 #qkv0.43=(3,36,6,64,32)f32
Tensor.select Tensor.select_710 3 1 qkv0.43 21319 21320 q.43 $input=qkv0.43 $dim=21319 $index=21320 #qkv0.43=(3,36,6,64,32)f32 #q.43=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14308 1 1 q.43 q0.43 expr=mul(@0,1.767767e-01) #q.43=(36,6,64,32)f32 #q0.43=(36,6,64,32)f32
Tensor.select Tensor.select_711 3 1 qkv0.43 21321 21322 k.43 $input=qkv0.43 $dim=21321 $index=21322 #qkv0.43=(3,36,6,64,32)f32 #k.43=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14306 0 1 5626 expr=[-1]
Tensor.view Tensor.view_1201 2 1 relative_position_index.43 5626 5627 $input=relative_position_index.43 $shape=5626 #relative_position_index.43=(64,64)i64 #5627=(4096)i64
pnnx.Expression pnnx_expr_14305 1 1 5627 5628 expr=[@0] #5627=(4096)i64
pnnx.Expression pnnx_expr_14302 0 1 5630 expr=[64,64,-1]
Tensor.index Tensor.index_346 2 1 relative_position_bias_table.43 5628 5629 $input=relative_position_bias_table.43 $expr=5628 #relative_position_bias_table.43=(225,6)f32 #5629=(4096,6)f32
pnnx.Expression pnnx_expr_14298 0 1 5632 expr=[2,0,1]
Tensor.view Tensor.view_1202 2 1 5629 5630 relative_position_bias.43 $input=5629 $shape=5630 #5629=(4096,6)f32 #relative_position_bias.43=(64,64,6)f32
pnnx.Expression pnnx_expr_14297 0 1 21332 expr=0
torch.permute torch.permute_2610 2 1 relative_position_bias.43 5632 5633 $input=relative_position_bias.43 $dims=5632 #relative_position_bias.43=(64,64,6)f32 #5633=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_63 1 1 5633 relative_position_bias0.43 memory_format=torch.contiguous_format $input=5633 #5633=(6,64,64)f32 #relative_position_bias0.43=(6,64,64)f32
torch.transpose torch.transpose_3007 3 1 k.43 5592 5593 5624 $input=k.43 $dim0=5592 $dim1=5593 #k.43=(36,6,64,32)f32 #5624=(36,6,32,64)f32
torch.matmul torch.matmul_2244 2 1 q0.43 5624 attn.87 $input=q0.43 $other=5624 #q0.43=(36,6,64,32)f32 #5624=(36,6,32,64)f32 #attn.87=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3258 2 1 relative_position_bias0.43 21332 5635 $input=relative_position_bias0.43 $dim=21332 #relative_position_bias0.43=(6,64,64)f32 #5635=(1,6,64,64)f32
pnnx.Expression pnnx_expr_14295 2 1 attn.87 5635 input.97 expr=add(@0,@1) #attn.87=(36,6,64,64)f32 #5635=(1,6,64,64)f32 #input.97=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.2.attn.softmax 1 1 input.97 5637 dim=-1 #input.97=(36,6,64,64)f32 #5637=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.2.attn.attn_drop 1 1 5637 5638 #5637=(36,6,64,64)f32 #5638=(36,6,64,64)f32
Tensor.select Tensor.select_712 3 1 qkv0.43 21323 21324 v.43 $input=qkv0.43 $dim=21323 $index=21324 #qkv0.43=(3,36,6,64,32)f32 #v.43=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14294 0 1 21334 expr=1
pnnx.Expression pnnx_expr_14293 0 1 21335 expr=2
torch.matmul torch.matmul_2245 2 1 5638 v.43 5639 $input=5638 $other=v.43 #5638=(36,6,64,64)f32 #v.43=(36,6,64,32)f32 #5639=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14292 1 1 x1.43 5641 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.43=(36,64,192)f32
torch.transpose torch.transpose_3008 3 1 5639 21334 21335 5640 $input=5639 $dim0=21334 $dim1=21335 #5639=(36,6,64,32)f32 #5640=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_475 2 1 5640 5641 input0.45 $input=5640 $shape=5641 #5640=(36,64,6,32)f32 #input0.45=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.2.attn.proj 1 1 input0.45 5643 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.45=(36,64,192)f32 #5643=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.2.attn.proj_drop 1 1 5643 5644 #5643=(36,64,192)f32 #5644=(36,64,192)f32
pnnx.Expression pnnx_expr_14288 1 1 5529 5645 expr=[-1,8,8,int(size(@0,2))] #5529=(1,2304,192)f32
pnnx.Expression pnnx_expr_14275 1 1 154 5651 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14268 0 1 5653 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1203 2 1 5644 5645 windows.43 $input=5644 $shape=5645 #5644=(36,64,192)f32 #windows.43=(36,8,8,192)f32
Tensor.view Tensor.view_1204 2 1 windows.43 5651 x2.43 $input=windows.43 $shape=5651 #windows.43=(36,8,8,192)f32 #x2.43=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14265 1 1 154 5656 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2611 2 1 x2.43 5653 5654 $input=x2.43 $dims=5653 #x2.43=(1,6,6,8,8,192)f32 #5654=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_64 1 1 5654 5655 memory_format=torch.contiguous_format $input=5654 #5654=(1,6,8,6,8,192)f32 #5655=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14262 2 1 5529 154 5660 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5529=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1205 2 1 5655 5656 x3.43 $input=5655 $shape=5656 #5655=(1,6,8,6,8,192)f32 #x3.43=(1,48,48,192)f32
Tensor.view Tensor.view_1206 2 1 x3.43 5660 x4.43 $input=x3.43 $shape=5660 #x3.43=(1,48,48,192)f32 #x4.43=(1,2304,192)f32
pnnx.Expression pnnx_expr_14260 2 1 5529 x4.43 input.99 expr=add(@0,@1) #5529=(1,2304,192)f32 #x4.43=(1,2304,192)f32 #input.99=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.2.norm2 1 1 input.99 5664 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.99=(1,2304,192)f32 #5664=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.2.mlp.fc1 1 1 5664 5669 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5664=(1,2304,192)f32 #5669=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.2.mlp.act 1 1 5669 5670 #5669=(1,2304,384)f32 #5670=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.2.mlp.drop 1 1 5670 5671 #5670=(1,2304,384)f32 #5671=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.2.mlp.fc2 1 1 5671 5672 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5671=(1,2304,384)f32 #5672=(1,2304,192)f32
nn.Dropout pnnx_unique_42 1 1 5672 5673 #5672=(1,2304,192)f32 #5673=(1,2304,192)f32
pnnx.Expression pnnx_expr_14258 2 1 input.99 5673 5675 expr=add(@0,@1) #input.99=(1,2304,192)f32 #5673=(1,2304,192)f32 #5675=(1,2304,192)f32
pnnx.Attribute layers_dfe.3.residual_group.blocks.3 0 1 attn_mask.23 @attn_mask=(36,64,64)f32 #attn_mask.23=(36,64,64)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.3.norm1 1 1 5675 5704 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5675=(1,2304,192)f32 #5704=(1,2304,192)f32
pnnx.Expression pnnx_expr_14235 2 1 5675 154 5705 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5675=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14233 0 1 5707 expr=[-4,-4]
pnnx.Expression pnnx_expr_14231 0 1 5708 expr=[1,2]
Tensor.view Tensor.view_1207 2 1 5704 5705 x.45 $input=5704 $shape=5705 #5704=(1,2304,192)f32 #x.45=(1,48,48,192)f32
torch.roll torch.roll_2440 3 1 x.45 5707 5708 x0.45 $input=x.45 $shifts=5707 $dims=5708 #x.45=(1,48,48,192)f32 #x0.45=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14209 1 1 x0.45 5725 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.45=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14204 0 1 5727 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1208 2 1 x0.45 5725 x1.45 $input=x0.45 $shape=5725 #x0.45=(1,48,48,192)f32 #x1.45=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14201 1 1 x0.45 5730 expr=[-1,8,8,int(size(@0,3))] #x0.45=(1,48,48,192)f32
torch.permute torch.permute_2612 2 1 x1.45 5727 5728 $input=x1.45 $dims=5727 #x1.45=(1,6,8,6,8,192)f32 #5728=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_65 1 1 5728 5729 memory_format=torch.contiguous_format $input=5728 #5728=(1,6,6,8,8,192)f32 #5729=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14199 1 1 5675 5732 expr=[-1,64,int(size(@0,2))] #5675=(1,2304,192)f32
pnnx.Expression pnnx_expr_14189 0 1 5743 expr=-2
pnnx.Expression pnnx_expr_14188 0 1 5744 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.3.attn 0 1 relative_position_bias_table.45 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.45=(225,6)f32
pnnx.Attribute pnnx_unique_43 0 1 relative_position_index.45 @relative_position_index=(64,64)i64 #relative_position_index.45=(64,64)i64
Tensor.view Tensor.view_1209 2 1 5729 5730 x_windows.45 $input=5729 $shape=5730 #5729=(1,6,6,8,8,192)f32 #x_windows.45=(36,8,8,192)f32
Tensor.view Tensor.view_1210 2 1 x_windows.45 5732 x2.45 $input=x_windows.45 $shape=5732 #x_windows.45=(36,8,8,192)f32 #x2.45=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.3.attn.qkv 1 1 x2.45 5768 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.45=(36,64,192)f32 #5768=(36,64,576)f32
pnnx.Expression pnnx_expr_14169 1 1 x2.45 5771 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.45=(36,64,192)f32
pnnx.Expression pnnx_expr_14164 0 1 5773 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_476 2 1 5768 5771 5772 $input=5768 $shape=5771 #5768=(36,64,576)f32 #5772=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14163 0 1 21378 expr=0
pnnx.Expression pnnx_expr_14162 0 1 21379 expr=0
pnnx.Expression pnnx_expr_14161 0 1 21380 expr=0
pnnx.Expression pnnx_expr_14160 0 1 21381 expr=1
pnnx.Expression pnnx_expr_14159 0 1 21382 expr=0
pnnx.Expression pnnx_expr_14158 0 1 21383 expr=2
torch.permute torch.permute_2613 2 1 5772 5773 qkv0.45 $input=5772 $dims=5773 #5772=(36,64,3,6,32)f32 #qkv0.45=(3,36,6,64,32)f32
Tensor.select Tensor.select_713 3 1 qkv0.45 21378 21379 q.45 $input=qkv0.45 $dim=21378 $index=21379 #qkv0.45=(3,36,6,64,32)f32 #q.45=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14157 1 1 q.45 q0.45 expr=mul(@0,1.767767e-01) #q.45=(36,6,64,32)f32 #q0.45=(36,6,64,32)f32
Tensor.select Tensor.select_714 3 1 qkv0.45 21380 21381 k.45 $input=qkv0.45 $dim=21380 $index=21381 #qkv0.45=(3,36,6,64,32)f32 #k.45=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14155 0 1 5781 expr=[-1]
Tensor.view Tensor.view_1211 2 1 relative_position_index.45 5781 5782 $input=relative_position_index.45 $shape=5781 #relative_position_index.45=(64,64)i64 #5782=(4096)i64
pnnx.Expression pnnx_expr_14154 1 1 5782 5783 expr=[@0] #5782=(4096)i64
pnnx.Expression pnnx_expr_14151 0 1 5785 expr=[64,64,-1]
Tensor.index Tensor.index_347 2 1 relative_position_bias_table.45 5783 5784 $input=relative_position_bias_table.45 $expr=5783 #relative_position_bias_table.45=(225,6)f32 #5784=(4096,6)f32
pnnx.Expression pnnx_expr_14147 0 1 5787 expr=[2,0,1]
Tensor.view Tensor.view_1212 2 1 5784 5785 relative_position_bias.45 $input=5784 $shape=5785 #5784=(4096,6)f32 #relative_position_bias.45=(64,64,6)f32
pnnx.Expression pnnx_expr_14146 0 1 21391 expr=0
torch.permute torch.permute_2614 2 1 relative_position_bias.45 5787 5788 $input=relative_position_bias.45 $dims=5787 #relative_position_bias.45=(64,64,6)f32 #5788=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_66 1 1 5788 relative_position_bias0.45 memory_format=torch.contiguous_format $input=5788 #5788=(6,64,64)f32 #relative_position_bias0.45=(6,64,64)f32
torch.transpose torch.transpose_3009 3 1 k.45 5743 5744 5779 $input=k.45 $dim0=5743 $dim1=5744 #k.45=(36,6,64,32)f32 #5779=(36,6,32,64)f32
torch.matmul torch.matmul_2246 2 1 q0.45 5779 attn.91 $input=q0.45 $other=5779 #q0.45=(36,6,64,32)f32 #5779=(36,6,32,64)f32 #attn.91=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3259 2 1 relative_position_bias0.45 21391 5790 $input=relative_position_bias0.45 $dim=21391 #relative_position_bias0.45=(6,64,64)f32 #5790=(1,6,64,64)f32
pnnx.Expression pnnx_expr_14144 2 1 attn.91 5790 attn0.23 expr=add(@0,@1) #attn.91=(36,6,64,64)f32 #5790=(1,6,64,64)f32 #attn0.23=(36,6,64,64)f32
pnnx.Expression pnnx_expr_14135 2 1 x2.45 attn_mask.23 5797 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.45=(36,64,192)f32 #attn_mask.23=(36,64,64)f32
pnnx.Expression pnnx_expr_14134 0 1 21396 expr=1
pnnx.Expression pnnx_expr_14133 0 1 21397 expr=0
Tensor.view Tensor.view_1213 2 1 attn0.23 5797 5798 $input=attn0.23 $shape=5797 #attn0.23=(36,6,64,64)f32 #5798=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3260 2 1 attn_mask.23 21396 5799 $input=attn_mask.23 $dim=21396 #attn_mask.23=(36,64,64)f32 #5799=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3261 2 1 5799 21397 5800 $input=5799 $dim=21397 #5799=(36,1,64,64)f32 #5800=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_14131 2 1 5798 5800 attn1.23 expr=add(@0,@1) #5798=(1,36,6,64,64)f32 #5800=(1,36,1,64,64)f32 #attn1.23=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_14128 1 1 x2.45 5802 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.45=(36,64,192)f32
Tensor.view Tensor.view_1214 2 1 attn1.23 5802 input.101 $input=attn1.23 $shape=5802 #attn1.23=(1,36,6,64,64)f32 #input.101=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.3.attn.softmax 1 1 input.101 5804 dim=-1 #input.101=(36,6,64,64)f32 #5804=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.3.attn.attn_drop 1 1 5804 5805 #5804=(36,6,64,64)f32 #5805=(36,6,64,64)f32
Tensor.select Tensor.select_715 3 1 qkv0.45 21382 21383 v.45 $input=qkv0.45 $dim=21382 $index=21383 #qkv0.45=(3,36,6,64,32)f32 #v.45=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14127 0 1 21401 expr=1
pnnx.Expression pnnx_expr_14126 0 1 21402 expr=2
torch.matmul torch.matmul_2247 2 1 5805 v.45 5806 $input=5805 $other=v.45 #5805=(36,6,64,64)f32 #v.45=(36,6,64,32)f32 #5806=(36,6,64,32)f32
pnnx.Expression pnnx_expr_14125 1 1 x2.45 5808 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.45=(36,64,192)f32
torch.transpose torch.transpose_3010 3 1 5806 21401 21402 5807 $input=5806 $dim0=21401 $dim1=21402 #5806=(36,6,64,32)f32 #5807=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_477 2 1 5807 5808 input0.47 $input=5807 $shape=5808 #5807=(36,64,6,32)f32 #input0.47=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.3.attn.proj 1 1 input0.47 5810 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.47=(36,64,192)f32 #5810=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.3.attn.proj_drop 1 1 5810 5811 #5810=(36,64,192)f32 #5811=(36,64,192)f32
pnnx.Expression pnnx_expr_14121 1 1 5675 5812 expr=[-1,8,8,int(size(@0,2))] #5675=(1,2304,192)f32
pnnx.Expression pnnx_expr_14108 1 1 154 5818 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14101 0 1 5820 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1215 2 1 5811 5812 windows.45 $input=5811 $shape=5812 #5811=(36,64,192)f32 #windows.45=(36,8,8,192)f32
Tensor.view Tensor.view_1216 2 1 windows.45 5818 x3.45 $input=windows.45 $shape=5818 #windows.45=(36,8,8,192)f32 #x3.45=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14098 1 1 154 5823 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2615 2 1 x3.45 5820 5821 $input=x3.45 $dims=5820 #x3.45=(1,6,6,8,8,192)f32 #5821=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_67 1 1 5821 5822 memory_format=torch.contiguous_format $input=5821 #5821=(1,6,8,6,8,192)f32 #5822=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14095 0 1 5825 expr=[4,4]
pnnx.Expression pnnx_expr_14092 0 1 5826 expr=[1,2]
Tensor.view Tensor.view_1217 2 1 5822 5823 shifted_x.23 $input=5822 $shape=5823 #5822=(1,6,8,6,8,192)f32 #shifted_x.23=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14089 2 1 5675 154 5830 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5675=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2441 3 1 shifted_x.23 5825 5826 x4.45 $input=shifted_x.23 $shifts=5825 $dims=5826 #shifted_x.23=(1,48,48,192)f32 #x4.45=(1,48,48,192)f32
Tensor.view Tensor.view_1218 2 1 x4.45 5830 x5.23 $input=x4.45 $shape=5830 #x4.45=(1,48,48,192)f32 #x5.23=(1,2304,192)f32
pnnx.Expression pnnx_expr_14087 2 1 5675 x5.23 input.103 expr=add(@0,@1) #5675=(1,2304,192)f32 #x5.23=(1,2304,192)f32 #input.103=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.3.norm2 1 1 input.103 5834 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.103=(1,2304,192)f32 #5834=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.3.mlp.fc1 1 1 5834 5839 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5834=(1,2304,192)f32 #5839=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.3.mlp.act 1 1 5839 5840 #5839=(1,2304,384)f32 #5840=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.3.mlp.drop 1 1 5840 5841 #5840=(1,2304,384)f32 #5841=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.3.mlp.fc2 1 1 5841 5842 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5841=(1,2304,384)f32 #5842=(1,2304,192)f32
nn.Dropout pnnx_unique_44 1 1 5842 5843 #5842=(1,2304,192)f32 #5843=(1,2304,192)f32
pnnx.Expression pnnx_expr_14085 2 1 input.103 5843 5845 expr=add(@0,@1) #input.103=(1,2304,192)f32 #5843=(1,2304,192)f32 #5845=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.4.norm1 1 1 5845 5872 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5845=(1,2304,192)f32 #5872=(1,2304,192)f32
pnnx.Expression pnnx_expr_14063 2 1 5845 154 5873 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5845=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1219 2 1 5872 5873 x.47 $input=5872 $shape=5873 #5872=(1,2304,192)f32 #x.47=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14042 1 1 x.47 5890 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.47=(1,48,48,192)f32
pnnx.Expression pnnx_expr_14037 0 1 5892 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1220 2 1 x.47 5890 x0.47 $input=x.47 $shape=5890 #x.47=(1,48,48,192)f32 #x0.47=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_14034 1 1 x.47 5895 expr=[-1,8,8,int(size(@0,3))] #x.47=(1,48,48,192)f32
torch.permute torch.permute_2616 2 1 x0.47 5892 5893 $input=x0.47 $dims=5892 #x0.47=(1,6,8,6,8,192)f32 #5893=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_68 1 1 5893 5894 memory_format=torch.contiguous_format $input=5893 #5893=(1,6,6,8,8,192)f32 #5894=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_14032 1 1 5845 5897 expr=[-1,64,int(size(@0,2))] #5845=(1,2304,192)f32
pnnx.Expression pnnx_expr_14022 0 1 5908 expr=-2
pnnx.Expression pnnx_expr_14021 0 1 5909 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.4.attn 0 1 relative_position_bias_table.47 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.47=(225,6)f32
pnnx.Attribute pnnx_unique_45 0 1 relative_position_index.47 @relative_position_index=(64,64)i64 #relative_position_index.47=(64,64)i64
Tensor.view Tensor.view_1221 2 1 5894 5895 x_windows.47 $input=5894 $shape=5895 #5894=(1,6,6,8,8,192)f32 #x_windows.47=(36,8,8,192)f32
Tensor.view Tensor.view_1222 2 1 x_windows.47 5897 x1.47 $input=x_windows.47 $shape=5897 #x_windows.47=(36,8,8,192)f32 #x1.47=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.4.attn.qkv 1 1 x1.47 5929 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.47=(36,64,192)f32 #5929=(36,64,576)f32
pnnx.Expression pnnx_expr_14006 1 1 x1.47 5932 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.47=(36,64,192)f32
pnnx.Expression pnnx_expr_14001 0 1 5934 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_478 2 1 5929 5932 5933 $input=5929 $shape=5932 #5929=(36,64,576)f32 #5933=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_14000 0 1 21446 expr=0
pnnx.Expression pnnx_expr_13999 0 1 21447 expr=0
pnnx.Expression pnnx_expr_13998 0 1 21448 expr=0
pnnx.Expression pnnx_expr_13997 0 1 21449 expr=1
pnnx.Expression pnnx_expr_13996 0 1 21450 expr=0
pnnx.Expression pnnx_expr_13995 0 1 21451 expr=2
torch.permute torch.permute_2617 2 1 5933 5934 qkv0.47 $input=5933 $dims=5934 #5933=(36,64,3,6,32)f32 #qkv0.47=(3,36,6,64,32)f32
Tensor.select Tensor.select_716 3 1 qkv0.47 21446 21447 q.47 $input=qkv0.47 $dim=21446 $index=21447 #qkv0.47=(3,36,6,64,32)f32 #q.47=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13994 1 1 q.47 q0.47 expr=mul(@0,1.767767e-01) #q.47=(36,6,64,32)f32 #q0.47=(36,6,64,32)f32
Tensor.select Tensor.select_717 3 1 qkv0.47 21448 21449 k.47 $input=qkv0.47 $dim=21448 $index=21449 #qkv0.47=(3,36,6,64,32)f32 #k.47=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13992 0 1 5942 expr=[-1]
Tensor.view Tensor.view_1223 2 1 relative_position_index.47 5942 5943 $input=relative_position_index.47 $shape=5942 #relative_position_index.47=(64,64)i64 #5943=(4096)i64
pnnx.Expression pnnx_expr_13991 1 1 5943 5944 expr=[@0] #5943=(4096)i64
pnnx.Expression pnnx_expr_13988 0 1 5946 expr=[64,64,-1]
Tensor.index Tensor.index_348 2 1 relative_position_bias_table.47 5944 5945 $input=relative_position_bias_table.47 $expr=5944 #relative_position_bias_table.47=(225,6)f32 #5945=(4096,6)f32
pnnx.Expression pnnx_expr_13984 0 1 5948 expr=[2,0,1]
Tensor.view Tensor.view_1224 2 1 5945 5946 relative_position_bias.47 $input=5945 $shape=5946 #5945=(4096,6)f32 #relative_position_bias.47=(64,64,6)f32
pnnx.Expression pnnx_expr_13983 0 1 21459 expr=0
torch.permute torch.permute_2618 2 1 relative_position_bias.47 5948 5949 $input=relative_position_bias.47 $dims=5948 #relative_position_bias.47=(64,64,6)f32 #5949=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_69 1 1 5949 relative_position_bias0.47 memory_format=torch.contiguous_format $input=5949 #5949=(6,64,64)f32 #relative_position_bias0.47=(6,64,64)f32
torch.transpose torch.transpose_3011 3 1 k.47 5908 5909 5940 $input=k.47 $dim0=5908 $dim1=5909 #k.47=(36,6,64,32)f32 #5940=(36,6,32,64)f32
torch.matmul torch.matmul_2248 2 1 q0.47 5940 attn.95 $input=q0.47 $other=5940 #q0.47=(36,6,64,32)f32 #5940=(36,6,32,64)f32 #attn.95=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3262 2 1 relative_position_bias0.47 21459 5951 $input=relative_position_bias0.47 $dim=21459 #relative_position_bias0.47=(6,64,64)f32 #5951=(1,6,64,64)f32
pnnx.Expression pnnx_expr_13981 2 1 attn.95 5951 input.105 expr=add(@0,@1) #attn.95=(36,6,64,64)f32 #5951=(1,6,64,64)f32 #input.105=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.4.attn.softmax 1 1 input.105 5953 dim=-1 #input.105=(36,6,64,64)f32 #5953=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.4.attn.attn_drop 1 1 5953 5954 #5953=(36,6,64,64)f32 #5954=(36,6,64,64)f32
Tensor.select Tensor.select_718 3 1 qkv0.47 21450 21451 v.47 $input=qkv0.47 $dim=21450 $index=21451 #qkv0.47=(3,36,6,64,32)f32 #v.47=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13980 0 1 21461 expr=1
pnnx.Expression pnnx_expr_13979 0 1 21462 expr=2
torch.matmul torch.matmul_2249 2 1 5954 v.47 5955 $input=5954 $other=v.47 #5954=(36,6,64,64)f32 #v.47=(36,6,64,32)f32 #5955=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13978 1 1 x1.47 5957 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.47=(36,64,192)f32
torch.transpose torch.transpose_3012 3 1 5955 21461 21462 5956 $input=5955 $dim0=21461 $dim1=21462 #5955=(36,6,64,32)f32 #5956=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_479 2 1 5956 5957 input0.49 $input=5956 $shape=5957 #5956=(36,64,6,32)f32 #input0.49=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.4.attn.proj 1 1 input0.49 5959 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.49=(36,64,192)f32 #5959=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.4.attn.proj_drop 1 1 5959 5960 #5959=(36,64,192)f32 #5960=(36,64,192)f32
pnnx.Expression pnnx_expr_13974 1 1 5845 5961 expr=[-1,8,8,int(size(@0,2))] #5845=(1,2304,192)f32
pnnx.Expression pnnx_expr_13961 1 1 154 5967 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_13954 0 1 5969 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1225 2 1 5960 5961 windows.47 $input=5960 $shape=5961 #5960=(36,64,192)f32 #windows.47=(36,8,8,192)f32
Tensor.view Tensor.view_1226 2 1 windows.47 5967 x2.47 $input=windows.47 $shape=5967 #windows.47=(36,8,8,192)f32 #x2.47=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_13951 1 1 154 5972 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2619 2 1 x2.47 5969 5970 $input=x2.47 $dims=5969 #x2.47=(1,6,6,8,8,192)f32 #5970=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_70 1 1 5970 5971 memory_format=torch.contiguous_format $input=5970 #5970=(1,6,8,6,8,192)f32 #5971=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_13948 2 1 5845 154 5976 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5845=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1227 2 1 5971 5972 x3.47 $input=5971 $shape=5972 #5971=(1,6,8,6,8,192)f32 #x3.47=(1,48,48,192)f32
Tensor.view Tensor.view_1228 2 1 x3.47 5976 x4.47 $input=x3.47 $shape=5976 #x3.47=(1,48,48,192)f32 #x4.47=(1,2304,192)f32
pnnx.Expression pnnx_expr_13946 2 1 5845 x4.47 input.107 expr=add(@0,@1) #5845=(1,2304,192)f32 #x4.47=(1,2304,192)f32 #input.107=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.4.norm2 1 1 input.107 5980 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.107=(1,2304,192)f32 #5980=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.4.mlp.fc1 1 1 5980 5985 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #5980=(1,2304,192)f32 #5985=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.4.mlp.act 1 1 5985 5986 #5985=(1,2304,384)f32 #5986=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.4.mlp.drop 1 1 5986 5987 #5986=(1,2304,384)f32 #5987=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.4.mlp.fc2 1 1 5987 5988 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #5987=(1,2304,384)f32 #5988=(1,2304,192)f32
nn.Dropout pnnx_unique_46 1 1 5988 5989 #5988=(1,2304,192)f32 #5989=(1,2304,192)f32
pnnx.Expression pnnx_expr_13944 2 1 input.107 5989 5991 expr=add(@0,@1) #input.107=(1,2304,192)f32 #5989=(1,2304,192)f32 #5991=(1,2304,192)f32
pnnx.Attribute layers_dfe.3.residual_group.blocks.5 0 1 attn_mask.25 @attn_mask=(36,64,64)f32 #attn_mask.25=(36,64,64)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.5.norm1 1 1 5991 6020 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #5991=(1,2304,192)f32 #6020=(1,2304,192)f32
pnnx.Expression pnnx_expr_13921 2 1 5991 154 6021 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #5991=(1,2304,192)f32 #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_13919 0 1 6023 expr=[-4,-4]
pnnx.Expression pnnx_expr_13917 0 1 6024 expr=[1,2]
Tensor.view Tensor.view_1229 2 1 6020 6021 x.49 $input=6020 $shape=6021 #6020=(1,2304,192)f32 #x.49=(1,48,48,192)f32
torch.roll torch.roll_2442 3 1 x.49 6023 6024 x0.49 $input=x.49 $shifts=6023 $dims=6024 #x.49=(1,48,48,192)f32 #x0.49=(1,48,48,192)f32
pnnx.Expression pnnx_expr_13895 1 1 x0.49 6041 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.49=(1,48,48,192)f32
pnnx.Expression pnnx_expr_13890 0 1 6043 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1230 2 1 x0.49 6041 x1.49 $input=x0.49 $shape=6041 #x0.49=(1,48,48,192)f32 #x1.49=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_13887 1 1 x0.49 6046 expr=[-1,8,8,int(size(@0,3))] #x0.49=(1,48,48,192)f32
torch.permute torch.permute_2620 2 1 x1.49 6043 6044 $input=x1.49 $dims=6043 #x1.49=(1,6,8,6,8,192)f32 #6044=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_71 1 1 6044 6045 memory_format=torch.contiguous_format $input=6044 #6044=(1,6,6,8,8,192)f32 #6045=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_13885 1 1 5991 6048 expr=[-1,64,int(size(@0,2))] #5991=(1,2304,192)f32
pnnx.Expression pnnx_expr_13875 0 1 6059 expr=-2
pnnx.Expression pnnx_expr_13874 0 1 6060 expr=-1
pnnx.Attribute layers_dfe.3.residual_group.blocks.5.attn 0 1 relative_position_bias_table.49 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.49=(225,6)f32
pnnx.Attribute pnnx_unique_47 0 1 relative_position_index.49 @relative_position_index=(64,64)i64 #relative_position_index.49=(64,64)i64
Tensor.view Tensor.view_1231 2 1 6045 6046 x_windows.49 $input=6045 $shape=6046 #6045=(1,6,6,8,8,192)f32 #x_windows.49=(36,8,8,192)f32
Tensor.view Tensor.view_1232 2 1 x_windows.49 6048 x2.49 $input=x_windows.49 $shape=6048 #x_windows.49=(36,8,8,192)f32 #x2.49=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.5.attn.qkv 1 1 x2.49 6084 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.49=(36,64,192)f32 #6084=(36,64,576)f32
pnnx.Expression pnnx_expr_13855 1 1 x2.49 6087 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.49=(36,64,192)f32
pnnx.Expression pnnx_expr_13850 0 1 6089 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_480 2 1 6084 6087 6088 $input=6084 $shape=6087 #6084=(36,64,576)f32 #6088=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_13849 0 1 21505 expr=0
pnnx.Expression pnnx_expr_13848 0 1 21506 expr=0
pnnx.Expression pnnx_expr_13847 0 1 21507 expr=0
pnnx.Expression pnnx_expr_13846 0 1 21508 expr=1
pnnx.Expression pnnx_expr_13845 0 1 21509 expr=0
pnnx.Expression pnnx_expr_13844 0 1 21510 expr=2
torch.permute torch.permute_2621 2 1 6088 6089 qkv0.49 $input=6088 $dims=6089 #6088=(36,64,3,6,32)f32 #qkv0.49=(3,36,6,64,32)f32
Tensor.select Tensor.select_719 3 1 qkv0.49 21505 21506 q.49 $input=qkv0.49 $dim=21505 $index=21506 #qkv0.49=(3,36,6,64,32)f32 #q.49=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13843 1 1 q.49 q0.49 expr=mul(@0,1.767767e-01) #q.49=(36,6,64,32)f32 #q0.49=(36,6,64,32)f32
Tensor.select Tensor.select_720 3 1 qkv0.49 21507 21508 k.49 $input=qkv0.49 $dim=21507 $index=21508 #qkv0.49=(3,36,6,64,32)f32 #k.49=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13841 0 1 6097 expr=[-1]
Tensor.view Tensor.view_1233 2 1 relative_position_index.49 6097 6098 $input=relative_position_index.49 $shape=6097 #relative_position_index.49=(64,64)i64 #6098=(4096)i64
pnnx.Expression pnnx_expr_13840 1 1 6098 6099 expr=[@0] #6098=(4096)i64
pnnx.Expression pnnx_expr_13837 0 1 6101 expr=[64,64,-1]
Tensor.index Tensor.index_349 2 1 relative_position_bias_table.49 6099 6100 $input=relative_position_bias_table.49 $expr=6099 #relative_position_bias_table.49=(225,6)f32 #6100=(4096,6)f32
pnnx.Expression pnnx_expr_13833 0 1 6103 expr=[2,0,1]
Tensor.view Tensor.view_1234 2 1 6100 6101 relative_position_bias.49 $input=6100 $shape=6101 #6100=(4096,6)f32 #relative_position_bias.49=(64,64,6)f32
pnnx.Expression pnnx_expr_13832 0 1 21518 expr=0
torch.permute torch.permute_2622 2 1 relative_position_bias.49 6103 6104 $input=relative_position_bias.49 $dims=6103 #relative_position_bias.49=(64,64,6)f32 #6104=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_72 1 1 6104 relative_position_bias0.49 memory_format=torch.contiguous_format $input=6104 #6104=(6,64,64)f32 #relative_position_bias0.49=(6,64,64)f32
torch.transpose torch.transpose_3013 3 1 k.49 6059 6060 6095 $input=k.49 $dim0=6059 $dim1=6060 #k.49=(36,6,64,32)f32 #6095=(36,6,32,64)f32
torch.matmul torch.matmul_2250 2 1 q0.49 6095 attn.99 $input=q0.49 $other=6095 #q0.49=(36,6,64,32)f32 #6095=(36,6,32,64)f32 #attn.99=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3263 2 1 relative_position_bias0.49 21518 6106 $input=relative_position_bias0.49 $dim=21518 #relative_position_bias0.49=(6,64,64)f32 #6106=(1,6,64,64)f32
pnnx.Expression pnnx_expr_13830 2 1 attn.99 6106 attn0.25 expr=add(@0,@1) #attn.99=(36,6,64,64)f32 #6106=(1,6,64,64)f32 #attn0.25=(36,6,64,64)f32
pnnx.Expression pnnx_expr_13821 2 1 x2.49 attn_mask.25 6113 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.49=(36,64,192)f32 #attn_mask.25=(36,64,64)f32
pnnx.Expression pnnx_expr_13820 0 1 21523 expr=1
pnnx.Expression pnnx_expr_13819 0 1 21524 expr=0
Tensor.view Tensor.view_1235 2 1 attn0.25 6113 6114 $input=attn0.25 $shape=6113 #attn0.25=(36,6,64,64)f32 #6114=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3264 2 1 attn_mask.25 21523 6115 $input=attn_mask.25 $dim=21523 #attn_mask.25=(36,64,64)f32 #6115=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3265 2 1 6115 21524 6116 $input=6115 $dim=21524 #6115=(36,1,64,64)f32 #6116=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_13817 2 1 6114 6116 attn1.25 expr=add(@0,@1) #6114=(1,36,6,64,64)f32 #6116=(1,36,1,64,64)f32 #attn1.25=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_13814 1 1 x2.49 6118 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.49=(36,64,192)f32
Tensor.view Tensor.view_1236 2 1 attn1.25 6118 input.109 $input=attn1.25 $shape=6118 #attn1.25=(1,36,6,64,64)f32 #input.109=(36,6,64,64)f32
nn.Softmax layers_dfe.3.residual_group.blocks.5.attn.softmax 1 1 input.109 6120 dim=-1 #input.109=(36,6,64,64)f32 #6120=(36,6,64,64)f32
nn.Dropout layers_dfe.3.residual_group.blocks.5.attn.attn_drop 1 1 6120 6121 #6120=(36,6,64,64)f32 #6121=(36,6,64,64)f32
Tensor.select Tensor.select_721 3 1 qkv0.49 21509 21510 v.49 $input=qkv0.49 $dim=21509 $index=21510 #qkv0.49=(3,36,6,64,32)f32 #v.49=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13813 0 1 21528 expr=1
pnnx.Expression pnnx_expr_13812 0 1 21529 expr=2
torch.matmul torch.matmul_2251 2 1 6121 v.49 6122 $input=6121 $other=v.49 #6121=(36,6,64,64)f32 #v.49=(36,6,64,32)f32 #6122=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13811 1 1 x2.49 6124 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.49=(36,64,192)f32
torch.transpose torch.transpose_3014 3 1 6122 21528 21529 6123 $input=6122 $dim0=21528 $dim1=21529 #6122=(36,6,64,32)f32 #6123=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_481 2 1 6123 6124 input0.51 $input=6123 $shape=6124 #6123=(36,64,6,32)f32 #input0.51=(36,64,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.5.attn.proj 1 1 input0.51 6126 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.51=(36,64,192)f32 #6126=(36,64,192)f32
nn.Dropout layers_dfe.3.residual_group.blocks.5.attn.proj_drop 1 1 6126 6127 #6126=(36,64,192)f32 #6127=(36,64,192)f32
pnnx.Expression pnnx_expr_13807 1 1 5991 6128 expr=[-1,8,8,int(size(@0,2))] #5991=(1,2304,192)f32
pnnx.Expression pnnx_expr_13794 1 1 154 6134 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32
pnnx.Expression pnnx_expr_13787 0 1 6136 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1237 2 1 6127 6128 windows.49 $input=6127 $shape=6128 #6127=(36,64,192)f32 #windows.49=(36,8,8,192)f32
Tensor.view Tensor.view_1238 2 1 windows.49 6134 x3.49 $input=windows.49 $shape=6134 #windows.49=(36,8,8,192)f32 #x3.49=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_13784 1 1 154 6139 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32
torch.permute torch.permute_2623 2 1 x3.49 6136 6137 $input=x3.49 $dims=6136 #x3.49=(1,6,6,8,8,192)f32 #6137=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_73 1 1 6137 6138 memory_format=torch.contiguous_format $input=6137 #6137=(1,6,8,6,8,192)f32 #6138=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_13781 0 1 6141 expr=[4,4]
pnnx.Expression pnnx_expr_13778 0 1 6142 expr=[1,2]
Tensor.view Tensor.view_1239 2 1 6138 6139 shifted_x.25 $input=6138 $shape=6139 #6138=(1,6,8,6,8,192)f32 #shifted_x.25=(1,48,48,192)f32
pnnx.Expression pnnx_expr_13775 2 1 5991 154 6146 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #5991=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.roll torch.roll_2443 3 1 shifted_x.25 6141 6142 x4.49 $input=shifted_x.25 $shifts=6141 $dims=6142 #shifted_x.25=(1,48,48,192)f32 #x4.49=(1,48,48,192)f32
Tensor.view Tensor.view_1240 2 1 x4.49 6146 x5.25 $input=x4.49 $shape=6146 #x4.49=(1,48,48,192)f32 #x5.25=(1,2304,192)f32
pnnx.Expression pnnx_expr_13773 2 1 5991 x5.25 input.111 expr=add(@0,@1) #5991=(1,2304,192)f32 #x5.25=(1,2304,192)f32 #input.111=(1,2304,192)f32
nn.LayerNorm layers_dfe.3.residual_group.blocks.5.norm2 1 1 input.111 6150 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.111=(1,2304,192)f32 #6150=(1,2304,192)f32
nn.Linear layers_dfe.3.residual_group.blocks.5.mlp.fc1 1 1 6150 6155 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #6150=(1,2304,192)f32 #6155=(1,2304,384)f32
nn.GELU layers_dfe.3.residual_group.blocks.5.mlp.act 1 1 6155 6156 #6155=(1,2304,384)f32 #6156=(1,2304,384)f32
nn.Dropout layers_dfe.3.residual_group.blocks.5.mlp.drop 1 1 6156 6157 #6156=(1,2304,384)f32 #6157=(1,2304,384)f32
nn.Linear layers_dfe.3.residual_group.blocks.5.mlp.fc2 1 1 6157 6158 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6157=(1,2304,384)f32 #6158=(1,2304,192)f32
nn.Dropout pnnx_unique_48 1 1 6158 6159 #6158=(1,2304,192)f32 #6159=(1,2304,192)f32
pnnx.Expression pnnx_expr_13771 2 1 input.111 6159 6161 expr=add(@0,@1) #input.111=(1,2304,192)f32 #6159=(1,2304,192)f32 #6161=(1,2304,192)f32
pnnx.Expression pnnx_expr_13769 0 1 6163 expr=1
pnnx.Expression pnnx_expr_13768 0 1 6164 expr=2
pnnx.Expression pnnx_expr_13763 2 1 6161 154 6170 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #6161=(1,2304,192)f32 #154=(1,192,48,48)f32
torch.transpose torch.transpose_3015 3 1 6161 6163 6164 6169 $input=6161 $dim0=6163 $dim1=6164 #6161=(1,2304,192)f32 #6169=(1,192,2304)f32
Tensor.view Tensor.view_1241 2 1 6169 6170 input.113 $input=6169 $shape=6170 #6169=(1,192,2304)f32 #input.113=(1,192,48,48)f32
nn.Conv2d layers_dfe.3.conv 1 1 input.113 6172 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.113=(1,192,48,48)f32 #6172=(1,192,48,48)f32
pnnx.Expression pnnx_expr_13762 0 1 6173 expr=-1
pnnx.Expression pnnx_expr_13761 0 1 6174 expr=2
pnnx.Expression pnnx_expr_13760 0 1 6175 expr=1
pnnx.Expression pnnx_expr_13759 0 1 21556 expr=2
torch.flatten torch.flatten_2187 3 1 6172 6174 6173 6176 $input=6172 $start_dim=6174 $end_dim=6173 #6172=(1,192,48,48)f32 #6176=(1,192,2304)f32
torch.transpose torch.transpose_3016 3 1 6176 6175 21556 6177 $input=6176 $dim0=6175 $dim1=21556 #6176=(1,192,2304)f32 #6177=(1,2304,192)f32
pnnx.Expression pnnx_expr_13758 2 1 6177 5196 6178 expr=add(@0,@1) #6177=(1,2304,192)f32 #5196=(1,2304,192)f32 #6178=(1,2304,192)f32
nn.LayerNorm layers_dfe.4.residual_group.blocks.0.norm1 1 1 6178 6222 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6178=(1,2304,192)f32 #6222=(1,2304,192)f32
pnnx.Expression pnnx_expr_13735 2 1 6178 154 6223 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6178=(1,2304,192)f32 #154=(1,192,48,48)f32
Tensor.view Tensor.view_1242 2 1 6222 6223 x.51 $input=6222 $shape=6223 #6222=(1,2304,192)f32 #x.51=(1,48,48,192)f32
pnnx.Expression pnnx_expr_13714 1 1 x.51 6240 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.51=(1,48,48,192)f32
pnnx.Expression pnnx_expr_13709 0 1 6242 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1243 2 1 x.51 6240 x0.51 $input=x.51 $shape=6240 #x.51=(1,48,48,192)f32 #x0.51=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_13706 1 1 x.51 6245 expr=[-1,8,8,int(size(@0,3))] #x.51=(1,48,48,192)f32
torch.permute torch.permute_2624 2 1 x0.51 6242 6243 $input=x0.51 $dims=6242 #x0.51=(1,6,8,6,8,192)f32 #6243=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_74 1 1 6243 6244 memory_format=torch.contiguous_format $input=6243 #6243=(1,6,6,8,8,192)f32 #6244=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_13704 1 1 6178 6247 expr=[-1,64,int(size(@0,2))] #6178=(1,2304,192)f32
pnnx.Expression pnnx_expr_13694 0 1 6258 expr=-2
pnnx.Expression pnnx_expr_13693 0 1 6259 expr=-1
pnnx.Attribute layers_dfe.4.residual_group.blocks.0.attn 0 1 relative_position_bias_table.51 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.51=(225,6)f32
pnnx.Attribute pnnx_unique_49 0 1 relative_position_index.51 @relative_position_index=(64,64)i64 #relative_position_index.51=(64,64)i64
Tensor.view Tensor.view_1244 2 1 6244 6245 x_windows.51 $input=6244 $shape=6245 #6244=(1,6,6,8,8,192)f32 #x_windows.51=(36,8,8,192)f32
Tensor.view Tensor.view_1245 2 1 x_windows.51 6247 x1.51 $input=x_windows.51 $shape=6247 #x_windows.51=(36,8,8,192)f32 #x1.51=(36,64,192)f32
nn.Linear layers_dfe.4.residual_group.blocks.0.attn.qkv 1 1 x1.51 6279 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.51=(36,64,192)f32 #6279=(36,64,576)f32
pnnx.Expression pnnx_expr_13678 1 1 x1.51 6282 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.51=(36,64,192)f32
pnnx.Expression pnnx_expr_13673 0 1 6284 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_482 2 1 6279 6282 6283 $input=6279 $shape=6282 #6279=(36,64,576)f32 #6283=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_13672 0 1 21574 expr=0
pnnx.Expression pnnx_expr_13671 0 1 21575 expr=0
pnnx.Expression pnnx_expr_13670 0 1 21576 expr=0
pnnx.Expression pnnx_expr_13669 0 1 21577 expr=1
pnnx.Expression pnnx_expr_13668 0 1 21578 expr=0
pnnx.Expression pnnx_expr_13667 0 1 21579 expr=2
torch.permute torch.permute_2625 2 1 6283 6284 qkv0.51 $input=6283 $dims=6284 #6283=(36,64,3,6,32)f32 #qkv0.51=(3,36,6,64,32)f32
Tensor.select Tensor.select_722 3 1 qkv0.51 21574 21575 q.51 $input=qkv0.51 $dim=21574 $index=21575 #qkv0.51=(3,36,6,64,32)f32 #q.51=(36,6,64,32)f32
pnnx.Expression pnnx_expr_13666 1 1 q.51 q0.51 expr=mul(@0,1.767767e-01) #q.51=(36,6,64,32)f32 #q0.51=(36,6,64,32)f32
Tensor.select Tensor.select_723 3 1 qkv0.51 21576 21577 k.51 $input=qkv0.51 $dim=21576 $index=21577 #qkv0.51=(3,36,6,64,32)f32
#k.51=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13664 0 1 6292 expr=[-1] Tensor.view Tensor.view_1246 2 1 relative_position_index.51 6292 6293 $input=relative_position_index.51 $shape=6292 #relative_position_index.51=(64,64)i64 #6293=(4096)i64 pnnx.Expression pnnx_expr_13663 1 1 6293 6294 expr=[@0] #6293=(4096)i64 pnnx.Expression pnnx_expr_13660 0 1 6296 expr=[64,64,-1] Tensor.index Tensor.index_350 2 1 relative_position_bias_table.51 6294 6295 $input=relative_position_bias_table.51 $expr=6294 #relative_position_bias_table.51=(225,6)f32 #6295=(4096,6)f32 pnnx.Expression pnnx_expr_13656 0 1 6298 expr=[2,0,1] Tensor.view Tensor.view_1247 2 1 6295 6296 relative_position_bias.51 $input=6295 $shape=6296 #6295=(4096,6)f32 #relative_position_bias.51=(64,64,6)f32 pnnx.Expression pnnx_expr_13655 0 1 21587 expr=0 torch.permute torch.permute_2626 2 1 relative_position_bias.51 6298 6299 $input=relative_position_bias.51 $dims=6298 #relative_position_bias.51=(64,64,6)f32 #6299=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_75 1 1 6299 relative_position_bias0.51 memory_format=torch.contiguous_format $input=6299 #6299=(6,64,64)f32 #relative_position_bias0.51=(6,64,64)f32 torch.transpose torch.transpose_3017 3 1 k.51 6258 6259 6290 $input=k.51 $dim0=6258 $dim1=6259 #k.51=(36,6,64,32)f32 #6290=(36,6,32,64)f32 torch.matmul torch.matmul_2252 2 1 q0.51 6290 attn.103 $input=q0.51 $other=6290 #q0.51=(36,6,64,32)f32 #6290=(36,6,32,64)f32 #attn.103=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3266 2 1 relative_position_bias0.51 21587 6301 $input=relative_position_bias0.51 $dim=21587 #relative_position_bias0.51=(6,64,64)f32 #6301=(1,6,64,64)f32 pnnx.Expression pnnx_expr_13653 2 1 attn.103 6301 input.115 expr=add(@0,@1) #attn.103=(36,6,64,64)f32 #6301=(1,6,64,64)f32 #input.115=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.0.attn.softmax 1 1 input.115 6303 dim=-1 #input.115=(36,6,64,64)f32 #6303=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.0.attn.attn_drop 1 1 6303 6304 #6303=(36,6,64,64)f32 #6304=(36,6,64,64)f32 Tensor.select Tensor.select_724 3 1 qkv0.51 21578 21579 v.51 $input=qkv0.51 $dim=21578 $index=21579 #qkv0.51=(3,36,6,64,32)f32 #v.51=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13652 0 1 21589 expr=1 pnnx.Expression pnnx_expr_13651 0 1 21590 expr=2 torch.matmul torch.matmul_2253 2 1 6304 v.51 6305 $input=6304 $other=v.51 #6304=(36,6,64,64)f32 #v.51=(36,6,64,32)f32 #6305=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13650 1 1 x1.51 6307 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.51=(36,64,192)f32 torch.transpose torch.transpose_3018 3 1 6305 21589 21590 6306 $input=6305 $dim0=21589 $dim1=21590 #6305=(36,6,64,32)f32 #6306=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_483 2 1 6306 6307 input0.53 $input=6306 $shape=6307 #6306=(36,64,6,32)f32 #input0.53=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.0.attn.proj 1 1 input0.53 6309 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.53=(36,64,192)f32 #6309=(36,64,192)f32 nn.Dropout layers_dfe.4.residual_group.blocks.0.attn.proj_drop 1 1 6309 6310 #6309=(36,64,192)f32 #6310=(36,64,192)f32 pnnx.Expression pnnx_expr_13646 1 1 6178 6311 expr=[-1,8,8,int(size(@0,2))] #6178=(1,2304,192)f32 pnnx.Expression pnnx_expr_13633 1 1 154 6317 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13626 0 1 6319 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1248 2 1 6310 6311 windows.51 $input=6310 $shape=6311 #6310=(36,64,192)f32 
#windows.51=(36,8,8,192)f32 Tensor.view Tensor.view_1249 2 1 windows.51 6317 x2.51 $input=windows.51 $shape=6317 #windows.51=(36,8,8,192)f32 #x2.51=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13623 1 1 154 6322 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2627 2 1 x2.51 6319 6320 $input=x2.51 $dims=6319 #x2.51=(1,6,6,8,8,192)f32 #6320=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_76 1 1 6320 6321 memory_format=torch.contiguous_format $input=6320 #6320=(1,6,8,6,8,192)f32 #6321=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13620 2 1 6178 154 6326 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6178=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1250 2 1 6321 6322 x3.51 $input=6321 $shape=6322 #6321=(1,6,8,6,8,192)f32 #x3.51=(1,48,48,192)f32 Tensor.view Tensor.view_1251 2 1 x3.51 6326 x4.51 $input=x3.51 $shape=6326 #x3.51=(1,48,48,192)f32 #x4.51=(1,2304,192)f32 pnnx.Expression pnnx_expr_13618 2 1 6178 x4.51 input.117 expr=add(@0,@1) #6178=(1,2304,192)f32 #x4.51=(1,2304,192)f32 #input.117=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.0.norm2 1 1 input.117 6330 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.117=(1,2304,192)f32 #6330=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.0.mlp.fc1 1 1 6330 6335 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #6330=(1,2304,192)f32 #6335=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.0.mlp.act 1 1 6335 6336 #6335=(1,2304,384)f32 #6336=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.0.mlp.drop 1 1 6336 6337 #6336=(1,2304,384)f32 #6337=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.0.mlp.fc2 1 1 6337 6338 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6337=(1,2304,384)f32 #6338=(1,2304,192)f32 nn.Dropout pnnx_unique_50 1 1 6338 6339 #6338=(1,2304,192)f32 #6339=(1,2304,192)f32 pnnx.Expression pnnx_expr_13616 2 1 input.117 6339 6341 expr=add(@0,@1) #input.117=(1,2304,192)f32 #6339=(1,2304,192)f32 #6341=(1,2304,192)f32 pnnx.Attribute layers_dfe.4.residual_group.blocks.1 0 1 attn_mask.27 @attn_mask=(36,64,64)f32 #attn_mask.27=(36,64,64)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.1.norm1 1 1 6341 6370 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6341=(1,2304,192)f32 #6370=(1,2304,192)f32 pnnx.Expression pnnx_expr_13593 2 1 6341 154 6371 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6341=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13591 0 1 6373 expr=[-4,-4] pnnx.Expression pnnx_expr_13589 0 1 6374 expr=[1,2] Tensor.view Tensor.view_1252 2 1 6370 6371 x.53 $input=6370 $shape=6371 #6370=(1,2304,192)f32 #x.53=(1,48,48,192)f32 torch.roll torch.roll_2444 3 1 x.53 6373 6374 x0.53 $input=x.53 $shifts=6373 $dims=6374 #x.53=(1,48,48,192)f32 #x0.53=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13567 1 1 x0.53 6391 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.53=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13562 0 1 6393 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1253 2 1 x0.53 6391 x1.53 $input=x0.53 $shape=6391 #x0.53=(1,48,48,192)f32 #x1.53=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13559 1 1 x0.53 6396 expr=[-1,8,8,int(size(@0,3))] #x0.53=(1,48,48,192)f32 torch.permute torch.permute_2628 2 1 x1.53 6393 6394 $input=x1.53 $dims=6393 
#x1.53=(1,6,8,6,8,192)f32 #6394=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_77 1 1 6394 6395 memory_format=torch.contiguous_format $input=6394 #6394=(1,6,6,8,8,192)f32 #6395=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13557 1 1 6341 6398 expr=[-1,64,int(size(@0,2))] #6341=(1,2304,192)f32 pnnx.Expression pnnx_expr_13547 0 1 6409 expr=-2 pnnx.Expression pnnx_expr_13546 0 1 6410 expr=-1 pnnx.Attribute layers_dfe.4.residual_group.blocks.1.attn 0 1 relative_position_bias_table.53 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.53=(225,6)f32 pnnx.Attribute pnnx_unique_51 0 1 relative_position_index.53 @relative_position_index=(64,64)i64 #relative_position_index.53=(64,64)i64 Tensor.view Tensor.view_1254 2 1 6395 6396 x_windows.53 $input=6395 $shape=6396 #6395=(1,6,6,8,8,192)f32 #x_windows.53=(36,8,8,192)f32 Tensor.view Tensor.view_1255 2 1 x_windows.53 6398 x2.53 $input=x_windows.53 $shape=6398 #x_windows.53=(36,8,8,192)f32 #x2.53=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.1.attn.qkv 1 1 x2.53 6434 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.53=(36,64,192)f32 #6434=(36,64,576)f32 pnnx.Expression pnnx_expr_13527 1 1 x2.53 6437 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.53=(36,64,192)f32 pnnx.Expression pnnx_expr_13522 0 1 6439 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_484 2 1 6434 6437 6438 $input=6434 $shape=6437 #6434=(36,64,576)f32 #6438=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_13521 0 1 21633 expr=0 pnnx.Expression pnnx_expr_13520 0 1 21634 expr=0 pnnx.Expression pnnx_expr_13519 0 1 21635 expr=0 pnnx.Expression pnnx_expr_13518 0 1 21636 expr=1 pnnx.Expression pnnx_expr_13517 0 1 21637 expr=0 pnnx.Expression pnnx_expr_13516 0 1 21638 expr=2 torch.permute torch.permute_2629 2 1 6438 6439 qkv0.53 $input=6438 $dims=6439 #6438=(36,64,3,6,32)f32 #qkv0.53=(3,36,6,64,32)f32 Tensor.select Tensor.select_725 3 1 qkv0.53 21633 21634 q.53 $input=qkv0.53 $dim=21633 $index=21634 #qkv0.53=(3,36,6,64,32)f32 #q.53=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13515 1 1 q.53 q0.53 expr=mul(@0,1.767767e-01) #q.53=(36,6,64,32)f32 #q0.53=(36,6,64,32)f32 Tensor.select Tensor.select_726 3 1 qkv0.53 21635 21636 k.53 $input=qkv0.53 $dim=21635 $index=21636 #qkv0.53=(3,36,6,64,32)f32 #k.53=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13513 0 1 6447 expr=[-1] Tensor.view Tensor.view_1256 2 1 relative_position_index.53 6447 6448 $input=relative_position_index.53 $shape=6447 #relative_position_index.53=(64,64)i64 #6448=(4096)i64 pnnx.Expression pnnx_expr_13512 1 1 6448 6449 expr=[@0] #6448=(4096)i64 pnnx.Expression pnnx_expr_13509 0 1 6451 expr=[64,64,-1] Tensor.index Tensor.index_351 2 1 relative_position_bias_table.53 6449 6450 $input=relative_position_bias_table.53 $expr=6449 #relative_position_bias_table.53=(225,6)f32 #6450=(4096,6)f32 pnnx.Expression pnnx_expr_13505 0 1 6453 expr=[2,0,1] Tensor.view Tensor.view_1257 2 1 6450 6451 relative_position_bias.53 $input=6450 $shape=6451 #6450=(4096,6)f32 #relative_position_bias.53=(64,64,6)f32 pnnx.Expression pnnx_expr_13504 0 1 21646 expr=0 torch.permute torch.permute_2630 2 1 relative_position_bias.53 6453 6454 $input=relative_position_bias.53 $dims=6453 #relative_position_bias.53=(64,64,6)f32 #6454=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_78 1 1 6454 relative_position_bias0.53 memory_format=torch.contiguous_format $input=6454 #6454=(6,64,64)f32 #relative_position_bias0.53=(6,64,64)f32 torch.transpose torch.transpose_3019 3 1 k.53 6409 6410 6445 
$input=k.53 $dim0=6409 $dim1=6410 #k.53=(36,6,64,32)f32 #6445=(36,6,32,64)f32 torch.matmul torch.matmul_2254 2 1 q0.53 6445 attn.107 $input=q0.53 $other=6445 #q0.53=(36,6,64,32)f32 #6445=(36,6,32,64)f32 #attn.107=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3267 2 1 relative_position_bias0.53 21646 6456 $input=relative_position_bias0.53 $dim=21646 #relative_position_bias0.53=(6,64,64)f32 #6456=(1,6,64,64)f32 pnnx.Expression pnnx_expr_13502 2 1 attn.107 6456 attn0.27 expr=add(@0,@1) #attn.107=(36,6,64,64)f32 #6456=(1,6,64,64)f32 #attn0.27=(36,6,64,64)f32 pnnx.Expression pnnx_expr_13493 2 1 x2.53 attn_mask.27 6463 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.53=(36,64,192)f32 #attn_mask.27=(36,64,64)f32 pnnx.Expression pnnx_expr_13492 0 1 21651 expr=1 pnnx.Expression pnnx_expr_13491 0 1 21652 expr=0 Tensor.view Tensor.view_1258 2 1 attn0.27 6463 6464 $input=attn0.27 $shape=6463 #attn0.27=(36,6,64,64)f32 #6464=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3268 2 1 attn_mask.27 21651 6465 $input=attn_mask.27 $dim=21651 #attn_mask.27=(36,64,64)f32 #6465=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3269 2 1 6465 21652 6466 $input=6465 $dim=21652 #6465=(36,1,64,64)f32 #6466=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_13489 2 1 6464 6466 attn1.27 expr=add(@0,@1) #6464=(1,36,6,64,64)f32 #6466=(1,36,1,64,64)f32 #attn1.27=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_13486 1 1 x2.53 6468 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.53=(36,64,192)f32 Tensor.view Tensor.view_1259 2 1 attn1.27 6468 input.119 $input=attn1.27 $shape=6468 #attn1.27=(1,36,6,64,64)f32 #input.119=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.1.attn.softmax 1 1 input.119 6470 dim=-1 #input.119=(36,6,64,64)f32 #6470=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.1.attn.attn_drop 1 1 6470 6471 #6470=(36,6,64,64)f32 #6471=(36,6,64,64)f32 Tensor.select Tensor.select_727 3 1 qkv0.53 21637 21638 v.53 $input=qkv0.53 $dim=21637 $index=21638 #qkv0.53=(3,36,6,64,32)f32 #v.53=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13485 0 1 21656 expr=1 pnnx.Expression pnnx_expr_13484 0 1 21657 expr=2 torch.matmul torch.matmul_2255 2 1 6471 v.53 6472 $input=6471 $other=v.53 #6471=(36,6,64,64)f32 #v.53=(36,6,64,32)f32 #6472=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13483 1 1 x2.53 6474 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.53=(36,64,192)f32 torch.transpose torch.transpose_3020 3 1 6472 21656 21657 6473 $input=6472 $dim0=21656 $dim1=21657 #6472=(36,6,64,32)f32 #6473=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_485 2 1 6473 6474 input0.55 $input=6473 $shape=6474 #6473=(36,64,6,32)f32 #input0.55=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.1.attn.proj 1 1 input0.55 6476 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.55=(36,64,192)f32 #6476=(36,64,192)f32 nn.Dropout layers_dfe.4.residual_group.blocks.1.attn.proj_drop 1 1 6476 6477 #6476=(36,64,192)f32 #6477=(36,64,192)f32 pnnx.Expression pnnx_expr_13479 1 1 6341 6478 expr=[-1,8,8,int(size(@0,2))] #6341=(1,2304,192)f32 pnnx.Expression pnnx_expr_13466 1 1 154 6484 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13459 0 1 6486 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1260 2 1 6477 6478 windows.53 $input=6477 $shape=6478 #6477=(36,64,192)f32 #windows.53=(36,8,8,192)f32 Tensor.view Tensor.view_1261 2 1 windows.53 6484 x3.53 $input=windows.53 $shape=6484 #windows.53=(36,8,8,192)f32 
#x3.53=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13456 1 1 154 6489 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2631 2 1 x3.53 6486 6487 $input=x3.53 $dims=6486 #x3.53=(1,6,6,8,8,192)f32 #6487=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_79 1 1 6487 6488 memory_format=torch.contiguous_format $input=6487 #6487=(1,6,8,6,8,192)f32 #6488=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13453 0 1 6491 expr=[4,4] pnnx.Expression pnnx_expr_13450 0 1 6492 expr=[1,2] Tensor.view Tensor.view_1262 2 1 6488 6489 shifted_x.27 $input=6488 $shape=6489 #6488=(1,6,8,6,8,192)f32 #shifted_x.27=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13447 2 1 6341 154 6496 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6341=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2445 3 1 shifted_x.27 6491 6492 x4.53 $input=shifted_x.27 $shifts=6491 $dims=6492 #shifted_x.27=(1,48,48,192)f32 #x4.53=(1,48,48,192)f32 Tensor.view Tensor.view_1263 2 1 x4.53 6496 x5.27 $input=x4.53 $shape=6496 #x4.53=(1,48,48,192)f32 #x5.27=(1,2304,192)f32 pnnx.Expression pnnx_expr_13445 2 1 6341 x5.27 input.121 expr=add(@0,@1) #6341=(1,2304,192)f32 #x5.27=(1,2304,192)f32 #input.121=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.1.norm2 1 1 input.121 6500 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.121=(1,2304,192)f32 #6500=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.1.mlp.fc1 1 1 6500 6505 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #6500=(1,2304,192)f32 #6505=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.1.mlp.act 1 1 6505 6506 #6505=(1,2304,384)f32 #6506=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.1.mlp.drop 1 1 6506 6507 #6506=(1,2304,384)f32 #6507=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.1.mlp.fc2 1 1 6507 6508 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6507=(1,2304,384)f32 #6508=(1,2304,192)f32 nn.Dropout pnnx_unique_52 1 1 6508 6509 #6508=(1,2304,192)f32 #6509=(1,2304,192)f32 pnnx.Expression pnnx_expr_13443 2 1 input.121 6509 6511 expr=add(@0,@1) #input.121=(1,2304,192)f32 #6509=(1,2304,192)f32 #6511=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.2.norm1 1 1 6511 6538 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6511=(1,2304,192)f32 #6538=(1,2304,192)f32 pnnx.Expression pnnx_expr_13421 2 1 6511 154 6539 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6511=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1264 2 1 6538 6539 x.55 $input=6538 $shape=6539 #6538=(1,2304,192)f32 #x.55=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13400 1 1 x.55 6556 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.55=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13395 0 1 6558 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1265 2 1 x.55 6556 x0.55 $input=x.55 $shape=6556 #x.55=(1,48,48,192)f32 #x0.55=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13392 1 1 x.55 6561 expr=[-1,8,8,int(size(@0,3))] #x.55=(1,48,48,192)f32 torch.permute torch.permute_2632 2 1 x0.55 6558 6559 $input=x0.55 $dims=6558 #x0.55=(1,6,8,6,8,192)f32 #6559=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_80 1 1 6559 6560 memory_format=torch.contiguous_format $input=6559 #6559=(1,6,6,8,8,192)f32 #6560=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13390 1 1 
6511 6563 expr=[-1,64,int(size(@0,2))] #6511=(1,2304,192)f32 pnnx.Expression pnnx_expr_13380 0 1 6574 expr=-2 pnnx.Expression pnnx_expr_13379 0 1 6575 expr=-1 pnnx.Attribute layers_dfe.4.residual_group.blocks.2.attn 0 1 relative_position_bias_table.55 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.55=(225,6)f32 pnnx.Attribute pnnx_unique_53 0 1 relative_position_index.55 @relative_position_index=(64,64)i64 #relative_position_index.55=(64,64)i64 Tensor.view Tensor.view_1266 2 1 6560 6561 x_windows.55 $input=6560 $shape=6561 #6560=(1,6,6,8,8,192)f32 #x_windows.55=(36,8,8,192)f32 Tensor.view Tensor.view_1267 2 1 x_windows.55 6563 x1.55 $input=x_windows.55 $shape=6563 #x_windows.55=(36,8,8,192)f32 #x1.55=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.2.attn.qkv 1 1 x1.55 6595 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.55=(36,64,192)f32 #6595=(36,64,576)f32 pnnx.Expression pnnx_expr_13364 1 1 x1.55 6598 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.55=(36,64,192)f32 pnnx.Expression pnnx_expr_13359 0 1 6600 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_486 2 1 6595 6598 6599 $input=6595 $shape=6598 #6595=(36,64,576)f32 #6599=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_13358 0 1 21701 expr=0 pnnx.Expression pnnx_expr_13357 0 1 21702 expr=0 pnnx.Expression pnnx_expr_13356 0 1 21703 expr=0 pnnx.Expression pnnx_expr_13355 0 1 21704 expr=1 pnnx.Expression pnnx_expr_13354 0 1 21705 expr=0 pnnx.Expression pnnx_expr_13353 0 1 21706 expr=2 torch.permute torch.permute_2633 2 1 6599 6600 qkv0.55 $input=6599 $dims=6600 #6599=(36,64,3,6,32)f32 #qkv0.55=(3,36,6,64,32)f32 Tensor.select Tensor.select_728 3 1 qkv0.55 21701 21702 q.55 $input=qkv0.55 $dim=21701 $index=21702 #qkv0.55=(3,36,6,64,32)f32 #q.55=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13352 1 1 q.55 q0.55 expr=mul(@0,1.767767e-01) #q.55=(36,6,64,32)f32 #q0.55=(36,6,64,32)f32 Tensor.select Tensor.select_729 3 1 qkv0.55 21703 21704 k.55 $input=qkv0.55 $dim=21703 $index=21704 #qkv0.55=(3,36,6,64,32)f32 #k.55=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13350 0 1 6608 expr=[-1] Tensor.view Tensor.view_1268 2 1 relative_position_index.55 6608 6609 $input=relative_position_index.55 $shape=6608 #relative_position_index.55=(64,64)i64 #6609=(4096)i64 pnnx.Expression pnnx_expr_13349 1 1 6609 6610 expr=[@0] #6609=(4096)i64 pnnx.Expression pnnx_expr_13346 0 1 6612 expr=[64,64,-1] Tensor.index Tensor.index_352 2 1 relative_position_bias_table.55 6610 6611 $input=relative_position_bias_table.55 $expr=6610 #relative_position_bias_table.55=(225,6)f32 #6611=(4096,6)f32 pnnx.Expression pnnx_expr_13342 0 1 6614 expr=[2,0,1] Tensor.view Tensor.view_1269 2 1 6611 6612 relative_position_bias.55 $input=6611 $shape=6612 #6611=(4096,6)f32 #relative_position_bias.55=(64,64,6)f32 pnnx.Expression pnnx_expr_13341 0 1 21714 expr=0 torch.permute torch.permute_2634 2 1 relative_position_bias.55 6614 6615 $input=relative_position_bias.55 $dims=6614 #relative_position_bias.55=(64,64,6)f32 #6615=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_81 1 1 6615 relative_position_bias0.55 memory_format=torch.contiguous_format $input=6615 #6615=(6,64,64)f32 #relative_position_bias0.55=(6,64,64)f32 torch.transpose torch.transpose_3021 3 1 k.55 6574 6575 6606 $input=k.55 $dim0=6574 $dim1=6575 #k.55=(36,6,64,32)f32 #6606=(36,6,32,64)f32 torch.matmul torch.matmul_2256 2 1 q0.55 6606 attn.111 $input=q0.55 $other=6606 #q0.55=(36,6,64,32)f32 #6606=(36,6,32,64)f32 #attn.111=(36,6,64,64)f32 
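Each attn submodule stores a (225,6) relative_position_bias_table and a (64,64)i64 relative_position_index as pnnx.Attribute records; the view → Tensor.index → view → permute → unsqueeze → add chain gathers a per-head (1,6,64,64) bias and adds it to the raw scores. The sketch below shows both the lookup and the stock Swin construction of the index; the construction part is an assumption, since the dump only carries the finished attribute, and 225 = (2·8−1)² is the number of distinct (dy,dx) offsets within an 8×8 window.

    import torch

    ws, heads = 8, 6                                    # window 8x8, 6 heads, as annotated
    N = ws * ws                                         # 64 tokens per window

    # Assumed construction of relative_position_index (standard Swin recipe).
    coords = torch.stack(torch.meshgrid(torch.arange(ws), torch.arange(ws), indexing="ij"))
    coords = coords.flatten(1)                          # (2,64)
    rel = coords[:, :, None] - coords[:, None, :]       # (2,64,64) pairwise offsets
    rel = rel.permute(1, 2, 0) + (ws - 1)               # shift dy,dx into [0, 2*ws-2]
    index = rel[..., 0] * (2 * ws - 1) + rel[..., 1]    # (64,64), values in [0, 225)

    table = torch.randn((2 * ws - 1) ** 2, heads)       # @relative_position_bias_table=(225,6)
    attn = torch.randn(36, heads, N, N)                 # raw scores, e.g. attn.111

    # The lookup chain from the dump: view(-1) -> index -> view -> permute -> unsqueeze.
    bias = table[index.view(-1)]                        # (4096,6), Tensor.index_*
    bias = bias.view(N, N, -1).permute(2, 0, 1).contiguous()  # (6,64,64)
    attn = attn + bias.unsqueeze(0)                     # broadcast over the 36 windows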
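Alternating blocks in each residual_group are shifted: torch.roll by (-4,-4) before partitioning (expr=[-4,-4]), a stored @attn_mask=(36,64,64) added to the scores, and a roll by (4,4) after window reverse. The mask application widens the scores to (1,36,6,64,64) so one (36,64,64) mask can broadcast across all 6 heads, exactly the unsqueeze/view pairs in the dump. A self-contained sketch; the mask itself is precomputed offline and stored as a pnnx.Attribute, so it is zero-filled here just to exercise the shapes.

    import torch

    nW, heads, N = 36, 6, 64
    attn = torch.randn(nW, heads, N, N)                 # scores + relative position bias
    attn_mask = torch.zeros(nW, N, N)                   # @attn_mask=(36,64,64)f32

    B_ = attn.shape[0]
    attn = attn.view(B_ // nW, nW, heads, N, N)         # (1,36,6,64,64)
    attn = attn + attn_mask.unsqueeze(1).unsqueeze(0)   # (36,64,64)->(36,1,64,64)->(1,36,1,64,64)
    attn = attn.view(-1, heads, N, N)                   # back to (36,6,64,64)
    attn = attn.softmax(dim=-1)                         # nn.Softmax dim=-1, then attn_drop

In the stock Swin implementation the mask holds 0 where two tokens belong to the same pre-shift region and -100 elsewhere, so the softmax effectively zeroes out cross-region attention inside the rolled windows.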
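The tail of every block, and of every residual group, follows one fixed pattern in the records: attention output back through attn.proj, window reverse, first residual add, then the LayerNorm → Linear(192→384) → GELU → Dropout → Linear(384→192) MLP with a second residual; after the sixth block the tokens are transposed back to NCHW for a 3×3 conv and the group's input is added back (e.g. expr=add(@0,@1) over blobs 7159 and 6178). A sketch under the dump's shapes; x_group_in is a stand-in name for the blob fed into the group.

    import torch
    import torch.nn as nn

    B, H, W, C, ws, heads = 1, 48, 48, 192, 8, 6
    N, head_dim = ws * ws, C // heads
    x_group_in = torch.randn(B, H * W, C)               # group input, e.g. blob 6178
    x = x_group_in
    attn = torch.rand(36, heads, N, N).softmax(-1)      # post-softmax attention weights
    v = torch.randn(36, heads, N, head_dim)

    proj = nn.Linear(C, C)                              # layers_dfe.*.attn.proj
    out = (attn @ v).transpose(1, 2).reshape(36, N, C)  # matmul / transpose / reshape records
    out = proj(out)                                     # (36,64,192), then proj_drop

    # Window reverse: view -> permute([0,1,3,2,4,5]) -> view, the inverse of the
    # partition; shifted blocks roll back by (4,4) at this point.
    xr = out.view(B, H // ws, W // ws, ws, ws, C)       # (1,6,6,8,8,192)
    xr = xr.permute(0, 1, 3, 2, 4, 5).contiguous()      # (1,6,8,6,8,192)
    xr = xr.view(B, H, W, C)                            # shifted_x.* / x4.* blobs
    # xr = torch.roll(xr, shifts=(ws // 2, ws // 2), dims=(1, 2))  # shifted blocks only
    x = x + xr.view(B, H * W, C)                        # first residual (input.111 etc.)

    # MLP with the 192 -> 384 -> 192 widths from the dump, second residual.
    norm2 = nn.LayerNorm(C)
    mlp = nn.Sequential(nn.Linear(C, 2 * C), nn.GELU(), nn.Dropout(0.0),
                        nn.Linear(2 * C, C), nn.Dropout(0.0))
    x = x + mlp(norm2(x))

    # End of the residual group: back to NCHW, 3x3 conv, re-flatten, add group input.
    conv = nn.Conv2d(C, C, kernel_size=3, padding=1)    # layers_dfe.*.conv
    feat = x.transpose(1, 2).view(B, C, H, W)           # (1,192,48,48), input.113 / input.139
    x = conv(feat).flatten(2).transpose(1, 2) + x_group_in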
torch.unsqueeze torch.unsqueeze_3270 2 1 relative_position_bias0.55 21714 6617 $input=relative_position_bias0.55 $dim=21714 #relative_position_bias0.55=(6,64,64)f32 #6617=(1,6,64,64)f32 pnnx.Expression pnnx_expr_13339 2 1 attn.111 6617 input.123 expr=add(@0,@1) #attn.111=(36,6,64,64)f32 #6617=(1,6,64,64)f32 #input.123=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.2.attn.softmax 1 1 input.123 6619 dim=-1 #input.123=(36,6,64,64)f32 #6619=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.2.attn.attn_drop 1 1 6619 6620 #6619=(36,6,64,64)f32 #6620=(36,6,64,64)f32 Tensor.select Tensor.select_730 3 1 qkv0.55 21705 21706 v.55 $input=qkv0.55 $dim=21705 $index=21706 #qkv0.55=(3,36,6,64,32)f32 #v.55=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13338 0 1 21716 expr=1 pnnx.Expression pnnx_expr_13337 0 1 21717 expr=2 torch.matmul torch.matmul_2257 2 1 6620 v.55 6621 $input=6620 $other=v.55 #6620=(36,6,64,64)f32 #v.55=(36,6,64,32)f32 #6621=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13336 1 1 x1.55 6623 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.55=(36,64,192)f32 torch.transpose torch.transpose_3022 3 1 6621 21716 21717 6622 $input=6621 $dim0=21716 $dim1=21717 #6621=(36,6,64,32)f32 #6622=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_487 2 1 6622 6623 input0.57 $input=6622 $shape=6623 #6622=(36,64,6,32)f32 #input0.57=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.2.attn.proj 1 1 input0.57 6625 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.57=(36,64,192)f32 #6625=(36,64,192)f32 nn.Dropout layers_dfe.4.residual_group.blocks.2.attn.proj_drop 1 1 6625 6626 #6625=(36,64,192)f32 #6626=(36,64,192)f32 pnnx.Expression pnnx_expr_13332 1 1 6511 6627 expr=[-1,8,8,int(size(@0,2))] #6511=(1,2304,192)f32 pnnx.Expression pnnx_expr_13319 1 1 154 6633 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13312 0 1 6635 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1270 2 1 6626 6627 windows.55 $input=6626 $shape=6627 #6626=(36,64,192)f32 #windows.55=(36,8,8,192)f32 Tensor.view Tensor.view_1271 2 1 windows.55 6633 x2.55 $input=windows.55 $shape=6633 #windows.55=(36,8,8,192)f32 #x2.55=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13309 1 1 154 6638 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2635 2 1 x2.55 6635 6636 $input=x2.55 $dims=6635 #x2.55=(1,6,6,8,8,192)f32 #6636=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_82 1 1 6636 6637 memory_format=torch.contiguous_format $input=6636 #6636=(1,6,8,6,8,192)f32 #6637=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13306 2 1 6511 154 6642 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6511=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1272 2 1 6637 6638 x3.55 $input=6637 $shape=6638 #6637=(1,6,8,6,8,192)f32 #x3.55=(1,48,48,192)f32 Tensor.view Tensor.view_1273 2 1 x3.55 6642 x4.55 $input=x3.55 $shape=6642 #x3.55=(1,48,48,192)f32 #x4.55=(1,2304,192)f32 pnnx.Expression pnnx_expr_13304 2 1 6511 x4.55 input.125 expr=add(@0,@1) #6511=(1,2304,192)f32 #x4.55=(1,2304,192)f32 #input.125=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.2.norm2 1 1 input.125 6646 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.125=(1,2304,192)f32 #6646=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.2.mlp.fc1 1 1 6646 6651 bias=True in_features=192 out_features=384 @bias=(384)f32 
@weight=(384,192)f32 #6646=(1,2304,192)f32 #6651=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.2.mlp.act 1 1 6651 6652 #6651=(1,2304,384)f32 #6652=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.2.mlp.drop 1 1 6652 6653 #6652=(1,2304,384)f32 #6653=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.2.mlp.fc2 1 1 6653 6654 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6653=(1,2304,384)f32 #6654=(1,2304,192)f32 nn.Dropout pnnx_unique_54 1 1 6654 6655 #6654=(1,2304,192)f32 #6655=(1,2304,192)f32 pnnx.Expression pnnx_expr_13302 2 1 input.125 6655 6657 expr=add(@0,@1) #input.125=(1,2304,192)f32 #6655=(1,2304,192)f32 #6657=(1,2304,192)f32 pnnx.Attribute layers_dfe.4.residual_group.blocks.3 0 1 attn_mask.29 @attn_mask=(36,64,64)f32 #attn_mask.29=(36,64,64)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.3.norm1 1 1 6657 6686 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6657=(1,2304,192)f32 #6686=(1,2304,192)f32 pnnx.Expression pnnx_expr_13279 2 1 6657 154 6687 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6657=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13277 0 1 6689 expr=[-4,-4] pnnx.Expression pnnx_expr_13275 0 1 6690 expr=[1,2] Tensor.view Tensor.view_1274 2 1 6686 6687 x.57 $input=6686 $shape=6687 #6686=(1,2304,192)f32 #x.57=(1,48,48,192)f32 torch.roll torch.roll_2446 3 1 x.57 6689 6690 x0.57 $input=x.57 $shifts=6689 $dims=6690 #x.57=(1,48,48,192)f32 #x0.57=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13253 1 1 x0.57 6707 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.57=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13248 0 1 6709 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1275 2 1 x0.57 6707 x1.57 $input=x0.57 $shape=6707 #x0.57=(1,48,48,192)f32 #x1.57=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13245 1 1 x0.57 6712 expr=[-1,8,8,int(size(@0,3))] #x0.57=(1,48,48,192)f32 torch.permute torch.permute_2636 2 1 x1.57 6709 6710 $input=x1.57 $dims=6709 #x1.57=(1,6,8,6,8,192)f32 #6710=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_83 1 1 6710 6711 memory_format=torch.contiguous_format $input=6710 #6710=(1,6,6,8,8,192)f32 #6711=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13243 1 1 6657 6714 expr=[-1,64,int(size(@0,2))] #6657=(1,2304,192)f32 pnnx.Expression pnnx_expr_13233 0 1 6725 expr=-2 pnnx.Expression pnnx_expr_13232 0 1 6726 expr=-1 pnnx.Attribute layers_dfe.4.residual_group.blocks.3.attn 0 1 relative_position_bias_table.57 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.57=(225,6)f32 pnnx.Attribute pnnx_unique_55 0 1 relative_position_index.57 @relative_position_index=(64,64)i64 #relative_position_index.57=(64,64)i64 Tensor.view Tensor.view_1276 2 1 6711 6712 x_windows.57 $input=6711 $shape=6712 #6711=(1,6,6,8,8,192)f32 #x_windows.57=(36,8,8,192)f32 Tensor.view Tensor.view_1277 2 1 x_windows.57 6714 x2.57 $input=x_windows.57 $shape=6714 #x_windows.57=(36,8,8,192)f32 #x2.57=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.3.attn.qkv 1 1 x2.57 6750 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.57=(36,64,192)f32 #6750=(36,64,576)f32 pnnx.Expression pnnx_expr_13213 1 1 x2.57 6753 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.57=(36,64,192)f32 pnnx.Expression pnnx_expr_13208 0 1 6755 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_488 2 1 6750 6753 6754 $input=6750 $shape=6753 
#6750=(36,64,576)f32 #6754=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_13207 0 1 21760 expr=0 pnnx.Expression pnnx_expr_13206 0 1 21761 expr=0 pnnx.Expression pnnx_expr_13205 0 1 21762 expr=0 pnnx.Expression pnnx_expr_13204 0 1 21763 expr=1 pnnx.Expression pnnx_expr_13203 0 1 21764 expr=0 pnnx.Expression pnnx_expr_13202 0 1 21765 expr=2 torch.permute torch.permute_2637 2 1 6754 6755 qkv0.57 $input=6754 $dims=6755 #6754=(36,64,3,6,32)f32 #qkv0.57=(3,36,6,64,32)f32 Tensor.select Tensor.select_731 3 1 qkv0.57 21760 21761 q.57 $input=qkv0.57 $dim=21760 $index=21761 #qkv0.57=(3,36,6,64,32)f32 #q.57=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13201 1 1 q.57 q0.57 expr=mul(@0,1.767767e-01) #q.57=(36,6,64,32)f32 #q0.57=(36,6,64,32)f32 Tensor.select Tensor.select_732 3 1 qkv0.57 21762 21763 k.57 $input=qkv0.57 $dim=21762 $index=21763 #qkv0.57=(3,36,6,64,32)f32 #k.57=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13199 0 1 6763 expr=[-1] Tensor.view Tensor.view_1278 2 1 relative_position_index.57 6763 6764 $input=relative_position_index.57 $shape=6763 #relative_position_index.57=(64,64)i64 #6764=(4096)i64 pnnx.Expression pnnx_expr_13198 1 1 6764 6765 expr=[@0] #6764=(4096)i64 pnnx.Expression pnnx_expr_13195 0 1 6767 expr=[64,64,-1] Tensor.index Tensor.index_353 2 1 relative_position_bias_table.57 6765 6766 $input=relative_position_bias_table.57 $expr=6765 #relative_position_bias_table.57=(225,6)f32 #6766=(4096,6)f32 pnnx.Expression pnnx_expr_13191 0 1 6769 expr=[2,0,1] Tensor.view Tensor.view_1279 2 1 6766 6767 relative_position_bias.57 $input=6766 $shape=6767 #6766=(4096,6)f32 #relative_position_bias.57=(64,64,6)f32 pnnx.Expression pnnx_expr_13190 0 1 21773 expr=0 torch.permute torch.permute_2638 2 1 relative_position_bias.57 6769 6770 $input=relative_position_bias.57 $dims=6769 #relative_position_bias.57=(64,64,6)f32 #6770=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_84 1 1 6770 relative_position_bias0.57 memory_format=torch.contiguous_format $input=6770 #6770=(6,64,64)f32 #relative_position_bias0.57=(6,64,64)f32 torch.transpose torch.transpose_3023 3 1 k.57 6725 6726 6761 $input=k.57 $dim0=6725 $dim1=6726 #k.57=(36,6,64,32)f32 #6761=(36,6,32,64)f32 torch.matmul torch.matmul_2258 2 1 q0.57 6761 attn.115 $input=q0.57 $other=6761 #q0.57=(36,6,64,32)f32 #6761=(36,6,32,64)f32 #attn.115=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3271 2 1 relative_position_bias0.57 21773 6772 $input=relative_position_bias0.57 $dim=21773 #relative_position_bias0.57=(6,64,64)f32 #6772=(1,6,64,64)f32 pnnx.Expression pnnx_expr_13188 2 1 attn.115 6772 attn0.29 expr=add(@0,@1) #attn.115=(36,6,64,64)f32 #6772=(1,6,64,64)f32 #attn0.29=(36,6,64,64)f32 pnnx.Expression pnnx_expr_13179 2 1 x2.57 attn_mask.29 6779 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.57=(36,64,192)f32 #attn_mask.29=(36,64,64)f32 pnnx.Expression pnnx_expr_13178 0 1 21778 expr=1 pnnx.Expression pnnx_expr_13177 0 1 21779 expr=0 Tensor.view Tensor.view_1280 2 1 attn0.29 6779 6780 $input=attn0.29 $shape=6779 #attn0.29=(36,6,64,64)f32 #6780=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3272 2 1 attn_mask.29 21778 6781 $input=attn_mask.29 $dim=21778 #attn_mask.29=(36,64,64)f32 #6781=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3273 2 1 6781 21779 6782 $input=6781 $dim=21779 #6781=(36,1,64,64)f32 #6782=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_13175 2 1 6780 6782 attn1.29 expr=add(@0,@1) #6780=(1,36,6,64,64)f32 #6782=(1,36,1,64,64)f32 #attn1.29=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_13172 1 1 x2.57 6784 
expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.57=(36,64,192)f32 Tensor.view Tensor.view_1281 2 1 attn1.29 6784 input.127 $input=attn1.29 $shape=6784 #attn1.29=(1,36,6,64,64)f32 #input.127=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.3.attn.softmax 1 1 input.127 6786 dim=-1 #input.127=(36,6,64,64)f32 #6786=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.3.attn.attn_drop 1 1 6786 6787 #6786=(36,6,64,64)f32 #6787=(36,6,64,64)f32 Tensor.select Tensor.select_733 3 1 qkv0.57 21764 21765 v.57 $input=qkv0.57 $dim=21764 $index=21765 #qkv0.57=(3,36,6,64,32)f32 #v.57=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13171 0 1 21783 expr=1 pnnx.Expression pnnx_expr_13170 0 1 21784 expr=2 torch.matmul torch.matmul_2259 2 1 6787 v.57 6788 $input=6787 $other=v.57 #6787=(36,6,64,64)f32 #v.57=(36,6,64,32)f32 #6788=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13169 1 1 x2.57 6790 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.57=(36,64,192)f32 torch.transpose torch.transpose_3024 3 1 6788 21783 21784 6789 $input=6788 $dim0=21783 $dim1=21784 #6788=(36,6,64,32)f32 #6789=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_489 2 1 6789 6790 input0.59 $input=6789 $shape=6790 #6789=(36,64,6,32)f32 #input0.59=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.3.attn.proj 1 1 input0.59 6792 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.59=(36,64,192)f32 #6792=(36,64,192)f32 nn.Dropout layers_dfe.4.residual_group.blocks.3.attn.proj_drop 1 1 6792 6793 #6792=(36,64,192)f32 #6793=(36,64,192)f32 pnnx.Expression pnnx_expr_13165 1 1 6657 6794 expr=[-1,8,8,int(size(@0,2))] #6657=(1,2304,192)f32 pnnx.Expression pnnx_expr_13152 1 1 154 6800 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_13145 0 1 6802 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1282 2 1 6793 6794 windows.57 $input=6793 $shape=6794 #6793=(36,64,192)f32 #windows.57=(36,8,8,192)f32 Tensor.view Tensor.view_1283 2 1 windows.57 6800 x3.57 $input=windows.57 $shape=6800 #windows.57=(36,8,8,192)f32 #x3.57=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13142 1 1 154 6805 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2639 2 1 x3.57 6802 6803 $input=x3.57 $dims=6802 #x3.57=(1,6,6,8,8,192)f32 #6803=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_85 1 1 6803 6804 memory_format=torch.contiguous_format $input=6803 #6803=(1,6,8,6,8,192)f32 #6804=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13139 0 1 6807 expr=[4,4] pnnx.Expression pnnx_expr_13136 0 1 6808 expr=[1,2] Tensor.view Tensor.view_1284 2 1 6804 6805 shifted_x.29 $input=6804 $shape=6805 #6804=(1,6,8,6,8,192)f32 #shifted_x.29=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13133 2 1 6657 154 6812 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6657=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2447 3 1 shifted_x.29 6807 6808 x4.57 $input=shifted_x.29 $shifts=6807 $dims=6808 #shifted_x.29=(1,48,48,192)f32 #x4.57=(1,48,48,192)f32 Tensor.view Tensor.view_1285 2 1 x4.57 6812 x5.29 $input=x4.57 $shape=6812 #x4.57=(1,48,48,192)f32 #x5.29=(1,2304,192)f32 pnnx.Expression pnnx_expr_13131 2 1 6657 x5.29 input.129 expr=add(@0,@1) #6657=(1,2304,192)f32 #x5.29=(1,2304,192)f32 #input.129=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.3.norm2 1 1 input.129 6816 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.129=(1,2304,192)f32 
#6816=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.3.mlp.fc1 1 1 6816 6821 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #6816=(1,2304,192)f32 #6821=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.3.mlp.act 1 1 6821 6822 #6821=(1,2304,384)f32 #6822=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.3.mlp.drop 1 1 6822 6823 #6822=(1,2304,384)f32 #6823=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.3.mlp.fc2 1 1 6823 6824 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6823=(1,2304,384)f32 #6824=(1,2304,192)f32 nn.Dropout pnnx_unique_56 1 1 6824 6825 #6824=(1,2304,192)f32 #6825=(1,2304,192)f32 pnnx.Expression pnnx_expr_13129 2 1 input.129 6825 6827 expr=add(@0,@1) #input.129=(1,2304,192)f32 #6825=(1,2304,192)f32 #6827=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.4.norm1 1 1 6827 6854 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6827=(1,2304,192)f32 #6854=(1,2304,192)f32 pnnx.Expression pnnx_expr_13107 2 1 6827 154 6855 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6827=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1286 2 1 6854 6855 x.59 $input=6854 $shape=6855 #6854=(1,2304,192)f32 #x.59=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13086 1 1 x.59 6872 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.59=(1,48,48,192)f32 pnnx.Expression pnnx_expr_13081 0 1 6874 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1287 2 1 x.59 6872 x0.59 $input=x.59 $shape=6872 #x.59=(1,48,48,192)f32 #x0.59=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_13078 1 1 x.59 6877 expr=[-1,8,8,int(size(@0,3))] #x.59=(1,48,48,192)f32 torch.permute torch.permute_2640 2 1 x0.59 6874 6875 $input=x0.59 $dims=6874 #x0.59=(1,6,8,6,8,192)f32 #6875=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_86 1 1 6875 6876 memory_format=torch.contiguous_format $input=6875 #6875=(1,6,6,8,8,192)f32 #6876=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_13076 1 1 6827 6879 expr=[-1,64,int(size(@0,2))] #6827=(1,2304,192)f32 pnnx.Expression pnnx_expr_13066 0 1 6890 expr=-2 pnnx.Expression pnnx_expr_13065 0 1 6891 expr=-1 pnnx.Attribute layers_dfe.4.residual_group.blocks.4.attn 0 1 relative_position_bias_table.59 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.59=(225,6)f32 pnnx.Attribute pnnx_unique_57 0 1 relative_position_index.59 @relative_position_index=(64,64)i64 #relative_position_index.59=(64,64)i64 Tensor.view Tensor.view_1288 2 1 6876 6877 x_windows.59 $input=6876 $shape=6877 #6876=(1,6,6,8,8,192)f32 #x_windows.59=(36,8,8,192)f32 Tensor.view Tensor.view_1289 2 1 x_windows.59 6879 x1.59 $input=x_windows.59 $shape=6879 #x_windows.59=(36,8,8,192)f32 #x1.59=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.4.attn.qkv 1 1 x1.59 6911 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.59=(36,64,192)f32 #6911=(36,64,576)f32 pnnx.Expression pnnx_expr_13050 1 1 x1.59 6914 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.59=(36,64,192)f32 pnnx.Expression pnnx_expr_13045 0 1 6916 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_490 2 1 6911 6914 6915 $input=6911 $shape=6914 #6911=(36,64,576)f32 #6915=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_13044 0 1 21828 expr=0 pnnx.Expression pnnx_expr_13043 0 1 21829 expr=0 pnnx.Expression pnnx_expr_13042 0 1 21830 expr=0 pnnx.Expression pnnx_expr_13041 0 1 21831 
expr=1 pnnx.Expression pnnx_expr_13040 0 1 21832 expr=0 pnnx.Expression pnnx_expr_13039 0 1 21833 expr=2 torch.permute torch.permute_2641 2 1 6915 6916 qkv0.59 $input=6915 $dims=6916 #6915=(36,64,3,6,32)f32 #qkv0.59=(3,36,6,64,32)f32 Tensor.select Tensor.select_734 3 1 qkv0.59 21828 21829 q.59 $input=qkv0.59 $dim=21828 $index=21829 #qkv0.59=(3,36,6,64,32)f32 #q.59=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13038 1 1 q.59 q0.59 expr=mul(@0,1.767767e-01) #q.59=(36,6,64,32)f32 #q0.59=(36,6,64,32)f32 Tensor.select Tensor.select_735 3 1 qkv0.59 21830 21831 k.59 $input=qkv0.59 $dim=21830 $index=21831 #qkv0.59=(3,36,6,64,32)f32 #k.59=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13036 0 1 6924 expr=[-1] Tensor.view Tensor.view_1290 2 1 relative_position_index.59 6924 6925 $input=relative_position_index.59 $shape=6924 #relative_position_index.59=(64,64)i64 #6925=(4096)i64 pnnx.Expression pnnx_expr_13035 1 1 6925 6926 expr=[@0] #6925=(4096)i64 pnnx.Expression pnnx_expr_13032 0 1 6928 expr=[64,64,-1] Tensor.index Tensor.index_354 2 1 relative_position_bias_table.59 6926 6927 $input=relative_position_bias_table.59 $expr=6926 #relative_position_bias_table.59=(225,6)f32 #6927=(4096,6)f32 pnnx.Expression pnnx_expr_13028 0 1 6930 expr=[2,0,1] Tensor.view Tensor.view_1291 2 1 6927 6928 relative_position_bias.59 $input=6927 $shape=6928 #6927=(4096,6)f32 #relative_position_bias.59=(64,64,6)f32 pnnx.Expression pnnx_expr_13027 0 1 21841 expr=0 torch.permute torch.permute_2642 2 1 relative_position_bias.59 6930 6931 $input=relative_position_bias.59 $dims=6930 #relative_position_bias.59=(64,64,6)f32 #6931=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_87 1 1 6931 relative_position_bias0.59 memory_format=torch.contiguous_format $input=6931 #6931=(6,64,64)f32 #relative_position_bias0.59=(6,64,64)f32 torch.transpose torch.transpose_3025 3 1 k.59 6890 6891 6922 $input=k.59 $dim0=6890 $dim1=6891 #k.59=(36,6,64,32)f32 #6922=(36,6,32,64)f32 torch.matmul torch.matmul_2260 2 1 q0.59 6922 attn.119 $input=q0.59 $other=6922 #q0.59=(36,6,64,32)f32 #6922=(36,6,32,64)f32 #attn.119=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3274 2 1 relative_position_bias0.59 21841 6933 $input=relative_position_bias0.59 $dim=21841 #relative_position_bias0.59=(6,64,64)f32 #6933=(1,6,64,64)f32 pnnx.Expression pnnx_expr_13025 2 1 attn.119 6933 input.131 expr=add(@0,@1) #attn.119=(36,6,64,64)f32 #6933=(1,6,64,64)f32 #input.131=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.4.attn.softmax 1 1 input.131 6935 dim=-1 #input.131=(36,6,64,64)f32 #6935=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.4.attn.attn_drop 1 1 6935 6936 #6935=(36,6,64,64)f32 #6936=(36,6,64,64)f32 Tensor.select Tensor.select_736 3 1 qkv0.59 21832 21833 v.59 $input=qkv0.59 $dim=21832 $index=21833 #qkv0.59=(3,36,6,64,32)f32 #v.59=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13024 0 1 21843 expr=1 pnnx.Expression pnnx_expr_13023 0 1 21844 expr=2 torch.matmul torch.matmul_2261 2 1 6936 v.59 6937 $input=6936 $other=v.59 #6936=(36,6,64,64)f32 #v.59=(36,6,64,32)f32 #6937=(36,6,64,32)f32 pnnx.Expression pnnx_expr_13022 1 1 x1.59 6939 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.59=(36,64,192)f32 torch.transpose torch.transpose_3026 3 1 6937 21843 21844 6938 $input=6937 $dim0=21843 $dim1=21844 #6937=(36,6,64,32)f32 #6938=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_491 2 1 6938 6939 input0.61 $input=6938 $shape=6939 #6938=(36,64,6,32)f32 #input0.61=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.4.attn.proj 1 1 input0.61 6941 
bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.61=(36,64,192)f32 #6941=(36,64,192)f32 nn.Dropout layers_dfe.4.residual_group.blocks.4.attn.proj_drop 1 1 6941 6942 #6941=(36,64,192)f32 #6942=(36,64,192)f32 pnnx.Expression pnnx_expr_13018 1 1 6827 6943 expr=[-1,8,8,int(size(@0,2))] #6827=(1,2304,192)f32 pnnx.Expression pnnx_expr_13005 1 1 154 6949 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12998 0 1 6951 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1292 2 1 6942 6943 windows.59 $input=6942 $shape=6943 #6942=(36,64,192)f32 #windows.59=(36,8,8,192)f32 Tensor.view Tensor.view_1293 2 1 windows.59 6949 x2.59 $input=windows.59 $shape=6949 #windows.59=(36,8,8,192)f32 #x2.59=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12995 1 1 154 6954 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2643 2 1 x2.59 6951 6952 $input=x2.59 $dims=6951 #x2.59=(1,6,6,8,8,192)f32 #6952=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_88 1 1 6952 6953 memory_format=torch.contiguous_format $input=6952 #6952=(1,6,8,6,8,192)f32 #6953=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12992 2 1 6827 154 6958 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6827=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1294 2 1 6953 6954 x3.59 $input=6953 $shape=6954 #6953=(1,6,8,6,8,192)f32 #x3.59=(1,48,48,192)f32 Tensor.view Tensor.view_1295 2 1 x3.59 6958 x4.59 $input=x3.59 $shape=6958 #x3.59=(1,48,48,192)f32 #x4.59=(1,2304,192)f32 pnnx.Expression pnnx_expr_12990 2 1 6827 x4.59 input.133 expr=add(@0,@1) #6827=(1,2304,192)f32 #x4.59=(1,2304,192)f32 #input.133=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.4.norm2 1 1 input.133 6962 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.133=(1,2304,192)f32 #6962=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.4.mlp.fc1 1 1 6962 6967 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #6962=(1,2304,192)f32 #6967=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.4.mlp.act 1 1 6967 6968 #6967=(1,2304,384)f32 #6968=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.4.mlp.drop 1 1 6968 6969 #6968=(1,2304,384)f32 #6969=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.4.mlp.fc2 1 1 6969 6970 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #6969=(1,2304,384)f32 #6970=(1,2304,192)f32 nn.Dropout pnnx_unique_58 1 1 6970 6971 #6970=(1,2304,192)f32 #6971=(1,2304,192)f32 pnnx.Expression pnnx_expr_12988 2 1 input.133 6971 6973 expr=add(@0,@1) #input.133=(1,2304,192)f32 #6971=(1,2304,192)f32 #6973=(1,2304,192)f32 pnnx.Attribute layers_dfe.4.residual_group.blocks.5 0 1 attn_mask.31 @attn_mask=(36,64,64)f32 #attn_mask.31=(36,64,64)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.5.norm1 1 1 6973 7002 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #6973=(1,2304,192)f32 #7002=(1,2304,192)f32 pnnx.Expression pnnx_expr_12965 2 1 6973 154 7003 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #6973=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12963 0 1 7005 expr=[-4,-4] pnnx.Expression pnnx_expr_12961 0 1 7006 expr=[1,2] Tensor.view Tensor.view_1296 2 1 7002 7003 x.61 $input=7002 $shape=7003 #7002=(1,2304,192)f32 #x.61=(1,48,48,192)f32 torch.roll torch.roll_2448 3 1 
x.61 7005 7006 x0.61 $input=x.61 $shifts=7005 $dims=7006 #x.61=(1,48,48,192)f32 #x0.61=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12939 1 1 x0.61 7023 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.61=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12934 0 1 7025 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1297 2 1 x0.61 7023 x1.61 $input=x0.61 $shape=7023 #x0.61=(1,48,48,192)f32 #x1.61=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12931 1 1 x0.61 7028 expr=[-1,8,8,int(size(@0,3))] #x0.61=(1,48,48,192)f32 torch.permute torch.permute_2644 2 1 x1.61 7025 7026 $input=x1.61 $dims=7025 #x1.61=(1,6,8,6,8,192)f32 #7026=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_89 1 1 7026 7027 memory_format=torch.contiguous_format $input=7026 #7026=(1,6,6,8,8,192)f32 #7027=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12929 1 1 6973 7030 expr=[-1,64,int(size(@0,2))] #6973=(1,2304,192)f32 pnnx.Expression pnnx_expr_12919 0 1 7041 expr=-2 pnnx.Expression pnnx_expr_12918 0 1 7042 expr=-1 pnnx.Attribute layers_dfe.4.residual_group.blocks.5.attn 0 1 relative_position_bias_table.61 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.61=(225,6)f32 pnnx.Attribute pnnx_unique_59 0 1 relative_position_index.61 @relative_position_index=(64,64)i64 #relative_position_index.61=(64,64)i64 Tensor.view Tensor.view_1298 2 1 7027 7028 x_windows.61 $input=7027 $shape=7028 #7027=(1,6,6,8,8,192)f32 #x_windows.61=(36,8,8,192)f32 Tensor.view Tensor.view_1299 2 1 x_windows.61 7030 x2.61 $input=x_windows.61 $shape=7030 #x_windows.61=(36,8,8,192)f32 #x2.61=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.5.attn.qkv 1 1 x2.61 7066 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.61=(36,64,192)f32 #7066=(36,64,576)f32 pnnx.Expression pnnx_expr_12899 1 1 x2.61 7069 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.61=(36,64,192)f32 pnnx.Expression pnnx_expr_12894 0 1 7071 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_492 2 1 7066 7069 7070 $input=7066 $shape=7069 #7066=(36,64,576)f32 #7070=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12893 0 1 21887 expr=0 pnnx.Expression pnnx_expr_12892 0 1 21888 expr=0 pnnx.Expression pnnx_expr_12891 0 1 21889 expr=0 pnnx.Expression pnnx_expr_12890 0 1 21890 expr=1 pnnx.Expression pnnx_expr_12889 0 1 21891 expr=0 pnnx.Expression pnnx_expr_12888 0 1 21892 expr=2 torch.permute torch.permute_2645 2 1 7070 7071 qkv0.61 $input=7070 $dims=7071 #7070=(36,64,3,6,32)f32 #qkv0.61=(3,36,6,64,32)f32 Tensor.select Tensor.select_737 3 1 qkv0.61 21887 21888 q.61 $input=qkv0.61 $dim=21887 $index=21888 #qkv0.61=(3,36,6,64,32)f32 #q.61=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12887 1 1 q.61 q0.61 expr=mul(@0,1.767767e-01) #q.61=(36,6,64,32)f32 #q0.61=(36,6,64,32)f32 Tensor.select Tensor.select_738 3 1 qkv0.61 21889 21890 k.61 $input=qkv0.61 $dim=21889 $index=21890 #qkv0.61=(3,36,6,64,32)f32 #k.61=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12885 0 1 7079 expr=[-1] Tensor.view Tensor.view_1300 2 1 relative_position_index.61 7079 7080 $input=relative_position_index.61 $shape=7079 #relative_position_index.61=(64,64)i64 #7080=(4096)i64 pnnx.Expression pnnx_expr_12884 1 1 7080 7081 expr=[@0] #7080=(4096)i64 pnnx.Expression pnnx_expr_12881 0 1 7083 expr=[64,64,-1] Tensor.index Tensor.index_355 2 1 relative_position_bias_table.61 7081 7082 $input=relative_position_bias_table.61 $expr=7081 #relative_position_bias_table.61=(225,6)f32 #7082=(4096,6)f32 pnnx.Expression pnnx_expr_12877 0 1 7085 
expr=[2,0,1] Tensor.view Tensor.view_1301 2 1 7082 7083 relative_position_bias.61 $input=7082 $shape=7083 #7082=(4096,6)f32 #relative_position_bias.61=(64,64,6)f32 pnnx.Expression pnnx_expr_12876 0 1 21900 expr=0 torch.permute torch.permute_2646 2 1 relative_position_bias.61 7085 7086 $input=relative_position_bias.61 $dims=7085 #relative_position_bias.61=(64,64,6)f32 #7086=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_90 1 1 7086 relative_position_bias0.61 memory_format=torch.contiguous_format $input=7086 #7086=(6,64,64)f32 #relative_position_bias0.61=(6,64,64)f32 torch.transpose torch.transpose_3027 3 1 k.61 7041 7042 7077 $input=k.61 $dim0=7041 $dim1=7042 #k.61=(36,6,64,32)f32 #7077=(36,6,32,64)f32 torch.matmul torch.matmul_2262 2 1 q0.61 7077 attn.123 $input=q0.61 $other=7077 #q0.61=(36,6,64,32)f32 #7077=(36,6,32,64)f32 #attn.123=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3275 2 1 relative_position_bias0.61 21900 7088 $input=relative_position_bias0.61 $dim=21900 #relative_position_bias0.61=(6,64,64)f32 #7088=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12874 2 1 attn.123 7088 attn0.31 expr=add(@0,@1) #attn.123=(36,6,64,64)f32 #7088=(1,6,64,64)f32 #attn0.31=(36,6,64,64)f32 pnnx.Expression pnnx_expr_12865 2 1 x2.61 attn_mask.31 7095 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.61=(36,64,192)f32 #attn_mask.31=(36,64,64)f32 pnnx.Expression pnnx_expr_12864 0 1 21905 expr=1 pnnx.Expression pnnx_expr_12863 0 1 21906 expr=0 Tensor.view Tensor.view_1302 2 1 attn0.31 7095 7096 $input=attn0.31 $shape=7095 #attn0.31=(36,6,64,64)f32 #7096=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3276 2 1 attn_mask.31 21905 7097 $input=attn_mask.31 $dim=21905 #attn_mask.31=(36,64,64)f32 #7097=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3277 2 1 7097 21906 7098 $input=7097 $dim=21906 #7097=(36,1,64,64)f32 #7098=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_12861 2 1 7096 7098 attn1.31 expr=add(@0,@1) #7096=(1,36,6,64,64)f32 #7098=(1,36,1,64,64)f32 #attn1.31=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_12858 1 1 x2.61 7100 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.61=(36,64,192)f32 Tensor.view Tensor.view_1303 2 1 attn1.31 7100 input.135 $input=attn1.31 $shape=7100 #attn1.31=(1,36,6,64,64)f32 #input.135=(36,6,64,64)f32 nn.Softmax layers_dfe.4.residual_group.blocks.5.attn.softmax 1 1 input.135 7102 dim=-1 #input.135=(36,6,64,64)f32 #7102=(36,6,64,64)f32 nn.Dropout layers_dfe.4.residual_group.blocks.5.attn.attn_drop 1 1 7102 7103 #7102=(36,6,64,64)f32 #7103=(36,6,64,64)f32 Tensor.select Tensor.select_739 3 1 qkv0.61 21891 21892 v.61 $input=qkv0.61 $dim=21891 $index=21892 #qkv0.61=(3,36,6,64,32)f32 #v.61=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12857 0 1 21910 expr=1 pnnx.Expression pnnx_expr_12856 0 1 21911 expr=2 torch.matmul torch.matmul_2263 2 1 7103 v.61 7104 $input=7103 $other=v.61 #7103=(36,6,64,64)f32 #v.61=(36,6,64,32)f32 #7104=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12855 1 1 x2.61 7106 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.61=(36,64,192)f32 torch.transpose torch.transpose_3028 3 1 7104 21910 21911 7105 $input=7104 $dim0=21910 $dim1=21911 #7104=(36,6,64,32)f32 #7105=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_493 2 1 7105 7106 input0.63 $input=7105 $shape=7106 #7105=(36,64,6,32)f32 #input0.63=(36,64,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.5.attn.proj 1 1 input0.63 7108 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.63=(36,64,192)f32 #7108=(36,64,192)f32 nn.Dropout 
layers_dfe.4.residual_group.blocks.5.attn.proj_drop 1 1 7108 7109 #7108=(36,64,192)f32 #7109=(36,64,192)f32 pnnx.Expression pnnx_expr_12851 1 1 6973 7110 expr=[-1,8,8,int(size(@0,2))] #6973=(1,2304,192)f32 pnnx.Expression pnnx_expr_12838 1 1 154 7116 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12831 0 1 7118 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1304 2 1 7109 7110 windows.61 $input=7109 $shape=7110 #7109=(36,64,192)f32 #windows.61=(36,8,8,192)f32 Tensor.view Tensor.view_1305 2 1 windows.61 7116 x3.61 $input=windows.61 $shape=7116 #windows.61=(36,8,8,192)f32 #x3.61=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12828 1 1 154 7121 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2647 2 1 x3.61 7118 7119 $input=x3.61 $dims=7118 #x3.61=(1,6,6,8,8,192)f32 #7119=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_91 1 1 7119 7120 memory_format=torch.contiguous_format $input=7119 #7119=(1,6,8,6,8,192)f32 #7120=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12825 0 1 7123 expr=[4,4] pnnx.Expression pnnx_expr_12822 0 1 7124 expr=[1,2] Tensor.view Tensor.view_1306 2 1 7120 7121 shifted_x.31 $input=7120 $shape=7121 #7120=(1,6,8,6,8,192)f32 #shifted_x.31=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12819 2 1 6973 154 7128 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #6973=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2449 3 1 shifted_x.31 7123 7124 x4.61 $input=shifted_x.31 $shifts=7123 $dims=7124 #shifted_x.31=(1,48,48,192)f32 #x4.61=(1,48,48,192)f32 Tensor.view Tensor.view_1307 2 1 x4.61 7128 x5.31 $input=x4.61 $shape=7128 #x4.61=(1,48,48,192)f32 #x5.31=(1,2304,192)f32 pnnx.Expression pnnx_expr_12817 2 1 6973 x5.31 input.137 expr=add(@0,@1) #6973=(1,2304,192)f32 #x5.31=(1,2304,192)f32 #input.137=(1,2304,192)f32 nn.LayerNorm layers_dfe.4.residual_group.blocks.5.norm2 1 1 input.137 7132 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.137=(1,2304,192)f32 #7132=(1,2304,192)f32 nn.Linear layers_dfe.4.residual_group.blocks.5.mlp.fc1 1 1 7132 7137 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7132=(1,2304,192)f32 #7137=(1,2304,384)f32 nn.GELU layers_dfe.4.residual_group.blocks.5.mlp.act 1 1 7137 7138 #7137=(1,2304,384)f32 #7138=(1,2304,384)f32 nn.Dropout layers_dfe.4.residual_group.blocks.5.mlp.drop 1 1 7138 7139 #7138=(1,2304,384)f32 #7139=(1,2304,384)f32 nn.Linear layers_dfe.4.residual_group.blocks.5.mlp.fc2 1 1 7139 7140 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7139=(1,2304,384)f32 #7140=(1,2304,192)f32 nn.Dropout pnnx_unique_60 1 1 7140 7141 #7140=(1,2304,192)f32 #7141=(1,2304,192)f32 pnnx.Expression pnnx_expr_12815 2 1 input.137 7141 7143 expr=add(@0,@1) #input.137=(1,2304,192)f32 #7141=(1,2304,192)f32 #7143=(1,2304,192)f32 pnnx.Expression pnnx_expr_12813 0 1 7145 expr=1 pnnx.Expression pnnx_expr_12812 0 1 7146 expr=2 pnnx.Expression pnnx_expr_12807 2 1 7143 154 7152 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #7143=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.transpose torch.transpose_3029 3 1 7143 7145 7146 7151 $input=7143 $dim0=7145 $dim1=7146 #7143=(1,2304,192)f32 #7151=(1,192,2304)f32 Tensor.view Tensor.view_1308 2 1 7151 7152 input.139 $input=7151 $shape=7152 #7151=(1,192,2304)f32 #input.139=(1,192,48,48)f32 nn.Conv2d layers_dfe.4.conv 1 1 input.139 7154 bias=True dilation=(1,1) groups=1 in_channels=192 
kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.139=(1,192,48,48)f32 #7154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12806 0 1 7155 expr=-1 pnnx.Expression pnnx_expr_12805 0 1 7156 expr=2 pnnx.Expression pnnx_expr_12804 0 1 7157 expr=1 pnnx.Expression pnnx_expr_12803 0 1 21938 expr=2 torch.flatten torch.flatten_2188 3 1 7154 7156 7155 7158 $input=7154 $start_dim=7156 $end_dim=7155 #7154=(1,192,48,48)f32 #7158=(1,192,2304)f32 torch.transpose torch.transpose_3030 3 1 7158 7157 21938 7159 $input=7158 $dim0=7157 $dim1=21938 #7158=(1,192,2304)f32 #7159=(1,2304,192)f32 pnnx.Expression pnnx_expr_12802 2 1 7159 6178 7160 expr=add(@0,@1) #7159=(1,2304,192)f32 #6178=(1,2304,192)f32 #7160=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.0.norm1 1 1 7160 7204 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7160=(1,2304,192)f32 #7204=(1,2304,192)f32 pnnx.Expression pnnx_expr_12779 2 1 7160 154 7205 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7160=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1309 2 1 7204 7205 x.63 $input=7204 $shape=7205 #7204=(1,2304,192)f32 #x.63=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12758 1 1 x.63 7222 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.63=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12753 0 1 7224 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1310 2 1 x.63 7222 x0.63 $input=x.63 $shape=7222 #x.63=(1,48,48,192)f32 #x0.63=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12750 1 1 x.63 7227 expr=[-1,8,8,int(size(@0,3))] #x.63=(1,48,48,192)f32 torch.permute torch.permute_2648 2 1 x0.63 7224 7225 $input=x0.63 $dims=7224 #x0.63=(1,6,8,6,8,192)f32 #7225=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_92 1 1 7225 7226 memory_format=torch.contiguous_format $input=7225 #7225=(1,6,6,8,8,192)f32 #7226=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12748 1 1 7160 7229 expr=[-1,64,int(size(@0,2))] #7160=(1,2304,192)f32 pnnx.Expression pnnx_expr_12738 0 1 7240 expr=-2 pnnx.Expression pnnx_expr_12737 0 1 7241 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.0.attn 0 1 relative_position_bias_table.63 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.63=(225,6)f32 pnnx.Attribute pnnx_unique_61 0 1 relative_position_index.63 @relative_position_index=(64,64)i64 #relative_position_index.63=(64,64)i64 Tensor.view Tensor.view_1311 2 1 7226 7227 x_windows.63 $input=7226 $shape=7227 #7226=(1,6,6,8,8,192)f32 #x_windows.63=(36,8,8,192)f32 Tensor.view Tensor.view_1312 2 1 x_windows.63 7229 x1.63 $input=x_windows.63 $shape=7229 #x_windows.63=(36,8,8,192)f32 #x1.63=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.0.attn.qkv 1 1 x1.63 7261 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.63=(36,64,192)f32 #7261=(36,64,576)f32 pnnx.Expression pnnx_expr_12722 1 1 x1.63 7264 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.63=(36,64,192)f32 pnnx.Expression pnnx_expr_12717 0 1 7266 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_494 2 1 7261 7264 7265 $input=7261 $shape=7264 #7261=(36,64,576)f32 #7265=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12716 0 1 21956 expr=0 pnnx.Expression pnnx_expr_12715 0 1 21957 expr=0 pnnx.Expression pnnx_expr_12714 0 1 21958 expr=0 pnnx.Expression pnnx_expr_12713 0 1 21959 expr=1 pnnx.Expression pnnx_expr_12712 0 1 21960 expr=0 pnnx.Expression 
pnnx_expr_12711 0 1 21961 expr=2 torch.permute torch.permute_2649 2 1 7265 7266 qkv0.63 $input=7265 $dims=7266 #7265=(36,64,3,6,32)f32 #qkv0.63=(3,36,6,64,32)f32 Tensor.select Tensor.select_740 3 1 qkv0.63 21956 21957 q.63 $input=qkv0.63 $dim=21956 $index=21957 #qkv0.63=(3,36,6,64,32)f32 #q.63=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12710 1 1 q.63 q0.63 expr=mul(@0,1.767767e-01) #q.63=(36,6,64,32)f32 #q0.63=(36,6,64,32)f32 Tensor.select Tensor.select_741 3 1 qkv0.63 21958 21959 k.63 $input=qkv0.63 $dim=21958 $index=21959 #qkv0.63=(3,36,6,64,32)f32 #k.63=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12708 0 1 7274 expr=[-1] Tensor.view Tensor.view_1313 2 1 relative_position_index.63 7274 7275 $input=relative_position_index.63 $shape=7274 #relative_position_index.63=(64,64)i64 #7275=(4096)i64 pnnx.Expression pnnx_expr_12707 1 1 7275 7276 expr=[@0] #7275=(4096)i64 pnnx.Expression pnnx_expr_12704 0 1 7278 expr=[64,64,-1] Tensor.index Tensor.index_356 2 1 relative_position_bias_table.63 7276 7277 $input=relative_position_bias_table.63 $expr=7276 #relative_position_bias_table.63=(225,6)f32 #7277=(4096,6)f32 pnnx.Expression pnnx_expr_12700 0 1 7280 expr=[2,0,1] Tensor.view Tensor.view_1314 2 1 7277 7278 relative_position_bias.63 $input=7277 $shape=7278 #7277=(4096,6)f32 #relative_position_bias.63=(64,64,6)f32 pnnx.Expression pnnx_expr_12699 0 1 21969 expr=0 torch.permute torch.permute_2650 2 1 relative_position_bias.63 7280 7281 $input=relative_position_bias.63 $dims=7280 #relative_position_bias.63=(64,64,6)f32 #7281=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_93 1 1 7281 relative_position_bias0.63 memory_format=torch.contiguous_format $input=7281 #7281=(6,64,64)f32 #relative_position_bias0.63=(6,64,64)f32 torch.transpose torch.transpose_3031 3 1 k.63 7240 7241 7272 $input=k.63 $dim0=7240 $dim1=7241 #k.63=(36,6,64,32)f32 #7272=(36,6,32,64)f32 torch.matmul torch.matmul_2264 2 1 q0.63 7272 attn.127 $input=q0.63 $other=7272 #q0.63=(36,6,64,32)f32 #7272=(36,6,32,64)f32 #attn.127=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3278 2 1 relative_position_bias0.63 21969 7283 $input=relative_position_bias0.63 $dim=21969 #relative_position_bias0.63=(6,64,64)f32 #7283=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12697 2 1 attn.127 7283 input.145 expr=add(@0,@1) #attn.127=(36,6,64,64)f32 #7283=(1,6,64,64)f32 #input.145=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.0.attn.softmax 1 1 input.145 7285 dim=-1 #input.145=(36,6,64,64)f32 #7285=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.0.attn.attn_drop 1 1 7285 7286 #7285=(36,6,64,64)f32 #7286=(36,6,64,64)f32 Tensor.select Tensor.select_742 3 1 qkv0.63 21960 21961 v.63 $input=qkv0.63 $dim=21960 $index=21961 #qkv0.63=(3,36,6,64,32)f32 #v.63=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12696 0 1 21971 expr=1 pnnx.Expression pnnx_expr_12695 0 1 21972 expr=2 torch.matmul torch.matmul_2265 2 1 7286 v.63 7287 $input=7286 $other=v.63 #7286=(36,6,64,64)f32 #v.63=(36,6,64,32)f32 #7287=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12694 1 1 x1.63 7289 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.63=(36,64,192)f32 torch.transpose torch.transpose_3032 3 1 7287 21971 21972 7288 $input=7287 $dim0=21971 $dim1=21972 #7287=(36,6,64,32)f32 #7288=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_495 2 1 7288 7289 input0.67 $input=7288 $shape=7289 #7288=(36,64,6,32)f32 #input0.67=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.0.attn.proj 1 1 input0.67 7291 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 
#input0.67=(36,64,192)f32 #7291=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.0.attn.proj_drop 1 1 7291 7292 #7291=(36,64,192)f32 #7292=(36,64,192)f32 pnnx.Expression pnnx_expr_12690 1 1 7160 7293 expr=[-1,8,8,int(size(@0,2))] #7160=(1,2304,192)f32 pnnx.Expression pnnx_expr_12677 1 1 154 7299 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12670 0 1 7301 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1315 2 1 7292 7293 windows.63 $input=7292 $shape=7293 #7292=(36,64,192)f32 #windows.63=(36,8,8,192)f32 Tensor.view Tensor.view_1316 2 1 windows.63 7299 x2.63 $input=windows.63 $shape=7299 #windows.63=(36,8,8,192)f32 #x2.63=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12667 1 1 154 7304 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2651 2 1 x2.63 7301 7302 $input=x2.63 $dims=7301 #x2.63=(1,6,6,8,8,192)f32 #7302=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_94 1 1 7302 7303 memory_format=torch.contiguous_format $input=7302 #7302=(1,6,8,6,8,192)f32 #7303=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12664 2 1 7160 154 7308 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7160=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1317 2 1 7303 7304 x3.63 $input=7303 $shape=7304 #7303=(1,6,8,6,8,192)f32 #x3.63=(1,48,48,192)f32 Tensor.view Tensor.view_1318 2 1 x3.63 7308 x4.63 $input=x3.63 $shape=7308 #x3.63=(1,48,48,192)f32 #x4.63=(1,2304,192)f32 pnnx.Expression pnnx_expr_12662 2 1 7160 x4.63 input.147 expr=add(@0,@1) #7160=(1,2304,192)f32 #x4.63=(1,2304,192)f32 #input.147=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.0.norm2 1 1 input.147 7312 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.147=(1,2304,192)f32 #7312=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.0.mlp.fc1 1 1 7312 7317 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7312=(1,2304,192)f32 #7317=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.0.mlp.act 1 1 7317 7318 #7317=(1,2304,384)f32 #7318=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.0.mlp.drop 1 1 7318 7319 #7318=(1,2304,384)f32 #7319=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.0.mlp.fc2 1 1 7319 7320 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7319=(1,2304,384)f32 #7320=(1,2304,192)f32 nn.Dropout pnnx_unique_62 1 1 7320 7321 #7320=(1,2304,192)f32 #7321=(1,2304,192)f32 pnnx.Expression pnnx_expr_12660 2 1 input.147 7321 7323 expr=add(@0,@1) #input.147=(1,2304,192)f32 #7321=(1,2304,192)f32 #7323=(1,2304,192)f32 pnnx.Attribute layers_dfe.5.residual_group.blocks.1 0 1 attn_mask.33 @attn_mask=(36,64,64)f32 #attn_mask.33=(36,64,64)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.1.norm1 1 1 7323 7352 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7323=(1,2304,192)f32 #7352=(1,2304,192)f32 pnnx.Expression pnnx_expr_12637 2 1 7323 154 7353 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7323=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12635 0 1 7355 expr=[-4,-4] pnnx.Expression pnnx_expr_12633 0 1 7356 expr=[1,2] Tensor.view Tensor.view_1319 2 1 7352 7353 x.65 $input=7352 $shape=7353 #7352=(1,2304,192)f32 #x.65=(1,48,48,192)f32 torch.roll torch.roll_2450 3 1 x.65 7355 7356 x0.65 $input=x.65 $shifts=7355 $dims=7356 #x.65=(1,48,48,192)f32 
#x0.65=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12611 1 1 x0.65 7373 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.65=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12606 0 1 7375 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1320 2 1 x0.65 7373 x1.65 $input=x0.65 $shape=7373 #x0.65=(1,48,48,192)f32 #x1.65=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12603 1 1 x0.65 7378 expr=[-1,8,8,int(size(@0,3))] #x0.65=(1,48,48,192)f32 torch.permute torch.permute_2652 2 1 x1.65 7375 7376 $input=x1.65 $dims=7375 #x1.65=(1,6,8,6,8,192)f32 #7376=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_95 1 1 7376 7377 memory_format=torch.contiguous_format $input=7376 #7376=(1,6,6,8,8,192)f32 #7377=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12601 1 1 7323 7380 expr=[-1,64,int(size(@0,2))] #7323=(1,2304,192)f32 pnnx.Expression pnnx_expr_12591 0 1 7391 expr=-2 pnnx.Expression pnnx_expr_12590 0 1 7392 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.1.attn 0 1 relative_position_bias_table.65 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.65=(225,6)f32 pnnx.Attribute pnnx_unique_63 0 1 relative_position_index.65 @relative_position_index=(64,64)i64 #relative_position_index.65=(64,64)i64 Tensor.view Tensor.view_1321 2 1 7377 7378 x_windows.65 $input=7377 $shape=7378 #7377=(1,6,6,8,8,192)f32 #x_windows.65=(36,8,8,192)f32 Tensor.view Tensor.view_1322 2 1 x_windows.65 7380 x2.65 $input=x_windows.65 $shape=7380 #x_windows.65=(36,8,8,192)f32 #x2.65=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.1.attn.qkv 1 1 x2.65 7416 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.65=(36,64,192)f32 #7416=(36,64,576)f32 pnnx.Expression pnnx_expr_12571 1 1 x2.65 7419 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.65=(36,64,192)f32 pnnx.Expression pnnx_expr_12566 0 1 7421 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_496 2 1 7416 7419 7420 $input=7416 $shape=7419 #7416=(36,64,576)f32 #7420=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12565 0 1 22015 expr=0 pnnx.Expression pnnx_expr_12564 0 1 22016 expr=0 pnnx.Expression pnnx_expr_12563 0 1 22017 expr=0 pnnx.Expression pnnx_expr_12562 0 1 22018 expr=1 pnnx.Expression pnnx_expr_12561 0 1 22019 expr=0 pnnx.Expression pnnx_expr_12560 0 1 22020 expr=2 torch.permute torch.permute_2653 2 1 7420 7421 qkv0.65 $input=7420 $dims=7421 #7420=(36,64,3,6,32)f32 #qkv0.65=(3,36,6,64,32)f32 Tensor.select Tensor.select_743 3 1 qkv0.65 22015 22016 q.65 $input=qkv0.65 $dim=22015 $index=22016 #qkv0.65=(3,36,6,64,32)f32 #q.65=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12559 1 1 q.65 q0.65 expr=mul(@0,1.767767e-01) #q.65=(36,6,64,32)f32 #q0.65=(36,6,64,32)f32 Tensor.select Tensor.select_744 3 1 qkv0.65 22017 22018 k.65 $input=qkv0.65 $dim=22017 $index=22018 #qkv0.65=(3,36,6,64,32)f32 #k.65=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12557 0 1 7429 expr=[-1] Tensor.view Tensor.view_1323 2 1 relative_position_index.65 7429 7430 $input=relative_position_index.65 $shape=7429 #relative_position_index.65=(64,64)i64 #7430=(4096)i64 pnnx.Expression pnnx_expr_12556 1 1 7430 7431 expr=[@0] #7430=(4096)i64 pnnx.Expression pnnx_expr_12553 0 1 7433 expr=[64,64,-1] Tensor.index Tensor.index_357 2 1 relative_position_bias_table.65 7431 7432 $input=relative_position_bias_table.65 $expr=7431 #relative_position_bias_table.65=(225,6)f32 #7432=(4096,6)f32 pnnx.Expression pnnx_expr_12549 0 1 7435 expr=[2,0,1] Tensor.view Tensor.view_1324 2 1 7432 7433 relative_position_bias.65 
$input=7432 $shape=7433 #7432=(4096,6)f32 #relative_position_bias.65=(64,64,6)f32 pnnx.Expression pnnx_expr_12548 0 1 22028 expr=0 torch.permute torch.permute_2654 2 1 relative_position_bias.65 7435 7436 $input=relative_position_bias.65 $dims=7435 #relative_position_bias.65=(64,64,6)f32 #7436=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_96 1 1 7436 relative_position_bias0.65 memory_format=torch.contiguous_format $input=7436 #7436=(6,64,64)f32 #relative_position_bias0.65=(6,64,64)f32 torch.transpose torch.transpose_3033 3 1 k.65 7391 7392 7427 $input=k.65 $dim0=7391 $dim1=7392 #k.65=(36,6,64,32)f32 #7427=(36,6,32,64)f32 torch.matmul torch.matmul_2266 2 1 q0.65 7427 attn.131 $input=q0.65 $other=7427 #q0.65=(36,6,64,32)f32 #7427=(36,6,32,64)f32 #attn.131=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3279 2 1 relative_position_bias0.65 22028 7438 $input=relative_position_bias0.65 $dim=22028 #relative_position_bias0.65=(6,64,64)f32 #7438=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12546 2 1 attn.131 7438 attn0.33 expr=add(@0,@1) #attn.131=(36,6,64,64)f32 #7438=(1,6,64,64)f32 #attn0.33=(36,6,64,64)f32 pnnx.Expression pnnx_expr_12537 2 1 x2.65 attn_mask.33 7445 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.65=(36,64,192)f32 #attn_mask.33=(36,64,64)f32 pnnx.Expression pnnx_expr_12536 0 1 22033 expr=1 pnnx.Expression pnnx_expr_12535 0 1 22034 expr=0 Tensor.view Tensor.view_1325 2 1 attn0.33 7445 7446 $input=attn0.33 $shape=7445 #attn0.33=(36,6,64,64)f32 #7446=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3280 2 1 attn_mask.33 22033 7447 $input=attn_mask.33 $dim=22033 #attn_mask.33=(36,64,64)f32 #7447=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3281 2 1 7447 22034 7448 $input=7447 $dim=22034 #7447=(36,1,64,64)f32 #7448=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_12533 2 1 7446 7448 attn1.33 expr=add(@0,@1) #7446=(1,36,6,64,64)f32 #7448=(1,36,1,64,64)f32 #attn1.33=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_12530 1 1 x2.65 7450 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.65=(36,64,192)f32 Tensor.view Tensor.view_1326 2 1 attn1.33 7450 input.149 $input=attn1.33 $shape=7450 #attn1.33=(1,36,6,64,64)f32 #input.149=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.1.attn.softmax 1 1 input.149 7452 dim=-1 #input.149=(36,6,64,64)f32 #7452=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.1.attn.attn_drop 1 1 7452 7453 #7452=(36,6,64,64)f32 #7453=(36,6,64,64)f32 Tensor.select Tensor.select_745 3 1 qkv0.65 22019 22020 v.65 $input=qkv0.65 $dim=22019 $index=22020 #qkv0.65=(3,36,6,64,32)f32 #v.65=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12529 0 1 22038 expr=1 pnnx.Expression pnnx_expr_12528 0 1 22039 expr=2 torch.matmul torch.matmul_2267 2 1 7453 v.65 7454 $input=7453 $other=v.65 #7453=(36,6,64,64)f32 #v.65=(36,6,64,32)f32 #7454=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12527 1 1 x2.65 7456 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.65=(36,64,192)f32 torch.transpose torch.transpose_3034 3 1 7454 22038 22039 7455 $input=7454 $dim0=22038 $dim1=22039 #7454=(36,6,64,32)f32 #7455=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_497 2 1 7455 7456 input0.69 $input=7455 $shape=7456 #7455=(36,64,6,32)f32 #input0.69=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.1.attn.proj 1 1 input0.69 7458 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.69=(36,64,192)f32 #7458=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.1.attn.proj_drop 1 1 7458 7459 
#7458=(36,64,192)f32 #7459=(36,64,192)f32 pnnx.Expression pnnx_expr_12523 1 1 7323 7460 expr=[-1,8,8,int(size(@0,2))] #7323=(1,2304,192)f32 pnnx.Expression pnnx_expr_12510 1 1 154 7466 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12503 0 1 7468 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1327 2 1 7459 7460 windows.65 $input=7459 $shape=7460 #7459=(36,64,192)f32 #windows.65=(36,8,8,192)f32 Tensor.view Tensor.view_1328 2 1 windows.65 7466 x3.65 $input=windows.65 $shape=7466 #windows.65=(36,8,8,192)f32 #x3.65=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12500 1 1 154 7471 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2655 2 1 x3.65 7468 7469 $input=x3.65 $dims=7468 #x3.65=(1,6,6,8,8,192)f32 #7469=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_97 1 1 7469 7470 memory_format=torch.contiguous_format $input=7469 #7469=(1,6,8,6,8,192)f32 #7470=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12497 0 1 7473 expr=[4,4] pnnx.Expression pnnx_expr_12494 0 1 7474 expr=[1,2] Tensor.view Tensor.view_1329 2 1 7470 7471 shifted_x.33 $input=7470 $shape=7471 #7470=(1,6,8,6,8,192)f32 #shifted_x.33=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12491 2 1 7323 154 7478 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7323=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2451 3 1 shifted_x.33 7473 7474 x4.65 $input=shifted_x.33 $shifts=7473 $dims=7474 #shifted_x.33=(1,48,48,192)f32 #x4.65=(1,48,48,192)f32 Tensor.view Tensor.view_1330 2 1 x4.65 7478 x5.33 $input=x4.65 $shape=7478 #x4.65=(1,48,48,192)f32 #x5.33=(1,2304,192)f32 pnnx.Expression pnnx_expr_12489 2 1 7323 x5.33 input.151 expr=add(@0,@1) #7323=(1,2304,192)f32 #x5.33=(1,2304,192)f32 #input.151=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.1.norm2 1 1 input.151 7482 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.151=(1,2304,192)f32 #7482=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.1.mlp.fc1 1 1 7482 7487 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7482=(1,2304,192)f32 #7487=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.1.mlp.act 1 1 7487 7488 #7487=(1,2304,384)f32 #7488=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.1.mlp.drop 1 1 7488 7489 #7488=(1,2304,384)f32 #7489=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.1.mlp.fc2 1 1 7489 7490 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7489=(1,2304,384)f32 #7490=(1,2304,192)f32 nn.Dropout pnnx_unique_64 1 1 7490 7491 #7490=(1,2304,192)f32 #7491=(1,2304,192)f32 pnnx.Expression pnnx_expr_12487 2 1 input.151 7491 7493 expr=add(@0,@1) #input.151=(1,2304,192)f32 #7491=(1,2304,192)f32 #7493=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.2.norm1 1 1 7493 7520 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7493=(1,2304,192)f32 #7520=(1,2304,192)f32 pnnx.Expression pnnx_expr_12465 2 1 7493 154 7521 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7493=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1331 2 1 7520 7521 x.67 $input=7520 $shape=7521 #7520=(1,2304,192)f32 #x.67=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12444 1 1 x.67 7538 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.67=(1,48,48,192)f32 pnnx.Expression 
pnnx_expr_12439 0 1 7540 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1332 2 1 x.67 7538 x0.67 $input=x.67 $shape=7538 #x.67=(1,48,48,192)f32 #x0.67=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12436 1 1 x.67 7543 expr=[-1,8,8,int(size(@0,3))] #x.67=(1,48,48,192)f32 torch.permute torch.permute_2656 2 1 x0.67 7540 7541 $input=x0.67 $dims=7540 #x0.67=(1,6,8,6,8,192)f32 #7541=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_98 1 1 7541 7542 memory_format=torch.contiguous_format $input=7541 #7541=(1,6,6,8,8,192)f32 #7542=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12434 1 1 7493 7545 expr=[-1,64,int(size(@0,2))] #7493=(1,2304,192)f32 pnnx.Expression pnnx_expr_12424 0 1 7556 expr=-2 pnnx.Expression pnnx_expr_12423 0 1 7557 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.2.attn 0 1 relative_position_bias_table.67 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.67=(225,6)f32 pnnx.Attribute pnnx_unique_65 0 1 relative_position_index.67 @relative_position_index=(64,64)i64 #relative_position_index.67=(64,64)i64 Tensor.view Tensor.view_1333 2 1 7542 7543 x_windows.67 $input=7542 $shape=7543 #7542=(1,6,6,8,8,192)f32 #x_windows.67=(36,8,8,192)f32 Tensor.view Tensor.view_1334 2 1 x_windows.67 7545 x1.67 $input=x_windows.67 $shape=7545 #x_windows.67=(36,8,8,192)f32 #x1.67=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.2.attn.qkv 1 1 x1.67 7577 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.67=(36,64,192)f32 #7577=(36,64,576)f32 pnnx.Expression pnnx_expr_12408 1 1 x1.67 7580 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.67=(36,64,192)f32 pnnx.Expression pnnx_expr_12403 0 1 7582 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_498 2 1 7577 7580 7581 $input=7577 $shape=7580 #7577=(36,64,576)f32 #7581=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12402 0 1 22083 expr=0 pnnx.Expression pnnx_expr_12401 0 1 22084 expr=0 pnnx.Expression pnnx_expr_12400 0 1 22085 expr=0 pnnx.Expression pnnx_expr_12399 0 1 22086 expr=1 pnnx.Expression pnnx_expr_12398 0 1 22087 expr=0 pnnx.Expression pnnx_expr_12397 0 1 22088 expr=2 torch.permute torch.permute_2657 2 1 7581 7582 qkv0.67 $input=7581 $dims=7582 #7581=(36,64,3,6,32)f32 #qkv0.67=(3,36,6,64,32)f32 Tensor.select Tensor.select_746 3 1 qkv0.67 22083 22084 q.67 $input=qkv0.67 $dim=22083 $index=22084 #qkv0.67=(3,36,6,64,32)f32 #q.67=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12396 1 1 q.67 q0.67 expr=mul(@0,1.767767e-01) #q.67=(36,6,64,32)f32 #q0.67=(36,6,64,32)f32 Tensor.select Tensor.select_747 3 1 qkv0.67 22085 22086 k.67 $input=qkv0.67 $dim=22085 $index=22086 #qkv0.67=(3,36,6,64,32)f32 #k.67=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12394 0 1 7590 expr=[-1] Tensor.view Tensor.view_1335 2 1 relative_position_index.67 7590 7591 $input=relative_position_index.67 $shape=7590 #relative_position_index.67=(64,64)i64 #7591=(4096)i64 pnnx.Expression pnnx_expr_12393 1 1 7591 7592 expr=[@0] #7591=(4096)i64 pnnx.Expression pnnx_expr_12390 0 1 7594 expr=[64,64,-1] Tensor.index Tensor.index_358 2 1 relative_position_bias_table.67 7592 7593 $input=relative_position_bias_table.67 $expr=7592 #relative_position_bias_table.67=(225,6)f32 #7593=(4096,6)f32 pnnx.Expression pnnx_expr_12386 0 1 7596 expr=[2,0,1] Tensor.view Tensor.view_1336 2 1 7593 7594 relative_position_bias.67 $input=7593 $shape=7594 #7593=(4096,6)f32 #relative_position_bias.67=(64,64,6)f32 pnnx.Expression pnnx_expr_12385 0 1 22096 expr=0 torch.permute torch.permute_2658 2 1 relative_position_bias.67 7596 7597 
$input=relative_position_bias.67 $dims=7596 #relative_position_bias.67=(64,64,6)f32 #7597=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_99 1 1 7597 relative_position_bias0.67 memory_format=torch.contiguous_format $input=7597 #7597=(6,64,64)f32 #relative_position_bias0.67=(6,64,64)f32 torch.transpose torch.transpose_3035 3 1 k.67 7556 7557 7588 $input=k.67 $dim0=7556 $dim1=7557 #k.67=(36,6,64,32)f32 #7588=(36,6,32,64)f32 torch.matmul torch.matmul_2268 2 1 q0.67 7588 attn.135 $input=q0.67 $other=7588 #q0.67=(36,6,64,32)f32 #7588=(36,6,32,64)f32 #attn.135=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3282 2 1 relative_position_bias0.67 22096 7599 $input=relative_position_bias0.67 $dim=22096 #relative_position_bias0.67=(6,64,64)f32 #7599=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12383 2 1 attn.135 7599 input.153 expr=add(@0,@1) #attn.135=(36,6,64,64)f32 #7599=(1,6,64,64)f32 #input.153=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.2.attn.softmax 1 1 input.153 7601 dim=-1 #input.153=(36,6,64,64)f32 #7601=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.2.attn.attn_drop 1 1 7601 7602 #7601=(36,6,64,64)f32 #7602=(36,6,64,64)f32 Tensor.select Tensor.select_748 3 1 qkv0.67 22087 22088 v.67 $input=qkv0.67 $dim=22087 $index=22088 #qkv0.67=(3,36,6,64,32)f32 #v.67=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12382 0 1 22098 expr=1 pnnx.Expression pnnx_expr_12381 0 1 22099 expr=2 torch.matmul torch.matmul_2269 2 1 7602 v.67 7603 $input=7602 $other=v.67 #7602=(36,6,64,64)f32 #v.67=(36,6,64,32)f32 #7603=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12380 1 1 x1.67 7605 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.67=(36,64,192)f32 torch.transpose torch.transpose_3036 3 1 7603 22098 22099 7604 $input=7603 $dim0=22098 $dim1=22099 #7603=(36,6,64,32)f32 #7604=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_499 2 1 7604 7605 input0.71 $input=7604 $shape=7605 #7604=(36,64,6,32)f32 #input0.71=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.2.attn.proj 1 1 input0.71 7607 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.71=(36,64,192)f32 #7607=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.2.attn.proj_drop 1 1 7607 7608 #7607=(36,64,192)f32 #7608=(36,64,192)f32 pnnx.Expression pnnx_expr_12376 1 1 7493 7609 expr=[-1,8,8,int(size(@0,2))] #7493=(1,2304,192)f32 pnnx.Expression pnnx_expr_12363 1 1 154 7615 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12356 0 1 7617 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1337 2 1 7608 7609 windows.67 $input=7608 $shape=7609 #7608=(36,64,192)f32 #windows.67=(36,8,8,192)f32 Tensor.view Tensor.view_1338 2 1 windows.67 7615 x2.67 $input=windows.67 $shape=7615 #windows.67=(36,8,8,192)f32 #x2.67=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12353 1 1 154 7620 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2659 2 1 x2.67 7617 7618 $input=x2.67 $dims=7617 #x2.67=(1,6,6,8,8,192)f32 #7618=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_100 1 1 7618 7619 memory_format=torch.contiguous_format $input=7618 #7618=(1,6,8,6,8,192)f32 #7619=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12350 2 1 7493 154 7624 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7493=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1339 2 1 7619 7620 x3.67 $input=7619 $shape=7620 #7619=(1,6,8,6,8,192)f32 #x3.67=(1,48,48,192)f32 Tensor.view Tensor.view_1340 2 1 x3.67 7624 
x4.67 $input=x3.67 $shape=7624 #x3.67=(1,48,48,192)f32 #x4.67=(1,2304,192)f32 pnnx.Expression pnnx_expr_12348 2 1 7493 x4.67 input.155 expr=add(@0,@1) #7493=(1,2304,192)f32 #x4.67=(1,2304,192)f32 #input.155=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.2.norm2 1 1 input.155 7628 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.155=(1,2304,192)f32 #7628=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.2.mlp.fc1 1 1 7628 7633 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7628=(1,2304,192)f32 #7633=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.2.mlp.act 1 1 7633 7634 #7633=(1,2304,384)f32 #7634=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.2.mlp.drop 1 1 7634 7635 #7634=(1,2304,384)f32 #7635=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.2.mlp.fc2 1 1 7635 7636 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7635=(1,2304,384)f32 #7636=(1,2304,192)f32 nn.Dropout pnnx_unique_66 1 1 7636 7637 #7636=(1,2304,192)f32 #7637=(1,2304,192)f32 pnnx.Expression pnnx_expr_12346 2 1 input.155 7637 7639 expr=add(@0,@1) #input.155=(1,2304,192)f32 #7637=(1,2304,192)f32 #7639=(1,2304,192)f32 pnnx.Attribute layers_dfe.5.residual_group.blocks.3 0 1 attn_mask.35 @attn_mask=(36,64,64)f32 #attn_mask.35=(36,64,64)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.3.norm1 1 1 7639 7668 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7639=(1,2304,192)f32 #7668=(1,2304,192)f32 pnnx.Expression pnnx_expr_12323 2 1 7639 154 7669 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7639=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12321 0 1 7671 expr=[-4,-4] pnnx.Expression pnnx_expr_12319 0 1 7672 expr=[1,2] Tensor.view Tensor.view_1341 2 1 7668 7669 x.69 $input=7668 $shape=7669 #7668=(1,2304,192)f32 #x.69=(1,48,48,192)f32 torch.roll torch.roll_2452 3 1 x.69 7671 7672 x0.69 $input=x.69 $shifts=7671 $dims=7672 #x.69=(1,48,48,192)f32 #x0.69=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12297 1 1 x0.69 7689 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.69=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12292 0 1 7691 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1342 2 1 x0.69 7689 x1.69 $input=x0.69 $shape=7689 #x0.69=(1,48,48,192)f32 #x1.69=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12289 1 1 x0.69 7694 expr=[-1,8,8,int(size(@0,3))] #x0.69=(1,48,48,192)f32 torch.permute torch.permute_2660 2 1 x1.69 7691 7692 $input=x1.69 $dims=7691 #x1.69=(1,6,8,6,8,192)f32 #7692=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_101 1 1 7692 7693 memory_format=torch.contiguous_format $input=7692 #7692=(1,6,6,8,8,192)f32 #7693=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12287 1 1 7639 7696 expr=[-1,64,int(size(@0,2))] #7639=(1,2304,192)f32 pnnx.Expression pnnx_expr_12277 0 1 7707 expr=-2 pnnx.Expression pnnx_expr_12276 0 1 7708 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.3.attn 0 1 relative_position_bias_table.69 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.69=(225,6)f32 pnnx.Attribute pnnx_unique_67 0 1 relative_position_index.69 @relative_position_index=(64,64)i64 #relative_position_index.69=(64,64)i64 Tensor.view Tensor.view_1343 2 1 7693 7694 x_windows.69 $input=7693 $shape=7694 #7693=(1,6,6,8,8,192)f32 #x_windows.69=(36,8,8,192)f32 Tensor.view Tensor.view_1344 2 1 x_windows.69 
7696 x2.69 $input=x_windows.69 $shape=7696 #x_windows.69=(36,8,8,192)f32 #x2.69=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.3.attn.qkv 1 1 x2.69 7732 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.69=(36,64,192)f32 #7732=(36,64,576)f32 pnnx.Expression pnnx_expr_12257 1 1 x2.69 7735 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.69=(36,64,192)f32 pnnx.Expression pnnx_expr_12252 0 1 7737 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_500 2 1 7732 7735 7736 $input=7732 $shape=7735 #7732=(36,64,576)f32 #7736=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12251 0 1 22142 expr=0 pnnx.Expression pnnx_expr_12250 0 1 22143 expr=0 pnnx.Expression pnnx_expr_12249 0 1 22144 expr=0 pnnx.Expression pnnx_expr_12248 0 1 22145 expr=1 pnnx.Expression pnnx_expr_12247 0 1 22146 expr=0 pnnx.Expression pnnx_expr_12246 0 1 22147 expr=2 torch.permute torch.permute_2661 2 1 7736 7737 qkv0.69 $input=7736 $dims=7737 #7736=(36,64,3,6,32)f32 #qkv0.69=(3,36,6,64,32)f32 Tensor.select Tensor.select_749 3 1 qkv0.69 22142 22143 q.69 $input=qkv0.69 $dim=22142 $index=22143 #qkv0.69=(3,36,6,64,32)f32 #q.69=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12245 1 1 q.69 q0.69 expr=mul(@0,1.767767e-01) #q.69=(36,6,64,32)f32 #q0.69=(36,6,64,32)f32 Tensor.select Tensor.select_750 3 1 qkv0.69 22144 22145 k.69 $input=qkv0.69 $dim=22144 $index=22145 #qkv0.69=(3,36,6,64,32)f32 #k.69=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12243 0 1 7745 expr=[-1] Tensor.view Tensor.view_1345 2 1 relative_position_index.69 7745 7746 $input=relative_position_index.69 $shape=7745 #relative_position_index.69=(64,64)i64 #7746=(4096)i64 pnnx.Expression pnnx_expr_12242 1 1 7746 7747 expr=[@0] #7746=(4096)i64 pnnx.Expression pnnx_expr_12239 0 1 7749 expr=[64,64,-1] Tensor.index Tensor.index_359 2 1 relative_position_bias_table.69 7747 7748 $input=relative_position_bias_table.69 $expr=7747 #relative_position_bias_table.69=(225,6)f32 #7748=(4096,6)f32 pnnx.Expression pnnx_expr_12235 0 1 7751 expr=[2,0,1] Tensor.view Tensor.view_1346 2 1 7748 7749 relative_position_bias.69 $input=7748 $shape=7749 #7748=(4096,6)f32 #relative_position_bias.69=(64,64,6)f32 pnnx.Expression pnnx_expr_12234 0 1 22155 expr=0 torch.permute torch.permute_2662 2 1 relative_position_bias.69 7751 7752 $input=relative_position_bias.69 $dims=7751 #relative_position_bias.69=(64,64,6)f32 #7752=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_102 1 1 7752 relative_position_bias0.69 memory_format=torch.contiguous_format $input=7752 #7752=(6,64,64)f32 #relative_position_bias0.69=(6,64,64)f32 torch.transpose torch.transpose_3037 3 1 k.69 7707 7708 7743 $input=k.69 $dim0=7707 $dim1=7708 #k.69=(36,6,64,32)f32 #7743=(36,6,32,64)f32 torch.matmul torch.matmul_2270 2 1 q0.69 7743 attn.139 $input=q0.69 $other=7743 #q0.69=(36,6,64,32)f32 #7743=(36,6,32,64)f32 #attn.139=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3283 2 1 relative_position_bias0.69 22155 7754 $input=relative_position_bias0.69 $dim=22155 #relative_position_bias0.69=(6,64,64)f32 #7754=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12232 2 1 attn.139 7754 attn0.35 expr=add(@0,@1) #attn.139=(36,6,64,64)f32 #7754=(1,6,64,64)f32 #attn0.35=(36,6,64,64)f32 pnnx.Expression pnnx_expr_12223 2 1 x2.69 attn_mask.35 7761 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.69=(36,64,192)f32 #attn_mask.35=(36,64,64)f32 pnnx.Expression pnnx_expr_12222 0 1 22160 expr=1 pnnx.Expression pnnx_expr_12221 0 1 22161 expr=0 Tensor.view Tensor.view_1347 2 1 
attn0.35 7761 7762 $input=attn0.35 $shape=7761 #attn0.35=(36,6,64,64)f32 #7762=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3284 2 1 attn_mask.35 22160 7763 $input=attn_mask.35 $dim=22160 #attn_mask.35=(36,64,64)f32 #7763=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3285 2 1 7763 22161 7764 $input=7763 $dim=22161 #7763=(36,1,64,64)f32 #7764=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_12219 2 1 7762 7764 attn1.35 expr=add(@0,@1) #7762=(1,36,6,64,64)f32 #7764=(1,36,1,64,64)f32 #attn1.35=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_12216 1 1 x2.69 7766 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.69=(36,64,192)f32 Tensor.view Tensor.view_1348 2 1 attn1.35 7766 input.157 $input=attn1.35 $shape=7766 #attn1.35=(1,36,6,64,64)f32 #input.157=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.3.attn.softmax 1 1 input.157 7768 dim=-1 #input.157=(36,6,64,64)f32 #7768=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.3.attn.attn_drop 1 1 7768 7769 #7768=(36,6,64,64)f32 #7769=(36,6,64,64)f32 Tensor.select Tensor.select_751 3 1 qkv0.69 22146 22147 v.69 $input=qkv0.69 $dim=22146 $index=22147 #qkv0.69=(3,36,6,64,32)f32 #v.69=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12215 0 1 22165 expr=1 pnnx.Expression pnnx_expr_12214 0 1 22166 expr=2 torch.matmul torch.matmul_2271 2 1 7769 v.69 7770 $input=7769 $other=v.69 #7769=(36,6,64,64)f32 #v.69=(36,6,64,32)f32 #7770=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12213 1 1 x2.69 7772 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.69=(36,64,192)f32 torch.transpose torch.transpose_3038 3 1 7770 22165 22166 7771 $input=7770 $dim0=22165 $dim1=22166 #7770=(36,6,64,32)f32 #7771=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_501 2 1 7771 7772 input0.73 $input=7771 $shape=7772 #7771=(36,64,6,32)f32 #input0.73=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.3.attn.proj 1 1 input0.73 7774 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.73=(36,64,192)f32 #7774=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.3.attn.proj_drop 1 1 7774 7775 #7774=(36,64,192)f32 #7775=(36,64,192)f32 pnnx.Expression pnnx_expr_12209 1 1 7639 7776 expr=[-1,8,8,int(size(@0,2))] #7639=(1,2304,192)f32 pnnx.Expression pnnx_expr_12196 1 1 154 7782 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12189 0 1 7784 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1349 2 1 7775 7776 windows.69 $input=7775 $shape=7776 #7775=(36,64,192)f32 #windows.69=(36,8,8,192)f32 Tensor.view Tensor.view_1350 2 1 windows.69 7782 x3.69 $input=windows.69 $shape=7782 #windows.69=(36,8,8,192)f32 #x3.69=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12186 1 1 154 7787 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2663 2 1 x3.69 7784 7785 $input=x3.69 $dims=7784 #x3.69=(1,6,6,8,8,192)f32 #7785=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_103 1 1 7785 7786 memory_format=torch.contiguous_format $input=7785 #7785=(1,6,8,6,8,192)f32 #7786=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12183 0 1 7789 expr=[4,4] pnnx.Expression pnnx_expr_12180 0 1 7790 expr=[1,2] Tensor.view Tensor.view_1351 2 1 7786 7787 shifted_x.35 $input=7786 $shape=7787 #7786=(1,6,8,6,8,192)f32 #shifted_x.35=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12177 2 1 7639 154 7794 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7639=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2453 3 1 shifted_x.35 7789 7790 x4.69 
$input=shifted_x.35 $shifts=7789 $dims=7790 #shifted_x.35=(1,48,48,192)f32 #x4.69=(1,48,48,192)f32 Tensor.view Tensor.view_1352 2 1 x4.69 7794 x5.35 $input=x4.69 $shape=7794 #x4.69=(1,48,48,192)f32 #x5.35=(1,2304,192)f32 pnnx.Expression pnnx_expr_12175 2 1 7639 x5.35 input.159 expr=add(@0,@1) #7639=(1,2304,192)f32 #x5.35=(1,2304,192)f32 #input.159=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.3.norm2 1 1 input.159 7798 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.159=(1,2304,192)f32 #7798=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.3.mlp.fc1 1 1 7798 7803 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7798=(1,2304,192)f32 #7803=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.3.mlp.act 1 1 7803 7804 #7803=(1,2304,384)f32 #7804=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.3.mlp.drop 1 1 7804 7805 #7804=(1,2304,384)f32 #7805=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.3.mlp.fc2 1 1 7805 7806 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7805=(1,2304,384)f32 #7806=(1,2304,192)f32 nn.Dropout pnnx_unique_68 1 1 7806 7807 #7806=(1,2304,192)f32 #7807=(1,2304,192)f32 pnnx.Expression pnnx_expr_12173 2 1 input.159 7807 7809 expr=add(@0,@1) #input.159=(1,2304,192)f32 #7807=(1,2304,192)f32 #7809=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.4.norm1 1 1 7809 7836 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7809=(1,2304,192)f32 #7836=(1,2304,192)f32 pnnx.Expression pnnx_expr_12151 2 1 7809 154 7837 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7809=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1353 2 1 7836 7837 x.71 $input=7836 $shape=7837 #7836=(1,2304,192)f32 #x.71=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12130 1 1 x.71 7854 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.71=(1,48,48,192)f32 pnnx.Expression pnnx_expr_12125 0 1 7856 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1354 2 1 x.71 7854 x0.71 $input=x.71 $shape=7854 #x.71=(1,48,48,192)f32 #x0.71=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12122 1 1 x.71 7859 expr=[-1,8,8,int(size(@0,3))] #x.71=(1,48,48,192)f32 torch.permute torch.permute_2664 2 1 x0.71 7856 7857 $input=x0.71 $dims=7856 #x0.71=(1,6,8,6,8,192)f32 #7857=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_104 1 1 7857 7858 memory_format=torch.contiguous_format $input=7857 #7857=(1,6,6,8,8,192)f32 #7858=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12120 1 1 7809 7861 expr=[-1,64,int(size(@0,2))] #7809=(1,2304,192)f32 pnnx.Expression pnnx_expr_12110 0 1 7872 expr=-2 pnnx.Expression pnnx_expr_12109 0 1 7873 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.4.attn 0 1 relative_position_bias_table.71 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.71=(225,6)f32 pnnx.Attribute pnnx_unique_69 0 1 relative_position_index.71 @relative_position_index=(64,64)i64 #relative_position_index.71=(64,64)i64 Tensor.view Tensor.view_1355 2 1 7858 7859 x_windows.71 $input=7858 $shape=7859 #7858=(1,6,6,8,8,192)f32 #x_windows.71=(36,8,8,192)f32 Tensor.view Tensor.view_1356 2 1 x_windows.71 7861 x1.71 $input=x_windows.71 $shape=7861 #x_windows.71=(36,8,8,192)f32 #x1.71=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.4.attn.qkv 1 1 x1.71 7893 bias=True in_features=192 out_features=576 @bias=(576)f32 
@weight=(576,192)f32 #x1.71=(36,64,192)f32 #7893=(36,64,576)f32 pnnx.Expression pnnx_expr_12094 1 1 x1.71 7896 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.71=(36,64,192)f32 pnnx.Expression pnnx_expr_12089 0 1 7898 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_502 2 1 7893 7896 7897 $input=7893 $shape=7896 #7893=(36,64,576)f32 #7897=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_12088 0 1 22210 expr=0 pnnx.Expression pnnx_expr_12087 0 1 22211 expr=0 pnnx.Expression pnnx_expr_12086 0 1 22212 expr=0 pnnx.Expression pnnx_expr_12085 0 1 22213 expr=1 pnnx.Expression pnnx_expr_12084 0 1 22214 expr=0 pnnx.Expression pnnx_expr_12083 0 1 22215 expr=2 torch.permute torch.permute_2665 2 1 7897 7898 qkv0.71 $input=7897 $dims=7898 #7897=(36,64,3,6,32)f32 #qkv0.71=(3,36,6,64,32)f32 Tensor.select Tensor.select_752 3 1 qkv0.71 22210 22211 q.71 $input=qkv0.71 $dim=22210 $index=22211 #qkv0.71=(3,36,6,64,32)f32 #q.71=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12082 1 1 q.71 q0.71 expr=mul(@0,1.767767e-01) #q.71=(36,6,64,32)f32 #q0.71=(36,6,64,32)f32 Tensor.select Tensor.select_753 3 1 qkv0.71 22212 22213 k.71 $input=qkv0.71 $dim=22212 $index=22213 #qkv0.71=(3,36,6,64,32)f32 #k.71=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12080 0 1 7906 expr=[-1] Tensor.view Tensor.view_1357 2 1 relative_position_index.71 7906 7907 $input=relative_position_index.71 $shape=7906 #relative_position_index.71=(64,64)i64 #7907=(4096)i64 pnnx.Expression pnnx_expr_12079 1 1 7907 7908 expr=[@0] #7907=(4096)i64 pnnx.Expression pnnx_expr_12076 0 1 7910 expr=[64,64,-1] Tensor.index Tensor.index_360 2 1 relative_position_bias_table.71 7908 7909 $input=relative_position_bias_table.71 $expr=7908 #relative_position_bias_table.71=(225,6)f32 #7909=(4096,6)f32 pnnx.Expression pnnx_expr_12072 0 1 7912 expr=[2,0,1] Tensor.view Tensor.view_1358 2 1 7909 7910 relative_position_bias.71 $input=7909 $shape=7910 #7909=(4096,6)f32 #relative_position_bias.71=(64,64,6)f32 pnnx.Expression pnnx_expr_12071 0 1 22223 expr=0 torch.permute torch.permute_2666 2 1 relative_position_bias.71 7912 7913 $input=relative_position_bias.71 $dims=7912 #relative_position_bias.71=(64,64,6)f32 #7913=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_105 1 1 7913 relative_position_bias0.71 memory_format=torch.contiguous_format $input=7913 #7913=(6,64,64)f32 #relative_position_bias0.71=(6,64,64)f32 torch.transpose torch.transpose_3039 3 1 k.71 7872 7873 7904 $input=k.71 $dim0=7872 $dim1=7873 #k.71=(36,6,64,32)f32 #7904=(36,6,32,64)f32 torch.matmul torch.matmul_2272 2 1 q0.71 7904 attn.143 $input=q0.71 $other=7904 #q0.71=(36,6,64,32)f32 #7904=(36,6,32,64)f32 #attn.143=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3286 2 1 relative_position_bias0.71 22223 7915 $input=relative_position_bias0.71 $dim=22223 #relative_position_bias0.71=(6,64,64)f32 #7915=(1,6,64,64)f32 pnnx.Expression pnnx_expr_12069 2 1 attn.143 7915 input.161 expr=add(@0,@1) #attn.143=(36,6,64,64)f32 #7915=(1,6,64,64)f32 #input.161=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.4.attn.softmax 1 1 input.161 7917 dim=-1 #input.161=(36,6,64,64)f32 #7917=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.4.attn.attn_drop 1 1 7917 7918 #7917=(36,6,64,64)f32 #7918=(36,6,64,64)f32 Tensor.select Tensor.select_754 3 1 qkv0.71 22214 22215 v.71 $input=qkv0.71 $dim=22214 $index=22215 #qkv0.71=(3,36,6,64,32)f32 #v.71=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12068 0 1 22225 expr=1 pnnx.Expression pnnx_expr_12067 0 1 22226 expr=2 torch.matmul torch.matmul_2273 2 1 7918 v.71 7919 
$input=7918 $other=v.71 #7918=(36,6,64,64)f32 #v.71=(36,6,64,32)f32 #7919=(36,6,64,32)f32 pnnx.Expression pnnx_expr_12066 1 1 x1.71 7921 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.71=(36,64,192)f32 torch.transpose torch.transpose_3040 3 1 7919 22225 22226 7920 $input=7919 $dim0=22225 $dim1=22226 #7919=(36,6,64,32)f32 #7920=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_503 2 1 7920 7921 input0.75 $input=7920 $shape=7921 #7920=(36,64,6,32)f32 #input0.75=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.4.attn.proj 1 1 input0.75 7923 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.75=(36,64,192)f32 #7923=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.4.attn.proj_drop 1 1 7923 7924 #7923=(36,64,192)f32 #7924=(36,64,192)f32 pnnx.Expression pnnx_expr_12062 1 1 7809 7925 expr=[-1,8,8,int(size(@0,2))] #7809=(1,2304,192)f32 pnnx.Expression pnnx_expr_12049 1 1 154 7931 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12042 0 1 7933 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1359 2 1 7924 7925 windows.71 $input=7924 $shape=7925 #7924=(36,64,192)f32 #windows.71=(36,8,8,192)f32 Tensor.view Tensor.view_1360 2 1 windows.71 7931 x2.71 $input=windows.71 $shape=7931 #windows.71=(36,8,8,192)f32 #x2.71=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_12039 1 1 154 7936 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2667 2 1 x2.71 7933 7934 $input=x2.71 $dims=7933 #x2.71=(1,6,6,8,8,192)f32 #7934=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_106 1 1 7934 7935 memory_format=torch.contiguous_format $input=7934 #7934=(1,6,8,6,8,192)f32 #7935=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_12036 2 1 7809 154 7940 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7809=(1,2304,192)f32 #154=(1,192,48,48)f32 Tensor.view Tensor.view_1361 2 1 7935 7936 x3.71 $input=7935 $shape=7936 #7935=(1,6,8,6,8,192)f32 #x3.71=(1,48,48,192)f32 Tensor.view Tensor.view_1362 2 1 x3.71 7940 x4.71 $input=x3.71 $shape=7940 #x3.71=(1,48,48,192)f32 #x4.71=(1,2304,192)f32 pnnx.Expression pnnx_expr_12034 2 1 7809 x4.71 input.163 expr=add(@0,@1) #7809=(1,2304,192)f32 #x4.71=(1,2304,192)f32 #input.163=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.4.norm2 1 1 input.163 7944 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.163=(1,2304,192)f32 #7944=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.4.mlp.fc1 1 1 7944 7949 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #7944=(1,2304,192)f32 #7949=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.4.mlp.act 1 1 7949 7950 #7949=(1,2304,384)f32 #7950=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.4.mlp.drop 1 1 7950 7951 #7950=(1,2304,384)f32 #7951=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.4.mlp.fc2 1 1 7951 7952 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #7951=(1,2304,384)f32 #7952=(1,2304,192)f32 nn.Dropout pnnx_unique_70 1 1 7952 7953 #7952=(1,2304,192)f32 #7953=(1,2304,192)f32 pnnx.Expression pnnx_expr_12032 2 1 input.163 7953 7955 expr=add(@0,@1) #input.163=(1,2304,192)f32 #7953=(1,2304,192)f32 #7955=(1,2304,192)f32 pnnx.Attribute layers_dfe.5.residual_group.blocks.5 0 1 attn_mask.37 @attn_mask=(36,64,64)f32 #attn_mask.37=(36,64,64)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.5.norm1 1 1 7955 
7984 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #7955=(1,2304,192)f32 #7984=(1,2304,192)f32 pnnx.Expression pnnx_expr_12009 2 1 7955 154 7985 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #7955=(1,2304,192)f32 #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_12007 0 1 7987 expr=[-4,-4] pnnx.Expression pnnx_expr_12005 0 1 7988 expr=[1,2] Tensor.view Tensor.view_1363 2 1 7984 7985 x.73 $input=7984 $shape=7985 #7984=(1,2304,192)f32 #x.73=(1,48,48,192)f32 torch.roll torch.roll_2454 3 1 x.73 7987 7988 x0.73 $input=x.73 $shifts=7987 $dims=7988 #x.73=(1,48,48,192)f32 #x0.73=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11983 1 1 x0.73 8005 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.73=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11978 0 1 8007 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1364 2 1 x0.73 8005 x1.73 $input=x0.73 $shape=8005 #x0.73=(1,48,48,192)f32 #x1.73=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11975 1 1 x0.73 8010 expr=[-1,8,8,int(size(@0,3))] #x0.73=(1,48,48,192)f32 torch.permute torch.permute_2668 2 1 x1.73 8007 8008 $input=x1.73 $dims=8007 #x1.73=(1,6,8,6,8,192)f32 #8008=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_107 1 1 8008 8009 memory_format=torch.contiguous_format $input=8008 #8008=(1,6,6,8,8,192)f32 #8009=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11973 1 1 7955 8012 expr=[-1,64,int(size(@0,2))] #7955=(1,2304,192)f32 pnnx.Expression pnnx_expr_11963 0 1 8023 expr=-2 pnnx.Expression pnnx_expr_11962 0 1 8024 expr=-1 pnnx.Attribute layers_dfe.5.residual_group.blocks.5.attn 0 1 relative_position_bias_table.73 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.73=(225,6)f32 pnnx.Attribute pnnx_unique_71 0 1 relative_position_index.73 @relative_position_index=(64,64)i64 #relative_position_index.73=(64,64)i64 Tensor.view Tensor.view_1365 2 1 8009 8010 x_windows.73 $input=8009 $shape=8010 #8009=(1,6,6,8,8,192)f32 #x_windows.73=(36,8,8,192)f32 Tensor.view Tensor.view_1366 2 1 x_windows.73 8012 x2.73 $input=x_windows.73 $shape=8012 #x_windows.73=(36,8,8,192)f32 #x2.73=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.5.attn.qkv 1 1 x2.73 8048 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.73=(36,64,192)f32 #8048=(36,64,576)f32 pnnx.Expression pnnx_expr_11943 1 1 x2.73 8051 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.73=(36,64,192)f32 pnnx.Expression pnnx_expr_11938 0 1 8053 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_504 2 1 8048 8051 8052 $input=8048 $shape=8051 #8048=(36,64,576)f32 #8052=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_11937 0 1 22269 expr=0 pnnx.Expression pnnx_expr_11936 0 1 22270 expr=0 pnnx.Expression pnnx_expr_11935 0 1 22271 expr=0 pnnx.Expression pnnx_expr_11934 0 1 22272 expr=1 pnnx.Expression pnnx_expr_11933 0 1 22273 expr=0 pnnx.Expression pnnx_expr_11932 0 1 22274 expr=2 torch.permute torch.permute_2669 2 1 8052 8053 qkv0.73 $input=8052 $dims=8053 #8052=(36,64,3,6,32)f32 #qkv0.73=(3,36,6,64,32)f32 Tensor.select Tensor.select_755 3 1 qkv0.73 22269 22270 q.73 $input=qkv0.73 $dim=22269 $index=22270 #qkv0.73=(3,36,6,64,32)f32 #q.73=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11931 1 1 q.73 q0.73 expr=mul(@0,1.767767e-01) #q.73=(36,6,64,32)f32 #q0.73=(36,6,64,32)f32 Tensor.select Tensor.select_756 3 1 qkv0.73 22271 22272 k.73 $input=qkv0.73 $dim=22271 $index=22272 #qkv0.73=(3,36,6,64,32)f32 #k.73=(36,6,64,32)f32 pnnx.Expression 
pnnx_expr_11929 0 1 8061 expr=[-1] Tensor.view Tensor.view_1367 2 1 relative_position_index.73 8061 8062 $input=relative_position_index.73 $shape=8061 #relative_position_index.73=(64,64)i64 #8062=(4096)i64 pnnx.Expression pnnx_expr_11928 1 1 8062 8063 expr=[@0] #8062=(4096)i64 pnnx.Expression pnnx_expr_11925 0 1 8065 expr=[64,64,-1] Tensor.index Tensor.index_361 2 1 relative_position_bias_table.73 8063 8064 $input=relative_position_bias_table.73 $expr=8063 #relative_position_bias_table.73=(225,6)f32 #8064=(4096,6)f32 pnnx.Expression pnnx_expr_11921 0 1 8067 expr=[2,0,1] Tensor.view Tensor.view_1368 2 1 8064 8065 relative_position_bias.73 $input=8064 $shape=8065 #8064=(4096,6)f32 #relative_position_bias.73=(64,64,6)f32 pnnx.Expression pnnx_expr_11920 0 1 22282 expr=0 torch.permute torch.permute_2670 2 1 relative_position_bias.73 8067 8068 $input=relative_position_bias.73 $dims=8067 #relative_position_bias.73=(64,64,6)f32 #8068=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_108 1 1 8068 relative_position_bias0.73 memory_format=torch.contiguous_format $input=8068 #8068=(6,64,64)f32 #relative_position_bias0.73=(6,64,64)f32 torch.transpose torch.transpose_3041 3 1 k.73 8023 8024 8059 $input=k.73 $dim0=8023 $dim1=8024 #k.73=(36,6,64,32)f32 #8059=(36,6,32,64)f32 torch.matmul torch.matmul_2274 2 1 q0.73 8059 attn.147 $input=q0.73 $other=8059 #q0.73=(36,6,64,32)f32 #8059=(36,6,32,64)f32 #attn.147=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3287 2 1 relative_position_bias0.73 22282 8070 $input=relative_position_bias0.73 $dim=22282 #relative_position_bias0.73=(6,64,64)f32 #8070=(1,6,64,64)f32 pnnx.Expression pnnx_expr_11918 2 1 attn.147 8070 attn0.37 expr=add(@0,@1) #attn.147=(36,6,64,64)f32 #8070=(1,6,64,64)f32 #attn0.37=(36,6,64,64)f32 pnnx.Expression pnnx_expr_11909 2 1 x2.73 attn_mask.37 8077 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.73=(36,64,192)f32 #attn_mask.37=(36,64,64)f32 pnnx.Expression pnnx_expr_11908 0 1 22287 expr=1 pnnx.Expression pnnx_expr_11907 0 1 22288 expr=0 Tensor.view Tensor.view_1369 2 1 attn0.37 8077 8078 $input=attn0.37 $shape=8077 #attn0.37=(36,6,64,64)f32 #8078=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3288 2 1 attn_mask.37 22287 8079 $input=attn_mask.37 $dim=22287 #attn_mask.37=(36,64,64)f32 #8079=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3289 2 1 8079 22288 8080 $input=8079 $dim=22288 #8079=(36,1,64,64)f32 #8080=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_11905 2 1 8078 8080 attn1.37 expr=add(@0,@1) #8078=(1,36,6,64,64)f32 #8080=(1,36,1,64,64)f32 #attn1.37=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_11902 1 1 x2.73 8082 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.73=(36,64,192)f32 Tensor.view Tensor.view_1370 2 1 attn1.37 8082 input.165 $input=attn1.37 $shape=8082 #attn1.37=(1,36,6,64,64)f32 #input.165=(36,6,64,64)f32 nn.Softmax layers_dfe.5.residual_group.blocks.5.attn.softmax 1 1 input.165 8084 dim=-1 #input.165=(36,6,64,64)f32 #8084=(36,6,64,64)f32 nn.Dropout layers_dfe.5.residual_group.blocks.5.attn.attn_drop 1 1 8084 8085 #8084=(36,6,64,64)f32 #8085=(36,6,64,64)f32 Tensor.select Tensor.select_757 3 1 qkv0.73 22273 22274 v.73 $input=qkv0.73 $dim=22273 $index=22274 #qkv0.73=(3,36,6,64,32)f32 #v.73=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11901 0 1 22292 expr=1 pnnx.Expression pnnx_expr_11900 0 1 22293 expr=2 torch.matmul torch.matmul_2275 2 1 8085 v.73 8086 $input=8085 $other=v.73 #8085=(36,6,64,64)f32 #v.73=(36,6,64,32)f32 #8086=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11899 1 1 x2.73 8088 
expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.73=(36,64,192)f32 torch.transpose torch.transpose_3042 3 1 8086 22292 22293 8087 $input=8086 $dim0=22292 $dim1=22293 #8086=(36,6,64,32)f32 #8087=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_505 2 1 8087 8088 input0.65 $input=8087 $shape=8088 #8087=(36,64,6,32)f32 #input0.65=(36,64,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.5.attn.proj 1 1 input0.65 8090 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.65=(36,64,192)f32 #8090=(36,64,192)f32 nn.Dropout layers_dfe.5.residual_group.blocks.5.attn.proj_drop 1 1 8090 8091 #8090=(36,64,192)f32 #8091=(36,64,192)f32 pnnx.Expression pnnx_expr_11895 1 1 7955 8092 expr=[-1,8,8,int(size(@0,2))] #7955=(1,2304,192)f32 pnnx.Expression pnnx_expr_11882 1 1 154 8098 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #154=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11875 0 1 8100 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1371 2 1 8091 8092 windows.73 $input=8091 $shape=8092 #8091=(36,64,192)f32 #windows.73=(36,8,8,192)f32 Tensor.view Tensor.view_1372 2 1 windows.73 8098 x3.73 $input=windows.73 $shape=8098 #windows.73=(36,8,8,192)f32 #x3.73=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11872 1 1 154 8103 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #154=(1,192,48,48)f32 torch.permute torch.permute_2671 2 1 x3.73 8100 8101 $input=x3.73 $dims=8100 #x3.73=(1,6,6,8,8,192)f32 #8101=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_109 1 1 8101 8102 memory_format=torch.contiguous_format $input=8101 #8101=(1,6,8,6,8,192)f32 #8102=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11869 0 1 8105 expr=[4,4] pnnx.Expression pnnx_expr_11866 0 1 8106 expr=[1,2] Tensor.view Tensor.view_1373 2 1 8102 8103 shifted_x.37 $input=8102 $shape=8103 #8102=(1,6,8,6,8,192)f32 #shifted_x.37=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11863 2 1 7955 154 8110 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #7955=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.roll torch.roll_2455 3 1 shifted_x.37 8105 8106 x4.73 $input=shifted_x.37 $shifts=8105 $dims=8106 #shifted_x.37=(1,48,48,192)f32 #x4.73=(1,48,48,192)f32 Tensor.view Tensor.view_1374 2 1 x4.73 8110 x5.37 $input=x4.73 $shape=8110 #x4.73=(1,48,48,192)f32 #x5.37=(1,2304,192)f32 pnnx.Expression pnnx_expr_11861 2 1 7955 x5.37 input.167 expr=add(@0,@1) #7955=(1,2304,192)f32 #x5.37=(1,2304,192)f32 #input.167=(1,2304,192)f32 nn.LayerNorm layers_dfe.5.residual_group.blocks.5.norm2 1 1 input.167 8114 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.167=(1,2304,192)f32 #8114=(1,2304,192)f32 nn.Linear layers_dfe.5.residual_group.blocks.5.mlp.fc1 1 1 8114 8119 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8114=(1,2304,192)f32 #8119=(1,2304,384)f32 nn.GELU layers_dfe.5.residual_group.blocks.5.mlp.act 1 1 8119 8120 #8119=(1,2304,384)f32 #8120=(1,2304,384)f32 nn.Dropout layers_dfe.5.residual_group.blocks.5.mlp.drop 1 1 8120 8121 #8120=(1,2304,384)f32 #8121=(1,2304,384)f32 nn.Linear layers_dfe.5.residual_group.blocks.5.mlp.fc2 1 1 8121 8122 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8121=(1,2304,384)f32 #8122=(1,2304,192)f32 nn.Dropout pnnx_unique_72 1 1 8122 8123 #8122=(1,2304,192)f32 #8123=(1,2304,192)f32 pnnx.Expression pnnx_expr_11859 2 1 input.167 8123 8125 expr=add(@0,@1) #input.167=(1,2304,192)f32 #8123=(1,2304,192)f32 #8125=(1,2304,192)f32 pnnx.Expression pnnx_expr_11857 0 1 8127 expr=1 
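For orientation: the run of operators above (layers_dfe.5.residual_group.blocks.5) is one shifted-window attention block. The torch.roll by (-4,-4), the view/permute/view chain down to (36,64,192) windows, the inverse chain, and the closing roll by (4,4) are the standard Swin window partition/reverse around the attention core. A minimal PyTorch sketch of that plumbing with the traced sizes hard-coded (1x48x48x192 map, window_size=8, shift_size=4); the helper names are illustrative, not taken from the dump:

```python
import torch

def window_partition(x, ws=8):
    # (B, H, W, C) -> (B * H/ws * W/ws, ws*ws, C); mirrors Tensor.view_1363/1364,
    # torch.permute_2668 and the follow-up views in the trace
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, ws * ws, C)

def window_reverse(windows, ws, H, W):
    # inverse mapping; mirrors Tensor.view_1371/1372 and torch.permute_2671
    B = windows.shape[0] // (H // ws * W // ws)
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)

x = torch.randn(1, 48, 48, 192)                        # x.73 above
shifted = torch.roll(x, shifts=(-4, -4), dims=(1, 2))  # torch.roll_2454
win = window_partition(shifted)                        # (36, 64, 192), i.e. x2.73
# ... masked window attention runs here ...
back = window_reverse(win, 8, 48, 48)                  # shifted_x.37
out = torch.roll(back, shifts=(4, 4), dims=(1, 2))     # torch.roll_2455
```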
pnnx.Expression pnnx_expr_11856 0 1 8128 expr=2 pnnx.Expression pnnx_expr_11851 2 1 8125 154 8134 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #8125=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.transpose torch.transpose_3043 3 1 8125 8127 8128 8133 $input=8125 $dim0=8127 $dim1=8128 #8125=(1,2304,192)f32 #8133=(1,192,2304)f32 Tensor.view Tensor.view_1375 2 1 8133 8134 input.141 $input=8133 $shape=8134 #8133=(1,192,2304)f32 #input.141=(1,192,48,48)f32 nn.Conv2d layers_dfe.5.conv 1 1 input.141 8136 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.141=(1,192,48,48)f32 #8136=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11850 0 1 8137 expr=-1 pnnx.Expression pnnx_expr_11849 0 1 8138 expr=2 pnnx.Expression pnnx_expr_11848 0 1 8139 expr=1 pnnx.Expression pnnx_expr_11847 0 1 22320 expr=2 torch.flatten torch.flatten_2189 3 1 8136 8138 8137 8140 $input=8136 $start_dim=8138 $end_dim=8137 #8136=(1,192,48,48)f32 #8140=(1,192,2304)f32 torch.transpose torch.transpose_3044 3 1 8140 8139 22320 8141 $input=8140 $dim0=8139 $dim1=22320 #8140=(1,192,2304)f32 #8141=(1,2304,192)f32 pnnx.Expression pnnx_expr_11846 2 1 8141 7160 8142 expr=add(@0,@1) #8141=(1,2304,192)f32 #7160=(1,2304,192)f32 #8142=(1,2304,192)f32 nn.LayerNorm norm_dfe 1 1 8142 832 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8142=(1,2304,192)f32 #832=(1,2304,192)f32 pnnx.Expression pnnx_expr_11844 0 1 8144 expr=1 pnnx.Expression pnnx_expr_11843 0 1 8145 expr=2 pnnx.Expression pnnx_expr_11838 2 1 832 154 8151 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #832=(1,2304,192)f32 #154=(1,192,48,48)f32 torch.transpose torch.transpose_3045 3 1 832 8144 8145 8150 $input=832 $dim0=8144 $dim1=8145 #832=(1,2304,192)f32 #8150=(1,192,2304)f32 Tensor.view Tensor.view_1376 2 1 8150 8151 input.143 $input=8150 $shape=8151 #8150=(1,192,2304)f32 #input.143=(1,192,48,48)f32 nn.Conv2d conv_after_body_dfe 1 1 input.143 838 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.143=(1,192,48,48)f32 #838=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11836 2 1 838 154 x_a.1 expr=add(@0,@1) #838=(1,192,48,48)f32 #154=(1,192,48,48)f32 #x_a.1=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11671 0 1 8153 expr=-1 pnnx.Expression pnnx_expr_11670 0 1 8154 expr=2 pnnx.Expression pnnx_expr_11669 0 1 8155 expr=1 pnnx.Expression pnnx_expr_11668 0 1 22324 expr=2 torch.flatten torch.flatten_2190 3 1 157 8154 8153 8157 $input=157 $start_dim=8154 $end_dim=8153 #157=(1,192,48,48)f32 #8157=(1,192,2304)f32 torch.transpose torch.transpose_3046 3 1 8157 8155 22324 input.169 $input=8157 $dim0=8155 $dim1=22324 #8157=(1,192,2304)f32 #input.169=(1,2304,192)f32 nn.LayerNorm pnnx_unique_73 1 1 input.169 8159 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.169=(1,2304,192)f32 #8159=(1,2304,192)f32 nn.Dropout pnnx_unique_74 1 1 8159 1327 #8159=(1,2304,192)f32 #1327=(1,2304,192)f32 nn.LayerNorm pnnx_unique_75 1 1 1327 8203 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #1327=(1,2304,192)f32 #8203=(1,2304,192)f32 pnnx.Expression pnnx_expr_11645 2 1 1327 157 8204 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #1327=(1,2304,192)f32 #157=(1,192,48,48)f32 
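The stretch above is the tail of a residual group followed by the start of the second input stream: the token sequence is transposed back to a (1,192,48,48) map, passed through a 3x3 conv (layers_dfe.5.conv), flattened back to (1,2304,192), and added to the long skip; conv_after_body_dfe then closes the outer residual onto the conv_first output (x_a.1), and the b-branch features (157) are flattened and layer-normed as a fresh token sequence. A hedged sketch of the recurring seq-to-image-and-back conv residual under the traced sizes; `conv` stands in for the traced 3x3 convolutions:

```python
import torch
import torch.nn as nn

B, C, H, W = 1, 192, 48, 48
conv = nn.Conv2d(C, C, kernel_size=3, padding=1)   # e.g. layers_dfe.5.conv

seq = torch.randn(B, H * W, C)    # (1, 2304, 192), block output (8125 above)
skip = torch.randn(B, H * W, C)   # long skip into the group (7160 above)

img = seq.transpose(1, 2).view(B, C, H, W)         # torch.transpose_3043 + Tensor.view_1375
out = conv(img).flatten(2).transpose(1, 2) + skip  # flatten_2189, transpose_3044, add
```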
Tensor.view Tensor.view_1377 2 1 8203 8204 x.75 $input=8203 $shape=8204 #8203=(1,2304,192)f32 #x.75=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11624 1 1 x.75 8221 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.75=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11619 0 1 8223 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1378 2 1 x.75 8221 x5.39 $input=x.75 $shape=8221 #x.75=(1,48,48,192)f32 #x5.39=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11616 1 1 x.75 8226 expr=[-1,8,8,int(size(@0,3))] #x.75=(1,48,48,192)f32 torch.permute torch.permute_2672 2 1 x5.39 8223 8224 $input=x5.39 $dims=8223 #x5.39=(1,6,8,6,8,192)f32 #8224=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_110 1 1 8224 8225 memory_format=torch.contiguous_format $input=8224 #8224=(1,6,6,8,8,192)f32 #8225=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11614 1 1 1327 8228 expr=[-1,64,int(size(@0,2))] #1327=(1,2304,192)f32 pnnx.Expression pnnx_expr_11604 0 1 8239 expr=-2 pnnx.Expression pnnx_expr_11603 0 1 8240 expr=-1 pnnx.Attribute pnnx_unique_76 0 1 relative_position_bias_table.75 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.75=(225,6)f32 pnnx.Attribute pnnx_unique_77 0 1 relative_position_index.75 @relative_position_index=(64,64)i64 #relative_position_index.75=(64,64)i64 Tensor.view Tensor.view_1379 2 1 8225 8226 x_windows.75 $input=8225 $shape=8226 #8225=(1,6,6,8,8,192)f32 #x_windows.75=(36,8,8,192)f32 Tensor.view Tensor.view_1380 2 1 x_windows.75 8228 x6.3 $input=x_windows.75 $shape=8228 #x_windows.75=(36,8,8,192)f32 #x6.3=(36,64,192)f32 nn.Linear pnnx_unique_78 1 1 x6.3 8260 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.3=(36,64,192)f32 #8260=(36,64,576)f32 pnnx.Expression pnnx_expr_11588 1 1 x6.3 8263 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.3=(36,64,192)f32 pnnx.Expression pnnx_expr_11583 0 1 8265 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_506 2 1 8260 8263 8264 $input=8260 $shape=8263 #8260=(36,64,576)f32 #8264=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_11582 0 1 22342 expr=0 pnnx.Expression pnnx_expr_11581 0 1 22343 expr=0 pnnx.Expression pnnx_expr_11580 0 1 22344 expr=0 pnnx.Expression pnnx_expr_11579 0 1 22345 expr=1 pnnx.Expression pnnx_expr_11578 0 1 22346 expr=0 pnnx.Expression pnnx_expr_11577 0 1 22347 expr=2 torch.permute torch.permute_2673 2 1 8264 8265 qkv1.3 $input=8264 $dims=8265 #8264=(36,64,3,6,32)f32 #qkv1.3=(3,36,6,64,32)f32 Tensor.select Tensor.select_758 3 1 qkv1.3 22342 22343 q.75 $input=qkv1.3 $dim=22342 $index=22343 #qkv1.3=(3,36,6,64,32)f32 #q.75=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11576 1 1 q.75 q1.3 expr=mul(@0,1.767767e-01) #q.75=(36,6,64,32)f32 #q1.3=(36,6,64,32)f32 Tensor.select Tensor.select_759 3 1 qkv1.3 22344 22345 k.75 $input=qkv1.3 $dim=22344 $index=22345 #qkv1.3=(3,36,6,64,32)f32 #k.75=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11574 0 1 8273 expr=[-1] Tensor.view Tensor.view_1381 2 1 relative_position_index.75 8273 8274 $input=relative_position_index.75 $shape=8273 #relative_position_index.75=(64,64)i64 #8274=(4096)i64 pnnx.Expression pnnx_expr_11573 1 1 8274 8275 expr=[@0] #8274=(4096)i64 pnnx.Expression pnnx_expr_11570 0 1 8277 expr=[64,64,-1] Tensor.index Tensor.index_362 2 1 relative_position_bias_table.75 8275 8276 $input=relative_position_bias_table.75 $expr=8275 #relative_position_bias_table.75=(225,6)f32 #8276=(4096,6)f32 pnnx.Expression pnnx_expr_11566 0 1 8279 expr=[2,0,1] Tensor.view Tensor.view_1382 2 1 8276 8277 
relative_position_bias.75 $input=8276 $shape=8277 #8276=(4096,6)f32 #relative_position_bias.75=(64,64,6)f32 pnnx.Expression pnnx_expr_11565 0 1 22355 expr=0 torch.permute torch.permute_2674 2 1 relative_position_bias.75 8279 8280 $input=relative_position_bias.75 $dims=8279 #relative_position_bias.75=(64,64,6)f32 #8280=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_111 1 1 8280 relative_position_bias1.3 memory_format=torch.contiguous_format $input=8280 #8280=(6,64,64)f32 #relative_position_bias1.3=(6,64,64)f32 torch.transpose torch.transpose_3047 3 1 k.75 8239 8240 8271 $input=k.75 $dim0=8239 $dim1=8240 #k.75=(36,6,64,32)f32 #8271=(36,6,32,64)f32 torch.matmul torch.matmul_2276 2 1 q1.3 8271 attn.151 $input=q1.3 $other=8271 #q1.3=(36,6,64,32)f32 #8271=(36,6,32,64)f32 #attn.151=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3290 2 1 relative_position_bias1.3 22355 8282 $input=relative_position_bias1.3 $dim=22355 #relative_position_bias1.3=(6,64,64)f32 #8282=(1,6,64,64)f32 pnnx.Expression pnnx_expr_11563 2 1 attn.151 8282 input.171 expr=add(@0,@1) #attn.151=(36,6,64,64)f32 #8282=(1,6,64,64)f32 #input.171=(36,6,64,64)f32 nn.Softmax pnnx_unique_79 1 1 input.171 8284 dim=-1 #input.171=(36,6,64,64)f32 #8284=(36,6,64,64)f32 nn.Dropout pnnx_unique_80 1 1 8284 8285 #8284=(36,6,64,64)f32 #8285=(36,6,64,64)f32 Tensor.select Tensor.select_760 3 1 qkv1.3 22346 22347 v.75 $input=qkv1.3 $dim=22346 $index=22347 #qkv1.3=(3,36,6,64,32)f32 #v.75=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11562 0 1 22357 expr=1 pnnx.Expression pnnx_expr_11561 0 1 22358 expr=2 torch.matmul torch.matmul_2277 2 1 8285 v.75 8286 $input=8285 $other=v.75 #8285=(36,6,64,64)f32 #v.75=(36,6,64,32)f32 #8286=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11560 1 1 x6.3 8288 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.3=(36,64,192)f32 torch.transpose torch.transpose_3048 3 1 8286 22357 22358 8287 $input=8286 $dim0=22357 $dim1=22358 #8286=(36,6,64,32)f32 #8287=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_507 2 1 8287 8288 input1.5 $input=8287 $shape=8288 #8287=(36,64,6,32)f32 #input1.5=(36,64,192)f32 nn.Linear pnnx_unique_81 1 1 input1.5 8290 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.5=(36,64,192)f32 #8290=(36,64,192)f32 nn.Dropout pnnx_unique_82 1 1 8290 8291 #8290=(36,64,192)f32 #8291=(36,64,192)f32 pnnx.Expression pnnx_expr_11556 1 1 1327 8292 expr=[-1,8,8,int(size(@0,2))] #1327=(1,2304,192)f32 pnnx.Expression pnnx_expr_11543 1 1 157 8298 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11536 0 1 8300 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1383 2 1 8291 8292 windows.75 $input=8291 $shape=8292 #8291=(36,64,192)f32 #windows.75=(36,8,8,192)f32 Tensor.view Tensor.view_1384 2 1 windows.75 8298 x7.3 $input=windows.75 $shape=8298 #windows.75=(36,8,8,192)f32 #x7.3=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11533 1 1 157 8303 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2675 2 1 x7.3 8300 8301 $input=x7.3 $dims=8300 #x7.3=(1,6,6,8,8,192)f32 #8301=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_112 1 1 8301 8302 memory_format=torch.contiguous_format $input=8301 #8301=(1,6,8,6,8,192)f32 #8302=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11530 2 1 1327 157 8307 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #1327=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1385 2 1 8302 8303 x8.3 $input=8302 $shape=8303 #8302=(1,6,8,6,8,192)f32 
#x8.3=(1,48,48,192)f32 Tensor.view Tensor.view_1386 2 1 x8.3 8307 x9.3 $input=x8.3 $shape=8307 #x8.3=(1,48,48,192)f32 #x9.3=(1,2304,192)f32 pnnx.Expression pnnx_expr_11528 2 1 1327 x9.3 input.173 expr=add(@0,@1) #1327=(1,2304,192)f32 #x9.3=(1,2304,192)f32 #input.173=(1,2304,192)f32 nn.LayerNorm pnnx_unique_83 1 1 input.173 8311 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.173=(1,2304,192)f32 #8311=(1,2304,192)f32 nn.Linear pnnx_unique_84 1 1 8311 8316 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8311=(1,2304,192)f32 #8316=(1,2304,384)f32 nn.GELU pnnx_unique_85 1 1 8316 8317 #8316=(1,2304,384)f32 #8317=(1,2304,384)f32 nn.Dropout pnnx_unique_86 1 1 8317 8318 #8317=(1,2304,384)f32 #8318=(1,2304,384)f32 nn.Linear pnnx_unique_87 1 1 8318 8319 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8318=(1,2304,384)f32 #8319=(1,2304,192)f32 nn.Dropout pnnx_unique_88 1 1 8319 8320 #8319=(1,2304,192)f32 #8320=(1,2304,192)f32 pnnx.Expression pnnx_expr_11526 2 1 input.173 8320 8322 expr=add(@0,@1) #input.173=(1,2304,192)f32 #8320=(1,2304,192)f32 #8322=(1,2304,192)f32 pnnx.Attribute pnnx_unique_89 0 1 attn_mask.39 @attn_mask=(36,64,64)f32 #attn_mask.39=(36,64,64)f32 nn.LayerNorm pnnx_unique_90 1 1 8322 8351 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8322=(1,2304,192)f32 #8351=(1,2304,192)f32 pnnx.Expression pnnx_expr_11503 2 1 8322 157 8352 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #8322=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11501 0 1 8354 expr=[-4,-4] pnnx.Expression pnnx_expr_11499 0 1 8355 expr=[1,2] Tensor.view Tensor.view_1387 2 1 8351 8352 x.77 $input=8351 $shape=8352 #8351=(1,2304,192)f32 #x.77=(1,48,48,192)f32 torch.roll torch.roll_2456 3 1 x.77 8354 8355 x6.5 $input=x.77 $shifts=8354 $dims=8355 #x.77=(1,48,48,192)f32 #x6.5=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11477 1 1 x6.5 8372 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.5=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11472 0 1 8374 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1388 2 1 x6.5 8372 x7.5 $input=x6.5 $shape=8372 #x6.5=(1,48,48,192)f32 #x7.5=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11469 1 1 x6.5 8377 expr=[-1,8,8,int(size(@0,3))] #x6.5=(1,48,48,192)f32 torch.permute torch.permute_2676 2 1 x7.5 8374 8375 $input=x7.5 $dims=8374 #x7.5=(1,6,8,6,8,192)f32 #8375=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_113 1 1 8375 8376 memory_format=torch.contiguous_format $input=8375 #8375=(1,6,6,8,8,192)f32 #8376=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11467 1 1 8322 8379 expr=[-1,64,int(size(@0,2))] #8322=(1,2304,192)f32 pnnx.Expression pnnx_expr_11457 0 1 8390 expr=-2 pnnx.Expression pnnx_expr_11456 0 1 8391 expr=-1 pnnx.Attribute pnnx_unique_91 0 1 relative_position_bias_table.77 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.77=(225,6)f32 pnnx.Attribute pnnx_unique_92 0 1 relative_position_index.77 @relative_position_index=(64,64)i64 #relative_position_index.77=(64,64)i64 Tensor.view Tensor.view_1389 2 1 8376 8377 x_windows.77 $input=8376 $shape=8377 #8376=(1,6,6,8,8,192)f32 #x_windows.77=(36,8,8,192)f32 Tensor.view Tensor.view_1390 2 1 x_windows.77 8379 x8.5 $input=x_windows.77 $shape=8379 #x_windows.77=(36,8,8,192)f32 #x8.5=(36,64,192)f32 nn.Linear pnnx_unique_93 1 1 x8.5 8415 bias=True in_features=192 out_features=576 
@bias=(576)f32 @weight=(576,192)f32 #x8.5=(36,64,192)f32 #8415=(36,64,576)f32 pnnx.Expression pnnx_expr_11437 1 1 x8.5 8418 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.5=(36,64,192)f32 pnnx.Expression pnnx_expr_11432 0 1 8420 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_508 2 1 8415 8418 8419 $input=8415 $shape=8418 #8415=(36,64,576)f32 #8419=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_11431 0 1 22401 expr=0 pnnx.Expression pnnx_expr_11430 0 1 22402 expr=0 pnnx.Expression pnnx_expr_11429 0 1 22403 expr=0 pnnx.Expression pnnx_expr_11428 0 1 22404 expr=1 pnnx.Expression pnnx_expr_11427 0 1 22405 expr=0 pnnx.Expression pnnx_expr_11426 0 1 22406 expr=2 torch.permute torch.permute_2677 2 1 8419 8420 qkv1.5 $input=8419 $dims=8420 #8419=(36,64,3,6,32)f32 #qkv1.5=(3,36,6,64,32)f32 Tensor.select Tensor.select_761 3 1 qkv1.5 22401 22402 q.77 $input=qkv1.5 $dim=22401 $index=22402 #qkv1.5=(3,36,6,64,32)f32 #q.77=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11425 1 1 q.77 q1.5 expr=mul(@0,1.767767e-01) #q.77=(36,6,64,32)f32 #q1.5=(36,6,64,32)f32 Tensor.select Tensor.select_762 3 1 qkv1.5 22403 22404 k.77 $input=qkv1.5 $dim=22403 $index=22404 #qkv1.5=(3,36,6,64,32)f32 #k.77=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11423 0 1 8428 expr=[-1] Tensor.view Tensor.view_1391 2 1 relative_position_index.77 8428 8429 $input=relative_position_index.77 $shape=8428 #relative_position_index.77=(64,64)i64 #8429=(4096)i64 pnnx.Expression pnnx_expr_11422 1 1 8429 8430 expr=[@0] #8429=(4096)i64 pnnx.Expression pnnx_expr_11419 0 1 8432 expr=[64,64,-1] Tensor.index Tensor.index_363 2 1 relative_position_bias_table.77 8430 8431 $input=relative_position_bias_table.77 $expr=8430 #relative_position_bias_table.77=(225,6)f32 #8431=(4096,6)f32 pnnx.Expression pnnx_expr_11415 0 1 8434 expr=[2,0,1] Tensor.view Tensor.view_1392 2 1 8431 8432 relative_position_bias.77 $input=8431 $shape=8432 #8431=(4096,6)f32 #relative_position_bias.77=(64,64,6)f32 pnnx.Expression pnnx_expr_11414 0 1 22414 expr=0 torch.permute torch.permute_2678 2 1 relative_position_bias.77 8434 8435 $input=relative_position_bias.77 $dims=8434 #relative_position_bias.77=(64,64,6)f32 #8435=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_114 1 1 8435 relative_position_bias1.5 memory_format=torch.contiguous_format $input=8435 #8435=(6,64,64)f32 #relative_position_bias1.5=(6,64,64)f32 torch.transpose torch.transpose_3049 3 1 k.77 8390 8391 8426 $input=k.77 $dim0=8390 $dim1=8391 #k.77=(36,6,64,32)f32 #8426=(36,6,32,64)f32 torch.matmul torch.matmul_2278 2 1 q1.5 8426 attn.155 $input=q1.5 $other=8426 #q1.5=(36,6,64,32)f32 #8426=(36,6,32,64)f32 #attn.155=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3291 2 1 relative_position_bias1.5 22414 8437 $input=relative_position_bias1.5 $dim=22414 #relative_position_bias1.5=(6,64,64)f32 #8437=(1,6,64,64)f32 pnnx.Expression pnnx_expr_11412 2 1 attn.155 8437 attn2.3 expr=add(@0,@1) #attn.155=(36,6,64,64)f32 #8437=(1,6,64,64)f32 #attn2.3=(36,6,64,64)f32 pnnx.Expression pnnx_expr_11403 2 1 x8.5 attn_mask.39 8444 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.5=(36,64,192)f32 #attn_mask.39=(36,64,64)f32 pnnx.Expression pnnx_expr_11402 0 1 22419 expr=1 pnnx.Expression pnnx_expr_11401 0 1 22420 expr=0 Tensor.view Tensor.view_1393 2 1 attn2.3 8444 8445 $input=attn2.3 $shape=8444 #attn2.3=(36,6,64,64)f32 #8445=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3292 2 1 attn_mask.39 22419 8446 $input=attn_mask.39 $dim=22419 #attn_mask.39=(36,64,64)f32 
#8446=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3293 2 1 8446 22420 8447 $input=8446 $dim=22420 #8446=(36,1,64,64)f32 #8447=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_11399 2 1 8445 8447 attn3.3 expr=add(@0,@1) #8445=(1,36,6,64,64)f32 #8447=(1,36,1,64,64)f32 #attn3.3=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_11396 1 1 x8.5 8449 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.5=(36,64,192)f32 Tensor.view Tensor.view_1394 2 1 attn3.3 8449 input.175 $input=attn3.3 $shape=8449 #attn3.3=(1,36,6,64,64)f32 #input.175=(36,6,64,64)f32 nn.Softmax pnnx_unique_94 1 1 input.175 8451 dim=-1 #input.175=(36,6,64,64)f32 #8451=(36,6,64,64)f32 nn.Dropout pnnx_unique_95 1 1 8451 8452 #8451=(36,6,64,64)f32 #8452=(36,6,64,64)f32 Tensor.select Tensor.select_763 3 1 qkv1.5 22405 22406 v.77 $input=qkv1.5 $dim=22405 $index=22406 #qkv1.5=(3,36,6,64,32)f32 #v.77=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11395 0 1 22424 expr=1 pnnx.Expression pnnx_expr_11394 0 1 22425 expr=2 torch.matmul torch.matmul_2279 2 1 8452 v.77 8453 $input=8452 $other=v.77 #8452=(36,6,64,64)f32 #v.77=(36,6,64,32)f32 #8453=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11393 1 1 x8.5 8455 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.5=(36,64,192)f32 torch.transpose torch.transpose_3050 3 1 8453 22424 22425 8454 $input=8453 $dim0=22424 $dim1=22425 #8453=(36,6,64,32)f32 #8454=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_509 2 1 8454 8455 input1.7 $input=8454 $shape=8455 #8454=(36,64,6,32)f32 #input1.7=(36,64,192)f32 nn.Linear pnnx_unique_96 1 1 input1.7 8457 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.7=(36,64,192)f32 #8457=(36,64,192)f32 nn.Dropout pnnx_unique_97 1 1 8457 8458 #8457=(36,64,192)f32 #8458=(36,64,192)f32 pnnx.Expression pnnx_expr_11389 1 1 8322 8459 expr=[-1,8,8,int(size(@0,2))] #8322=(1,2304,192)f32 pnnx.Expression pnnx_expr_11376 1 1 157 8465 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11369 0 1 8467 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1395 2 1 8458 8459 windows.77 $input=8458 $shape=8459 #8458=(36,64,192)f32 #windows.77=(36,8,8,192)f32 Tensor.view Tensor.view_1396 2 1 windows.77 8465 x9.5 $input=windows.77 $shape=8465 #windows.77=(36,8,8,192)f32 #x9.5=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11366 1 1 157 8470 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2679 2 1 x9.5 8467 8468 $input=x9.5 $dims=8467 #x9.5=(1,6,6,8,8,192)f32 #8468=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_115 1 1 8468 8469 memory_format=torch.contiguous_format $input=8468 #8468=(1,6,8,6,8,192)f32 #8469=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11363 0 1 8472 expr=[4,4] pnnx.Expression pnnx_expr_11360 0 1 8473 expr=[1,2] Tensor.view Tensor.view_1397 2 1 8469 8470 shifted_x.39 $input=8469 $shape=8470 #8469=(1,6,8,6,8,192)f32 #shifted_x.39=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11357 2 1 8322 157 8477 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #8322=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2457 3 1 shifted_x.39 8472 8473 x10.3 $input=shifted_x.39 $shifts=8472 $dims=8473 #shifted_x.39=(1,48,48,192)f32 #x10.3=(1,48,48,192)f32 Tensor.view Tensor.view_1398 2 1 x10.3 8477 x11.3 $input=x10.3 $shape=8477 #x10.3=(1,48,48,192)f32 #x11.3=(1,2304,192)f32 pnnx.Expression pnnx_expr_11355 2 1 8322 x11.3 input.177 expr=add(@0,@1) #8322=(1,2304,192)f32 #x11.3=(1,2304,192)f32 #input.177=(1,2304,192)f32 nn.LayerNorm pnnx_unique_98 1 
1 input.177 8481 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.177=(1,2304,192)f32 #8481=(1,2304,192)f32 nn.Linear pnnx_unique_99 1 1 8481 8486 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8481=(1,2304,192)f32 #8486=(1,2304,384)f32 nn.GELU pnnx_unique_100 1 1 8486 8487 #8486=(1,2304,384)f32 #8487=(1,2304,384)f32 nn.Dropout pnnx_unique_101 1 1 8487 8488 #8487=(1,2304,384)f32 #8488=(1,2304,384)f32 nn.Linear pnnx_unique_102 1 1 8488 8489 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8488=(1,2304,384)f32 #8489=(1,2304,192)f32 nn.Dropout pnnx_unique_103 1 1 8489 8490 #8489=(1,2304,192)f32 #8490=(1,2304,192)f32 pnnx.Expression pnnx_expr_11353 2 1 input.177 8490 8492 expr=add(@0,@1) #input.177=(1,2304,192)f32 #8490=(1,2304,192)f32 #8492=(1,2304,192)f32 nn.LayerNorm pnnx_unique_104 1 1 8492 8519 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8492=(1,2304,192)f32 #8519=(1,2304,192)f32 pnnx.Expression pnnx_expr_11331 2 1 8492 157 8520 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #8492=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1399 2 1 8519 8520 x.79 $input=8519 $shape=8520 #8519=(1,2304,192)f32 #x.79=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11310 1 1 x.79 8537 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.79=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11305 0 1 8539 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1400 2 1 x.79 8537 x5.41 $input=x.79 $shape=8537 #x.79=(1,48,48,192)f32 #x5.41=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11302 1 1 x.79 8542 expr=[-1,8,8,int(size(@0,3))] #x.79=(1,48,48,192)f32 torch.permute torch.permute_2680 2 1 x5.41 8539 8540 $input=x5.41 $dims=8539 #x5.41=(1,6,8,6,8,192)f32 #8540=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_116 1 1 8540 8541 memory_format=torch.contiguous_format $input=8540 #8540=(1,6,6,8,8,192)f32 #8541=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11300 1 1 8492 8544 expr=[-1,64,int(size(@0,2))] #8492=(1,2304,192)f32 pnnx.Expression pnnx_expr_11290 0 1 8555 expr=-2 pnnx.Expression pnnx_expr_11289 0 1 8556 expr=-1 pnnx.Attribute pnnx_unique_105 0 1 relative_position_bias_table.79 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.79=(225,6)f32 pnnx.Attribute pnnx_unique_106 0 1 relative_position_index.79 @relative_position_index=(64,64)i64 #relative_position_index.79=(64,64)i64 Tensor.view Tensor.view_1401 2 1 8541 8542 x_windows.79 $input=8541 $shape=8542 #8541=(1,6,6,8,8,192)f32 #x_windows.79=(36,8,8,192)f32 Tensor.view Tensor.view_1402 2 1 x_windows.79 8544 x6.7 $input=x_windows.79 $shape=8544 #x_windows.79=(36,8,8,192)f32 #x6.7=(36,64,192)f32 nn.Linear pnnx_unique_107 1 1 x6.7 8576 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.7=(36,64,192)f32 #8576=(36,64,576)f32 pnnx.Expression pnnx_expr_11274 1 1 x6.7 8579 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.7=(36,64,192)f32 pnnx.Expression pnnx_expr_11269 0 1 8581 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_510 2 1 8576 8579 8580 $input=8576 $shape=8579 #8576=(36,64,576)f32 #8580=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_11268 0 1 22469 expr=0 pnnx.Expression pnnx_expr_11267 0 1 22470 expr=0 pnnx.Expression pnnx_expr_11266 0 1 22471 expr=0 pnnx.Expression pnnx_expr_11265 0 1 22472 expr=1 pnnx.Expression pnnx_expr_11264 0 1 22473 expr=0 
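Every attention block in this trace projects its (36,64,192) windows to 576 channels and splits heads with the same reshape/permute pair the Tensor.reshape_* / torch.permute_* ops encode: reshape to (B_, N, 3, num_heads, head_dim), permute(2,0,3,1,4), then select q, k, v along dim 0 and scale q by 1.767767e-01 = 32**-0.5. A minimal sketch under those sizes (num_heads=6, head_dim=32); `qkv` stands in for the traced nn.Linear (192 -> 576):

```python
import torch
import torch.nn as nn

num_heads, head_dim = 6, 32
dim = num_heads * head_dim              # 192
qkv = nn.Linear(dim, 3 * dim)           # e.g. pnnx_unique_107 above

x = torch.randn(36, 64, dim)            # windows: (nW, ws*ws, C)
B_, N, _ = x.shape
t = qkv(x).reshape(B_, N, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = t[0], t[1], t[2]              # the three Tensor.select ops
q = q * head_dim ** -0.5                # mul(@0, 1.767767e-01)
attn = q @ k.transpose(-2, -1)          # (36, 6, 64, 64), cf. torch.matmul_2280
```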
pnnx.Expression pnnx_expr_11263 0 1 22474 expr=2 torch.permute torch.permute_2681 2 1 8580 8581 qkv1.7 $input=8580 $dims=8581 #8580=(36,64,3,6,32)f32 #qkv1.7=(3,36,6,64,32)f32 Tensor.select Tensor.select_764 3 1 qkv1.7 22469 22470 q.79 $input=qkv1.7 $dim=22469 $index=22470 #qkv1.7=(3,36,6,64,32)f32 #q.79=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11262 1 1 q.79 q1.7 expr=mul(@0,1.767767e-01) #q.79=(36,6,64,32)f32 #q1.7=(36,6,64,32)f32 Tensor.select Tensor.select_765 3 1 qkv1.7 22471 22472 k.79 $input=qkv1.7 $dim=22471 $index=22472 #qkv1.7=(3,36,6,64,32)f32 #k.79=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11260 0 1 8589 expr=[-1] Tensor.view Tensor.view_1403 2 1 relative_position_index.79 8589 8590 $input=relative_position_index.79 $shape=8589 #relative_position_index.79=(64,64)i64 #8590=(4096)i64 pnnx.Expression pnnx_expr_11259 1 1 8590 8591 expr=[@0] #8590=(4096)i64 pnnx.Expression pnnx_expr_11256 0 1 8593 expr=[64,64,-1] Tensor.index Tensor.index_364 2 1 relative_position_bias_table.79 8591 8592 $input=relative_position_bias_table.79 $expr=8591 #relative_position_bias_table.79=(225,6)f32 #8592=(4096,6)f32 pnnx.Expression pnnx_expr_11252 0 1 8595 expr=[2,0,1] Tensor.view Tensor.view_1404 2 1 8592 8593 relative_position_bias.79 $input=8592 $shape=8593 #8592=(4096,6)f32 #relative_position_bias.79=(64,64,6)f32 pnnx.Expression pnnx_expr_11251 0 1 22482 expr=0 torch.permute torch.permute_2682 2 1 relative_position_bias.79 8595 8596 $input=relative_position_bias.79 $dims=8595 #relative_position_bias.79=(64,64,6)f32 #8596=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_117 1 1 8596 relative_position_bias1.7 memory_format=torch.contiguous_format $input=8596 #8596=(6,64,64)f32 #relative_position_bias1.7=(6,64,64)f32 torch.transpose torch.transpose_3051 3 1 k.79 8555 8556 8587 $input=k.79 $dim0=8555 $dim1=8556 #k.79=(36,6,64,32)f32 #8587=(36,6,32,64)f32 torch.matmul torch.matmul_2280 2 1 q1.7 8587 attn.159 $input=q1.7 $other=8587 #q1.7=(36,6,64,32)f32 #8587=(36,6,32,64)f32 #attn.159=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3294 2 1 relative_position_bias1.7 22482 8598 $input=relative_position_bias1.7 $dim=22482 #relative_position_bias1.7=(6,64,64)f32 #8598=(1,6,64,64)f32 pnnx.Expression pnnx_expr_11249 2 1 attn.159 8598 input.179 expr=add(@0,@1) #attn.159=(36,6,64,64)f32 #8598=(1,6,64,64)f32 #input.179=(36,6,64,64)f32 nn.Softmax pnnx_unique_108 1 1 input.179 8600 dim=-1 #input.179=(36,6,64,64)f32 #8600=(36,6,64,64)f32 nn.Dropout pnnx_unique_109 1 1 8600 8601 #8600=(36,6,64,64)f32 #8601=(36,6,64,64)f32 Tensor.select Tensor.select_766 3 1 qkv1.7 22473 22474 v.79 $input=qkv1.7 $dim=22473 $index=22474 #qkv1.7=(3,36,6,64,32)f32 #v.79=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11248 0 1 22484 expr=1 pnnx.Expression pnnx_expr_11247 0 1 22485 expr=2 torch.matmul torch.matmul_2281 2 1 8601 v.79 8602 $input=8601 $other=v.79 #8601=(36,6,64,64)f32 #v.79=(36,6,64,32)f32 #8602=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11246 1 1 x6.7 8604 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.7=(36,64,192)f32 torch.transpose torch.transpose_3052 3 1 8602 22484 22485 8603 $input=8602 $dim0=22484 $dim1=22485 #8602=(36,6,64,32)f32 #8603=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_511 2 1 8603 8604 input1.9 $input=8603 $shape=8604 #8603=(36,64,6,32)f32 #input1.9=(36,64,192)f32 nn.Linear pnnx_unique_110 1 1 input1.9 8606 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.9=(36,64,192)f32 #8606=(36,64,192)f32 nn.Dropout pnnx_unique_111 1 1 8606 8607 #8606=(36,64,192)f32 
#8607=(36,64,192)f32 pnnx.Expression pnnx_expr_11242 1 1 8492 8608 expr=[-1,8,8,int(size(@0,2))] #8492=(1,2304,192)f32 pnnx.Expression pnnx_expr_11229 1 1 157 8614 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11222 0 1 8616 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1405 2 1 8607 8608 windows.79 $input=8607 $shape=8608 #8607=(36,64,192)f32 #windows.79=(36,8,8,192)f32 Tensor.view Tensor.view_1406 2 1 windows.79 8614 x7.7 $input=windows.79 $shape=8614 #windows.79=(36,8,8,192)f32 #x7.7=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11219 1 1 157 8619 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2683 2 1 x7.7 8616 8617 $input=x7.7 $dims=8616 #x7.7=(1,6,6,8,8,192)f32 #8617=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_118 1 1 8617 8618 memory_format=torch.contiguous_format $input=8617 #8617=(1,6,8,6,8,192)f32 #8618=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11216 2 1 8492 157 8623 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #8492=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1407 2 1 8618 8619 x8.7 $input=8618 $shape=8619 #8618=(1,6,8,6,8,192)f32 #x8.7=(1,48,48,192)f32 Tensor.view Tensor.view_1408 2 1 x8.7 8623 x9.7 $input=x8.7 $shape=8623 #x8.7=(1,48,48,192)f32 #x9.7=(1,2304,192)f32 pnnx.Expression pnnx_expr_11214 2 1 8492 x9.7 input.181 expr=add(@0,@1) #8492=(1,2304,192)f32 #x9.7=(1,2304,192)f32 #input.181=(1,2304,192)f32 nn.LayerNorm pnnx_unique_112 1 1 input.181 8627 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.181=(1,2304,192)f32 #8627=(1,2304,192)f32 nn.Linear pnnx_unique_113 1 1 8627 8632 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8627=(1,2304,192)f32 #8632=(1,2304,384)f32 nn.GELU pnnx_unique_114 1 1 8632 8633 #8632=(1,2304,384)f32 #8633=(1,2304,384)f32 nn.Dropout pnnx_unique_115 1 1 8633 8634 #8633=(1,2304,384)f32 #8634=(1,2304,384)f32 nn.Linear pnnx_unique_116 1 1 8634 8635 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8634=(1,2304,384)f32 #8635=(1,2304,192)f32 nn.Dropout pnnx_unique_117 1 1 8635 8636 #8635=(1,2304,192)f32 #8636=(1,2304,192)f32 pnnx.Expression pnnx_expr_11212 2 1 input.181 8636 8638 expr=add(@0,@1) #input.181=(1,2304,192)f32 #8636=(1,2304,192)f32 #8638=(1,2304,192)f32 pnnx.Attribute pnnx_unique_118 0 1 attn_mask.41 @attn_mask=(36,64,64)f32 #attn_mask.41=(36,64,64)f32 nn.LayerNorm pnnx_unique_119 1 1 8638 8667 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8638=(1,2304,192)f32 #8667=(1,2304,192)f32 pnnx.Expression pnnx_expr_11189 2 1 8638 157 8668 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #8638=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11187 0 1 8670 expr=[-4,-4] pnnx.Expression pnnx_expr_11185 0 1 8671 expr=[1,2] Tensor.view Tensor.view_1409 2 1 8667 8668 x.81 $input=8667 $shape=8668 #8667=(1,2304,192)f32 #x.81=(1,48,48,192)f32 torch.roll torch.roll_2458 3 1 x.81 8670 8671 x6.9 $input=x.81 $shifts=8670 $dims=8671 #x.81=(1,48,48,192)f32 #x6.9=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11163 1 1 x6.9 8688 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.9=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11158 0 1 8690 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1410 2 1 x6.9 8688 x7.9 $input=x6.9 $shape=8688 #x6.9=(1,48,48,192)f32 
#x7.9=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11155 1 1 x6.9 8693 expr=[-1,8,8,int(size(@0,3))] #x6.9=(1,48,48,192)f32 torch.permute torch.permute_2684 2 1 x7.9 8690 8691 $input=x7.9 $dims=8690 #x7.9=(1,6,8,6,8,192)f32 #8691=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_119 1 1 8691 8692 memory_format=torch.contiguous_format $input=8691 #8691=(1,6,6,8,8,192)f32 #8692=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_11153 1 1 8638 8695 expr=[-1,64,int(size(@0,2))] #8638=(1,2304,192)f32 pnnx.Expression pnnx_expr_11143 0 1 8706 expr=-2 pnnx.Expression pnnx_expr_11142 0 1 8707 expr=-1 pnnx.Attribute pnnx_unique_120 0 1 relative_position_bias_table.81 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.81=(225,6)f32 pnnx.Attribute pnnx_unique_121 0 1 relative_position_index.81 @relative_position_index=(64,64)i64 #relative_position_index.81=(64,64)i64 Tensor.view Tensor.view_1411 2 1 8692 8693 x_windows.81 $input=8692 $shape=8693 #8692=(1,6,6,8,8,192)f32 #x_windows.81=(36,8,8,192)f32 Tensor.view Tensor.view_1412 2 1 x_windows.81 8695 x8.9 $input=x_windows.81 $shape=8695 #x_windows.81=(36,8,8,192)f32 #x8.9=(36,64,192)f32 nn.Linear pnnx_unique_122 1 1 x8.9 8731 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.9=(36,64,192)f32 #8731=(36,64,576)f32 pnnx.Expression pnnx_expr_11123 1 1 x8.9 8734 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.9=(36,64,192)f32 pnnx.Expression pnnx_expr_11118 0 1 8736 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_512 2 1 8731 8734 8735 $input=8731 $shape=8734 #8731=(36,64,576)f32 #8735=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_11117 0 1 22528 expr=0 pnnx.Expression pnnx_expr_11116 0 1 22529 expr=0 pnnx.Expression pnnx_expr_11115 0 1 22530 expr=0 pnnx.Expression pnnx_expr_11114 0 1 22531 expr=1 pnnx.Expression pnnx_expr_11113 0 1 22532 expr=0 pnnx.Expression pnnx_expr_11112 0 1 22533 expr=2 torch.permute torch.permute_2685 2 1 8735 8736 qkv1.9 $input=8735 $dims=8736 #8735=(36,64,3,6,32)f32 #qkv1.9=(3,36,6,64,32)f32 Tensor.select Tensor.select_767 3 1 qkv1.9 22528 22529 q.81 $input=qkv1.9 $dim=22528 $index=22529 #qkv1.9=(3,36,6,64,32)f32 #q.81=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11111 1 1 q.81 q1.9 expr=mul(@0,1.767767e-01) #q.81=(36,6,64,32)f32 #q1.9=(36,6,64,32)f32 Tensor.select Tensor.select_768 3 1 qkv1.9 22530 22531 k.81 $input=qkv1.9 $dim=22530 $index=22531 #qkv1.9=(3,36,6,64,32)f32 #k.81=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11109 0 1 8744 expr=[-1] Tensor.view Tensor.view_1413 2 1 relative_position_index.81 8744 8745 $input=relative_position_index.81 $shape=8744 #relative_position_index.81=(64,64)i64 #8745=(4096)i64 pnnx.Expression pnnx_expr_11108 1 1 8745 8746 expr=[@0] #8745=(4096)i64 pnnx.Expression pnnx_expr_11105 0 1 8748 expr=[64,64,-1] Tensor.index Tensor.index_365 2 1 relative_position_bias_table.81 8746 8747 $input=relative_position_bias_table.81 $expr=8746 #relative_position_bias_table.81=(225,6)f32 #8747=(4096,6)f32 pnnx.Expression pnnx_expr_11101 0 1 8750 expr=[2,0,1] Tensor.view Tensor.view_1414 2 1 8747 8748 relative_position_bias.81 $input=8747 $shape=8748 #8747=(4096,6)f32 #relative_position_bias.81=(64,64,6)f32 pnnx.Expression pnnx_expr_11100 0 1 22541 expr=0 torch.permute torch.permute_2686 2 1 relative_position_bias.81 8750 8751 $input=relative_position_bias.81 $dims=8750 #relative_position_bias.81=(64,64,6)f32 #8751=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_120 1 1 8751 relative_position_bias1.9 memory_format=torch.contiguous_format 
$input=8751 #8751=(6,64,64)f32 #relative_position_bias1.9=(6,64,64)f32 torch.transpose torch.transpose_3053 3 1 k.81 8706 8707 8742 $input=k.81 $dim0=8706 $dim1=8707 #k.81=(36,6,64,32)f32 #8742=(36,6,32,64)f32 torch.matmul torch.matmul_2282 2 1 q1.9 8742 attn.163 $input=q1.9 $other=8742 #q1.9=(36,6,64,32)f32 #8742=(36,6,32,64)f32 #attn.163=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3295 2 1 relative_position_bias1.9 22541 8753 $input=relative_position_bias1.9 $dim=22541 #relative_position_bias1.9=(6,64,64)f32 #8753=(1,6,64,64)f32 pnnx.Expression pnnx_expr_11098 2 1 attn.163 8753 attn2.5 expr=add(@0,@1) #attn.163=(36,6,64,64)f32 #8753=(1,6,64,64)f32 #attn2.5=(36,6,64,64)f32 pnnx.Expression pnnx_expr_11089 2 1 x8.9 attn_mask.41 8760 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.9=(36,64,192)f32 #attn_mask.41=(36,64,64)f32 pnnx.Expression pnnx_expr_11088 0 1 22546 expr=1 pnnx.Expression pnnx_expr_11087 0 1 22547 expr=0 Tensor.view Tensor.view_1415 2 1 attn2.5 8760 8761 $input=attn2.5 $shape=8760 #attn2.5=(36,6,64,64)f32 #8761=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3296 2 1 attn_mask.41 22546 8762 $input=attn_mask.41 $dim=22546 #attn_mask.41=(36,64,64)f32 #8762=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3297 2 1 8762 22547 8763 $input=8762 $dim=22547 #8762=(36,1,64,64)f32 #8763=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_11085 2 1 8761 8763 attn3.5 expr=add(@0,@1) #8761=(1,36,6,64,64)f32 #8763=(1,36,1,64,64)f32 #attn3.5=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_11082 1 1 x8.9 8765 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.9=(36,64,192)f32 Tensor.view Tensor.view_1416 2 1 attn3.5 8765 input.183 $input=attn3.5 $shape=8765 #attn3.5=(1,36,6,64,64)f32 #input.183=(36,6,64,64)f32 nn.Softmax pnnx_unique_123 1 1 input.183 8767 dim=-1 #input.183=(36,6,64,64)f32 #8767=(36,6,64,64)f32 nn.Dropout pnnx_unique_124 1 1 8767 8768 #8767=(36,6,64,64)f32 #8768=(36,6,64,64)f32 Tensor.select Tensor.select_769 3 1 qkv1.9 22532 22533 v.81 $input=qkv1.9 $dim=22532 $index=22533 #qkv1.9=(3,36,6,64,32)f32 #v.81=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11081 0 1 22551 expr=1 pnnx.Expression pnnx_expr_11080 0 1 22552 expr=2 torch.matmul torch.matmul_2283 2 1 8768 v.81 8769 $input=8768 $other=v.81 #8768=(36,6,64,64)f32 #v.81=(36,6,64,32)f32 #8769=(36,6,64,32)f32 pnnx.Expression pnnx_expr_11079 1 1 x8.9 8771 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.9=(36,64,192)f32 torch.transpose torch.transpose_3054 3 1 8769 22551 22552 8770 $input=8769 $dim0=22551 $dim1=22552 #8769=(36,6,64,32)f32 #8770=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_513 2 1 8770 8771 input1.11 $input=8770 $shape=8771 #8770=(36,64,6,32)f32 #input1.11=(36,64,192)f32 nn.Linear pnnx_unique_125 1 1 input1.11 8773 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.11=(36,64,192)f32 #8773=(36,64,192)f32 nn.Dropout pnnx_unique_126 1 1 8773 8774 #8773=(36,64,192)f32 #8774=(36,64,192)f32 pnnx.Expression pnnx_expr_11075 1 1 8638 8775 expr=[-1,8,8,int(size(@0,2))] #8638=(1,2304,192)f32 pnnx.Expression pnnx_expr_11062 1 1 157 8781 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_11055 0 1 8783 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1417 2 1 8774 8775 windows.81 $input=8774 $shape=8775 #8774=(36,64,192)f32 #windows.81=(36,8,8,192)f32 Tensor.view Tensor.view_1418 2 1 windows.81 8781 x9.9 $input=windows.81 $shape=8781 #windows.81=(36,8,8,192)f32 #x9.9=(1,6,6,8,8,192)f32 
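In the shifted blocks the precomputed attn_mask attribute (36,64,64) is folded into the logits exactly as in reference Swin code: view the per-window attention as (B, nW, heads, N, N), broadcast-add the mask as (1, nW, 1, N, N), flatten back, then softmax. A short sketch with the traced sizes (nW=36, heads=6, N=64); attn here plays the role of attn2.5 above, with the relative position bias already added:

```python
import torch

nW, heads, N = 36, 6, 64
attn = torch.randn(nW, heads, N, N)   # attn2.5: logits + relative position bias
attn_mask = torch.randn(nW, N, N)     # pnnx.Attribute attn_mask.41

B = attn.shape[0] // nW               # batch size, 1 in this trace
attn = attn.view(B, nW, heads, N, N) + attn_mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, heads, N, N)     # back to (36, 6, 64, 64)
attn = attn.softmax(dim=-1)           # nn.Softmax dim=-1, then dropout and attn @ v
```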
pnnx.Expression pnnx_expr_11052 1 1 157 8786 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2687 2 1 x9.9 8783 8784 $input=x9.9 $dims=8783 #x9.9=(1,6,6,8,8,192)f32 #8784=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_121 1 1 8784 8785 memory_format=torch.contiguous_format $input=8784 #8784=(1,6,8,6,8,192)f32 #8785=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_11049 0 1 8788 expr=[4,4] pnnx.Expression pnnx_expr_11046 0 1 8789 expr=[1,2] Tensor.view Tensor.view_1419 2 1 8785 8786 shifted_x.41 $input=8785 $shape=8786 #8785=(1,6,8,6,8,192)f32 #shifted_x.41=(1,48,48,192)f32 pnnx.Expression pnnx_expr_11043 2 1 8638 157 8793 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #8638=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2459 3 1 shifted_x.41 8788 8789 x10.5 $input=shifted_x.41 $shifts=8788 $dims=8789 #shifted_x.41=(1,48,48,192)f32 #x10.5=(1,48,48,192)f32 Tensor.view Tensor.view_1420 2 1 x10.5 8793 x11.5 $input=x10.5 $shape=8793 #x10.5=(1,48,48,192)f32 #x11.5=(1,2304,192)f32 pnnx.Expression pnnx_expr_11041 2 1 8638 x11.5 input.185 expr=add(@0,@1) #8638=(1,2304,192)f32 #x11.5=(1,2304,192)f32 #input.185=(1,2304,192)f32 nn.LayerNorm pnnx_unique_127 1 1 input.185 8797 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.185=(1,2304,192)f32 #8797=(1,2304,192)f32 nn.Linear pnnx_unique_128 1 1 8797 8802 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8797=(1,2304,192)f32 #8802=(1,2304,384)f32 nn.GELU pnnx_unique_129 1 1 8802 8803 #8802=(1,2304,384)f32 #8803=(1,2304,384)f32 nn.Dropout pnnx_unique_130 1 1 8803 8804 #8803=(1,2304,384)f32 #8804=(1,2304,384)f32 nn.Linear pnnx_unique_131 1 1 8804 8805 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8804=(1,2304,384)f32 #8805=(1,2304,192)f32 nn.Dropout pnnx_unique_132 1 1 8805 8806 #8805=(1,2304,192)f32 #8806=(1,2304,192)f32 pnnx.Expression pnnx_expr_11039 2 1 input.185 8806 8808 expr=add(@0,@1) #input.185=(1,2304,192)f32 #8806=(1,2304,192)f32 #8808=(1,2304,192)f32 nn.LayerNorm pnnx_unique_133 1 1 8808 8835 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8808=(1,2304,192)f32 #8835=(1,2304,192)f32 pnnx.Expression pnnx_expr_11017 2 1 8808 157 8836 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #8808=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1421 2 1 8835 8836 x.83 $input=8835 $shape=8836 #8835=(1,2304,192)f32 #x.83=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10996 1 1 x.83 8853 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.83=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10991 0 1 8855 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1422 2 1 x.83 8853 x5.43 $input=x.83 $shape=8853 #x.83=(1,48,48,192)f32 #x5.43=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10988 1 1 x.83 8858 expr=[-1,8,8,int(size(@0,3))] #x.83=(1,48,48,192)f32 torch.permute torch.permute_2688 2 1 x5.43 8855 8856 $input=x5.43 $dims=8855 #x5.43=(1,6,8,6,8,192)f32 #8856=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_122 1 1 8856 8857 memory_format=torch.contiguous_format $input=8856 #8856=(1,6,6,8,8,192)f32 #8857=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10986 1 1 8808 8860 expr=[-1,64,int(size(@0,2))] #8808=(1,2304,192)f32 pnnx.Expression pnnx_expr_10976 0 1 8871 expr=-2 pnnx.Expression pnnx_expr_10975 0 1 8872 expr=-1 pnnx.Attribute pnnx_unique_134 0 1 
relative_position_bias_table.83 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.83=(225,6)f32 pnnx.Attribute pnnx_unique_135 0 1 relative_position_index.83 @relative_position_index=(64,64)i64 #relative_position_index.83=(64,64)i64 Tensor.view Tensor.view_1423 2 1 8857 8858 x_windows.83 $input=8857 $shape=8858 #8857=(1,6,6,8,8,192)f32 #x_windows.83=(36,8,8,192)f32 Tensor.view Tensor.view_1424 2 1 x_windows.83 8860 x6.11 $input=x_windows.83 $shape=8860 #x_windows.83=(36,8,8,192)f32 #x6.11=(36,64,192)f32 nn.Linear pnnx_unique_136 1 1 x6.11 8892 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.11=(36,64,192)f32 #8892=(36,64,576)f32 pnnx.Expression pnnx_expr_10960 1 1 x6.11 8895 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.11=(36,64,192)f32 pnnx.Expression pnnx_expr_10955 0 1 8897 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_514 2 1 8892 8895 8896 $input=8892 $shape=8895 #8892=(36,64,576)f32 #8896=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10954 0 1 22596 expr=0 pnnx.Expression pnnx_expr_10953 0 1 22597 expr=0 pnnx.Expression pnnx_expr_10952 0 1 22598 expr=0 pnnx.Expression pnnx_expr_10951 0 1 22599 expr=1 pnnx.Expression pnnx_expr_10950 0 1 22600 expr=0 pnnx.Expression pnnx_expr_10949 0 1 22601 expr=2 torch.permute torch.permute_2689 2 1 8896 8897 qkv1.11 $input=8896 $dims=8897 #8896=(36,64,3,6,32)f32 #qkv1.11=(3,36,6,64,32)f32 Tensor.select Tensor.select_770 3 1 qkv1.11 22596 22597 q.83 $input=qkv1.11 $dim=22596 $index=22597 #qkv1.11=(3,36,6,64,32)f32 #q.83=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10948 1 1 q.83 q1.11 expr=mul(@0,1.767767e-01) #q.83=(36,6,64,32)f32 #q1.11=(36,6,64,32)f32 Tensor.select Tensor.select_771 3 1 qkv1.11 22598 22599 k.83 $input=qkv1.11 $dim=22598 $index=22599 #qkv1.11=(3,36,6,64,32)f32 #k.83=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10946 0 1 8905 expr=[-1] Tensor.view Tensor.view_1425 2 1 relative_position_index.83 8905 8906 $input=relative_position_index.83 $shape=8905 #relative_position_index.83=(64,64)i64 #8906=(4096)i64 pnnx.Expression pnnx_expr_10945 1 1 8906 8907 expr=[@0] #8906=(4096)i64 pnnx.Expression pnnx_expr_10942 0 1 8909 expr=[64,64,-1] Tensor.index Tensor.index_366 2 1 relative_position_bias_table.83 8907 8908 $input=relative_position_bias_table.83 $expr=8907 #relative_position_bias_table.83=(225,6)f32 #8908=(4096,6)f32 pnnx.Expression pnnx_expr_10938 0 1 8911 expr=[2,0,1] Tensor.view Tensor.view_1426 2 1 8908 8909 relative_position_bias.83 $input=8908 $shape=8909 #8908=(4096,6)f32 #relative_position_bias.83=(64,64,6)f32 pnnx.Expression pnnx_expr_10937 0 1 22609 expr=0 torch.permute torch.permute_2690 2 1 relative_position_bias.83 8911 8912 $input=relative_position_bias.83 $dims=8911 #relative_position_bias.83=(64,64,6)f32 #8912=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_123 1 1 8912 relative_position_bias1.11 memory_format=torch.contiguous_format $input=8912 #8912=(6,64,64)f32 #relative_position_bias1.11=(6,64,64)f32 torch.transpose torch.transpose_3055 3 1 k.83 8871 8872 8903 $input=k.83 $dim0=8871 $dim1=8872 #k.83=(36,6,64,32)f32 #8903=(36,6,32,64)f32 torch.matmul torch.matmul_2284 2 1 q1.11 8903 attn.167 $input=q1.11 $other=8903 #q1.11=(36,6,64,32)f32 #8903=(36,6,32,64)f32 #attn.167=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3298 2 1 relative_position_bias1.11 22609 8914 $input=relative_position_bias1.11 $dim=22609 #relative_position_bias1.11=(6,64,64)f32 #8914=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10935 2 1 attn.167 8914 input.187 expr=add(@0,@1) 
#attn.167=(36,6,64,64)f32 #8914=(1,6,64,64)f32 #input.187=(36,6,64,64)f32 nn.Softmax pnnx_unique_137 1 1 input.187 8916 dim=-1 #input.187=(36,6,64,64)f32 #8916=(36,6,64,64)f32 nn.Dropout pnnx_unique_138 1 1 8916 8917 #8916=(36,6,64,64)f32 #8917=(36,6,64,64)f32 Tensor.select Tensor.select_772 3 1 qkv1.11 22600 22601 v.83 $input=qkv1.11 $dim=22600 $index=22601 #qkv1.11=(3,36,6,64,32)f32 #v.83=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10934 0 1 22611 expr=1 pnnx.Expression pnnx_expr_10933 0 1 22612 expr=2 torch.matmul torch.matmul_2285 2 1 8917 v.83 8918 $input=8917 $other=v.83 #8917=(36,6,64,64)f32 #v.83=(36,6,64,32)f32 #8918=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10932 1 1 x6.11 8920 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.11=(36,64,192)f32 torch.transpose torch.transpose_3056 3 1 8918 22611 22612 8919 $input=8918 $dim0=22611 $dim1=22612 #8918=(36,6,64,32)f32 #8919=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_515 2 1 8919 8920 input1.13 $input=8919 $shape=8920 #8919=(36,64,6,32)f32 #input1.13=(36,64,192)f32 nn.Linear pnnx_unique_139 1 1 input1.13 8922 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.13=(36,64,192)f32 #8922=(36,64,192)f32 nn.Dropout pnnx_unique_140 1 1 8922 8923 #8922=(36,64,192)f32 #8923=(36,64,192)f32 pnnx.Expression pnnx_expr_10928 1 1 8808 8924 expr=[-1,8,8,int(size(@0,2))] #8808=(1,2304,192)f32 pnnx.Expression pnnx_expr_10915 1 1 157 8930 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10908 0 1 8932 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1427 2 1 8923 8924 windows.83 $input=8923 $shape=8924 #8923=(36,64,192)f32 #windows.83=(36,8,8,192)f32 Tensor.view Tensor.view_1428 2 1 windows.83 8930 x7.11 $input=windows.83 $shape=8930 #windows.83=(36,8,8,192)f32 #x7.11=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10905 1 1 157 8935 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2691 2 1 x7.11 8932 8933 $input=x7.11 $dims=8932 #x7.11=(1,6,6,8,8,192)f32 #8933=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_124 1 1 8933 8934 memory_format=torch.contiguous_format $input=8933 #8933=(1,6,8,6,8,192)f32 #8934=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10902 2 1 8808 157 8939 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #8808=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1429 2 1 8934 8935 x8.11 $input=8934 $shape=8935 #8934=(1,6,8,6,8,192)f32 #x8.11=(1,48,48,192)f32 Tensor.view Tensor.view_1430 2 1 x8.11 8939 x9.11 $input=x8.11 $shape=8939 #x8.11=(1,48,48,192)f32 #x9.11=(1,2304,192)f32 pnnx.Expression pnnx_expr_10900 2 1 8808 x9.11 input.189 expr=add(@0,@1) #8808=(1,2304,192)f32 #x9.11=(1,2304,192)f32 #input.189=(1,2304,192)f32 nn.LayerNorm pnnx_unique_141 1 1 input.189 8943 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.189=(1,2304,192)f32 #8943=(1,2304,192)f32 nn.Linear pnnx_unique_142 1 1 8943 8948 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #8943=(1,2304,192)f32 #8948=(1,2304,384)f32 nn.GELU pnnx_unique_143 1 1 8948 8949 #8948=(1,2304,384)f32 #8949=(1,2304,384)f32 nn.Dropout pnnx_unique_144 1 1 8949 8950 #8949=(1,2304,384)f32 #8950=(1,2304,384)f32 nn.Linear pnnx_unique_145 1 1 8950 8951 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #8950=(1,2304,384)f32 #8951=(1,2304,192)f32 nn.Dropout pnnx_unique_146 1 1 8951 8952 #8951=(1,2304,192)f32 
#8952=(1,2304,192)f32 pnnx.Expression pnnx_expr_10898 2 1 input.189 8952 8954 expr=add(@0,@1) #input.189=(1,2304,192)f32 #8952=(1,2304,192)f32 #8954=(1,2304,192)f32 pnnx.Attribute pnnx_unique_147 0 1 attn_mask.43 @attn_mask=(36,64,64)f32 #attn_mask.43=(36,64,64)f32 nn.LayerNorm pnnx_unique_148 1 1 8954 8983 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #8954=(1,2304,192)f32 #8983=(1,2304,192)f32 pnnx.Expression pnnx_expr_10875 2 1 8954 157 8984 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #8954=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10873 0 1 8986 expr=[-4,-4] pnnx.Expression pnnx_expr_10871 0 1 8987 expr=[1,2] Tensor.view Tensor.view_1431 2 1 8983 8984 x.85 $input=8983 $shape=8984 #8983=(1,2304,192)f32 #x.85=(1,48,48,192)f32 torch.roll torch.roll_2460 3 1 x.85 8986 8987 x6.13 $input=x.85 $shifts=8986 $dims=8987 #x.85=(1,48,48,192)f32 #x6.13=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10849 1 1 x6.13 9004 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.13=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10844 0 1 9006 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1432 2 1 x6.13 9004 x7.13 $input=x6.13 $shape=9004 #x6.13=(1,48,48,192)f32 #x7.13=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10841 1 1 x6.13 9009 expr=[-1,8,8,int(size(@0,3))] #x6.13=(1,48,48,192)f32 torch.permute torch.permute_2692 2 1 x7.13 9006 9007 $input=x7.13 $dims=9006 #x7.13=(1,6,8,6,8,192)f32 #9007=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_125 1 1 9007 9008 memory_format=torch.contiguous_format $input=9007 #9007=(1,6,6,8,8,192)f32 #9008=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10839 1 1 8954 9011 expr=[-1,64,int(size(@0,2))] #8954=(1,2304,192)f32 pnnx.Expression pnnx_expr_10829 0 1 9022 expr=-2 pnnx.Expression pnnx_expr_10828 0 1 9023 expr=-1 pnnx.Attribute pnnx_unique_149 0 1 relative_position_bias_table.85 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.85=(225,6)f32 pnnx.Attribute pnnx_unique_150 0 1 relative_position_index.85 @relative_position_index=(64,64)i64 #relative_position_index.85=(64,64)i64 Tensor.view Tensor.view_1433 2 1 9008 9009 x_windows.85 $input=9008 $shape=9009 #9008=(1,6,6,8,8,192)f32 #x_windows.85=(36,8,8,192)f32 Tensor.view Tensor.view_1434 2 1 x_windows.85 9011 x8.13 $input=x_windows.85 $shape=9011 #x_windows.85=(36,8,8,192)f32 #x8.13=(36,64,192)f32 nn.Linear pnnx_unique_151 1 1 x8.13 9047 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.13=(36,64,192)f32 #9047=(36,64,576)f32 pnnx.Expression pnnx_expr_10809 1 1 x8.13 9050 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.13=(36,64,192)f32 pnnx.Expression pnnx_expr_10804 0 1 9052 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_516 2 1 9047 9050 9051 $input=9047 $shape=9050 #9047=(36,64,576)f32 #9051=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10803 0 1 22655 expr=0 pnnx.Expression pnnx_expr_10802 0 1 22656 expr=0 pnnx.Expression pnnx_expr_10801 0 1 22657 expr=0 pnnx.Expression pnnx_expr_10800 0 1 22658 expr=1 pnnx.Expression pnnx_expr_10799 0 1 22659 expr=0 pnnx.Expression pnnx_expr_10798 0 1 22660 expr=2 torch.permute torch.permute_2693 2 1 9051 9052 qkv1.13 $input=9051 $dims=9052 #9051=(36,64,3,6,32)f32 #qkv1.13=(3,36,6,64,32)f32 Tensor.select Tensor.select_773 3 1 qkv1.13 22655 22656 q.85 $input=qkv1.13 $dim=22655 $index=22656 #qkv1.13=(3,36,6,64,32)f32 #q.85=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10797 1 1 q.85 
q1.13 expr=mul(@0,1.767767e-01) #q.85=(36,6,64,32)f32 #q1.13=(36,6,64,32)f32 Tensor.select Tensor.select_774 3 1 qkv1.13 22657 22658 k.85 $input=qkv1.13 $dim=22657 $index=22658 #qkv1.13=(3,36,6,64,32)f32 #k.85=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10795 0 1 9060 expr=[-1] Tensor.view Tensor.view_1435 2 1 relative_position_index.85 9060 9061 $input=relative_position_index.85 $shape=9060 #relative_position_index.85=(64,64)i64 #9061=(4096)i64 pnnx.Expression pnnx_expr_10794 1 1 9061 9062 expr=[@0] #9061=(4096)i64 pnnx.Expression pnnx_expr_10791 0 1 9064 expr=[64,64,-1] Tensor.index Tensor.index_367 2 1 relative_position_bias_table.85 9062 9063 $input=relative_position_bias_table.85 $expr=9062 #relative_position_bias_table.85=(225,6)f32 #9063=(4096,6)f32 pnnx.Expression pnnx_expr_10787 0 1 9066 expr=[2,0,1] Tensor.view Tensor.view_1436 2 1 9063 9064 relative_position_bias.85 $input=9063 $shape=9064 #9063=(4096,6)f32 #relative_position_bias.85=(64,64,6)f32 pnnx.Expression pnnx_expr_10786 0 1 22668 expr=0 torch.permute torch.permute_2694 2 1 relative_position_bias.85 9066 9067 $input=relative_position_bias.85 $dims=9066 #relative_position_bias.85=(64,64,6)f32 #9067=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_126 1 1 9067 relative_position_bias1.13 memory_format=torch.contiguous_format $input=9067 #9067=(6,64,64)f32 #relative_position_bias1.13=(6,64,64)f32 torch.transpose torch.transpose_3057 3 1 k.85 9022 9023 9058 $input=k.85 $dim0=9022 $dim1=9023 #k.85=(36,6,64,32)f32 #9058=(36,6,32,64)f32 torch.matmul torch.matmul_2286 2 1 q1.13 9058 attn.171 $input=q1.13 $other=9058 #q1.13=(36,6,64,32)f32 #9058=(36,6,32,64)f32 #attn.171=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3299 2 1 relative_position_bias1.13 22668 9069 $input=relative_position_bias1.13 $dim=22668 #relative_position_bias1.13=(6,64,64)f32 #9069=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10784 2 1 attn.171 9069 attn2.7 expr=add(@0,@1) #attn.171=(36,6,64,64)f32 #9069=(1,6,64,64)f32 #attn2.7=(36,6,64,64)f32 pnnx.Expression pnnx_expr_10775 2 1 x8.13 attn_mask.43 9076 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.13=(36,64,192)f32 #attn_mask.43=(36,64,64)f32 pnnx.Expression pnnx_expr_10774 0 1 22673 expr=1 pnnx.Expression pnnx_expr_10773 0 1 22674 expr=0 Tensor.view Tensor.view_1437 2 1 attn2.7 9076 9077 $input=attn2.7 $shape=9076 #attn2.7=(36,6,64,64)f32 #9077=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3300 2 1 attn_mask.43 22673 9078 $input=attn_mask.43 $dim=22673 #attn_mask.43=(36,64,64)f32 #9078=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3301 2 1 9078 22674 9079 $input=9078 $dim=22674 #9078=(36,1,64,64)f32 #9079=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_10771 2 1 9077 9079 attn3.7 expr=add(@0,@1) #9077=(1,36,6,64,64)f32 #9079=(1,36,1,64,64)f32 #attn3.7=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_10768 1 1 x8.13 9081 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.13=(36,64,192)f32 Tensor.view Tensor.view_1438 2 1 attn3.7 9081 input.191 $input=attn3.7 $shape=9081 #attn3.7=(1,36,6,64,64)f32 #input.191=(36,6,64,64)f32 nn.Softmax pnnx_unique_152 1 1 input.191 9083 dim=-1 #input.191=(36,6,64,64)f32 #9083=(36,6,64,64)f32 nn.Dropout pnnx_unique_153 1 1 9083 9084 #9083=(36,6,64,64)f32 #9084=(36,6,64,64)f32 Tensor.select Tensor.select_775 3 1 qkv1.13 22659 22660 v.85 $input=qkv1.13 $dim=22659 $index=22660 #qkv1.13=(3,36,6,64,32)f32 #v.85=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10767 0 1 22678 expr=1 pnnx.Expression pnnx_expr_10766 0 1 22679 expr=2 torch.matmul 
torch.matmul_2287 2 1 9084 v.85 9085 $input=9084 $other=v.85 #9084=(36,6,64,64)f32 #v.85=(36,6,64,32)f32 #9085=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10765 1 1 x8.13 9087 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.13=(36,64,192)f32 torch.transpose torch.transpose_3058 3 1 9085 22678 22679 9086 $input=9085 $dim0=22678 $dim1=22679 #9085=(36,6,64,32)f32 #9086=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_517 2 1 9086 9087 input1.15 $input=9086 $shape=9087 #9086=(36,64,6,32)f32 #input1.15=(36,64,192)f32 nn.Linear pnnx_unique_154 1 1 input1.15 9089 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.15=(36,64,192)f32 #9089=(36,64,192)f32 nn.Dropout pnnx_unique_155 1 1 9089 9090 #9089=(36,64,192)f32 #9090=(36,64,192)f32 pnnx.Expression pnnx_expr_10761 1 1 8954 9091 expr=[-1,8,8,int(size(@0,2))] #8954=(1,2304,192)f32 pnnx.Expression pnnx_expr_10748 1 1 157 9097 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10741 0 1 9099 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1439 2 1 9090 9091 windows.85 $input=9090 $shape=9091 #9090=(36,64,192)f32 #windows.85=(36,8,8,192)f32 Tensor.view Tensor.view_1440 2 1 windows.85 9097 x9.13 $input=windows.85 $shape=9097 #windows.85=(36,8,8,192)f32 #x9.13=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10738 1 1 157 9102 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2695 2 1 x9.13 9099 9100 $input=x9.13 $dims=9099 #x9.13=(1,6,6,8,8,192)f32 #9100=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_127 1 1 9100 9101 memory_format=torch.contiguous_format $input=9100 #9100=(1,6,8,6,8,192)f32 #9101=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10735 0 1 9104 expr=[4,4] pnnx.Expression pnnx_expr_10732 0 1 9105 expr=[1,2] Tensor.view Tensor.view_1441 2 1 9101 9102 shifted_x.43 $input=9101 $shape=9102 #9101=(1,6,8,6,8,192)f32 #shifted_x.43=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10729 2 1 8954 157 9109 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #8954=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2461 3 1 shifted_x.43 9104 9105 x10.7 $input=shifted_x.43 $shifts=9104 $dims=9105 #shifted_x.43=(1,48,48,192)f32 #x10.7=(1,48,48,192)f32 Tensor.view Tensor.view_1442 2 1 x10.7 9109 x11.7 $input=x10.7 $shape=9109 #x10.7=(1,48,48,192)f32 #x11.7=(1,2304,192)f32 pnnx.Expression pnnx_expr_10727 2 1 8954 x11.7 input.193 expr=add(@0,@1) #8954=(1,2304,192)f32 #x11.7=(1,2304,192)f32 #input.193=(1,2304,192)f32 nn.LayerNorm pnnx_unique_156 1 1 input.193 9113 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.193=(1,2304,192)f32 #9113=(1,2304,192)f32 nn.Linear pnnx_unique_157 1 1 9113 9118 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9113=(1,2304,192)f32 #9118=(1,2304,384)f32 nn.GELU pnnx_unique_158 1 1 9118 9119 #9118=(1,2304,384)f32 #9119=(1,2304,384)f32 nn.Dropout pnnx_unique_159 1 1 9119 9120 #9119=(1,2304,384)f32 #9120=(1,2304,384)f32 nn.Linear pnnx_unique_160 1 1 9120 9121 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9120=(1,2304,384)f32 #9121=(1,2304,192)f32 nn.Dropout pnnx_unique_161 1 1 9121 9122 #9121=(1,2304,192)f32 #9122=(1,2304,192)f32 pnnx.Expression pnnx_expr_10725 2 1 input.193 9122 9124 expr=add(@0,@1) #input.193=(1,2304,192)f32 #9122=(1,2304,192)f32 #9124=(1,2304,192)f32 pnnx.Expression pnnx_expr_10723 0 1 9126 expr=1 pnnx.Expression pnnx_expr_10722 0 1 
9127 expr=2 pnnx.Expression pnnx_expr_10717 2 1 9124 157 9133 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #9124=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3059 3 1 9124 9126 9127 9132 $input=9124 $dim0=9126 $dim1=9127 #9124=(1,2304,192)f32 #9132=(1,192,2304)f32 Tensor.view Tensor.view_1443 2 1 9132 9133 input.195 $input=9132 $shape=9133 #9132=(1,192,2304)f32 #input.195=(1,192,48,48)f32 nn.Conv2d pnnx_unique_162 1 1 input.195 9135 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.195=(1,192,48,48)f32 #9135=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10716 0 1 9136 expr=-1 pnnx.Expression pnnx_expr_10715 0 1 9137 expr=2 pnnx.Expression pnnx_expr_10714 0 1 9138 expr=1 pnnx.Expression pnnx_expr_10713 0 1 22706 expr=2 torch.flatten torch.flatten_2191 3 1 9135 9137 9136 9139 $input=9135 $start_dim=9137 $end_dim=9136 #9135=(1,192,48,48)f32 #9139=(1,192,2304)f32 torch.transpose torch.transpose_3060 3 1 9139 9138 22706 9140 $input=9139 $dim0=9138 $dim1=22706 #9139=(1,192,2304)f32 #9140=(1,2304,192)f32 pnnx.Expression pnnx_expr_10712 2 1 9140 1327 9141 expr=add(@0,@1) #9140=(1,2304,192)f32 #1327=(1,2304,192)f32 #9141=(1,2304,192)f32 nn.LayerNorm pnnx_unique_163 1 1 9141 9185 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9141=(1,2304,192)f32 #9185=(1,2304,192)f32 pnnx.Expression pnnx_expr_10689 2 1 9141 157 9186 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9141=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1444 2 1 9185 9186 x.87 $input=9185 $shape=9186 #9185=(1,2304,192)f32 #x.87=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10668 1 1 x.87 9203 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.87=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10663 0 1 9205 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1445 2 1 x.87 9203 x5.45 $input=x.87 $shape=9203 #x.87=(1,48,48,192)f32 #x5.45=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10660 1 1 x.87 9208 expr=[-1,8,8,int(size(@0,3))] #x.87=(1,48,48,192)f32 torch.permute torch.permute_2696 2 1 x5.45 9205 9206 $input=x5.45 $dims=9205 #x5.45=(1,6,8,6,8,192)f32 #9206=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_128 1 1 9206 9207 memory_format=torch.contiguous_format $input=9206 #9206=(1,6,6,8,8,192)f32 #9207=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10658 1 1 9141 9210 expr=[-1,64,int(size(@0,2))] #9141=(1,2304,192)f32 pnnx.Expression pnnx_expr_10648 0 1 9221 expr=-2 pnnx.Expression pnnx_expr_10647 0 1 9222 expr=-1 pnnx.Attribute pnnx_unique_164 0 1 relative_position_bias_table.87 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.87=(225,6)f32 pnnx.Attribute pnnx_unique_165 0 1 relative_position_index.87 @relative_position_index=(64,64)i64 #relative_position_index.87=(64,64)i64 Tensor.view Tensor.view_1446 2 1 9207 9208 x_windows.87 $input=9207 $shape=9208 #9207=(1,6,6,8,8,192)f32 #x_windows.87=(36,8,8,192)f32 Tensor.view Tensor.view_1447 2 1 x_windows.87 9210 x6.15 $input=x_windows.87 $shape=9210 #x_windows.87=(36,8,8,192)f32 #x6.15=(36,64,192)f32 nn.Linear pnnx_unique_166 1 1 x6.15 9242 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.15=(36,64,192)f32 #9242=(36,64,576)f32 pnnx.Expression pnnx_expr_10632 1 1 x6.15 9245 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.15=(36,64,192)f32 
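The transpose/view/Conv2d/flatten/transpose run just above (torch.transpose_3059 through torch.transpose_3060, ending in the add with blob 1327) is the tail of a residual group: tokens are folded back into a 48x48 feature map, passed through a 3x3 convolution, unfolded, and added to the group's input. A minimal PyTorch sketch, assuming the SwinIR-style "conv after each residual group" pattern that the operator sequence suggests; the function name is hypothetical.

```python
import torch.nn as nn

# Reconstruction of the residual-group tail seen in the dump:
# tokens -> feature map -> 3x3 conv -> tokens, plus a long skip
# back to the group input (the add with 1327 / 9141 above).
conv = nn.Conv2d(192, 192, kernel_size=3, padding=1)

def residual_group_tail(x, group_input, H=48, W=48):
    B, L, C = x.shape                          # (1, 2304, 192) in the dump
    feat = x.transpose(1, 2).view(B, C, H, W)  # -> (1, 192, 48, 48)
    feat = conv(feat)                          # padding=1 keeps 48x48
    feat = feat.flatten(2).transpose(1, 2)     # back to (B, H*W, C)
    return feat + group_input
```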
pnnx.Expression pnnx_expr_10627 0 1 9247 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_518 2 1 9242 9245 9246 $input=9242 $shape=9245 #9242=(36,64,576)f32 #9246=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10626 0 1 22724 expr=0 pnnx.Expression pnnx_expr_10625 0 1 22725 expr=0 pnnx.Expression pnnx_expr_10624 0 1 22726 expr=0 pnnx.Expression pnnx_expr_10623 0 1 22727 expr=1 pnnx.Expression pnnx_expr_10622 0 1 22728 expr=0 pnnx.Expression pnnx_expr_10621 0 1 22729 expr=2 torch.permute torch.permute_2697 2 1 9246 9247 qkv1.15 $input=9246 $dims=9247 #9246=(36,64,3,6,32)f32 #qkv1.15=(3,36,6,64,32)f32 Tensor.select Tensor.select_776 3 1 qkv1.15 22724 22725 q.87 $input=qkv1.15 $dim=22724 $index=22725 #qkv1.15=(3,36,6,64,32)f32 #q.87=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10620 1 1 q.87 q1.15 expr=mul(@0,1.767767e-01) #q.87=(36,6,64,32)f32 #q1.15=(36,6,64,32)f32 Tensor.select Tensor.select_777 3 1 qkv1.15 22726 22727 k.87 $input=qkv1.15 $dim=22726 $index=22727 #qkv1.15=(3,36,6,64,32)f32 #k.87=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10618 0 1 9255 expr=[-1] Tensor.view Tensor.view_1448 2 1 relative_position_index.87 9255 9256 $input=relative_position_index.87 $shape=9255 #relative_position_index.87=(64,64)i64 #9256=(4096)i64 pnnx.Expression pnnx_expr_10617 1 1 9256 9257 expr=[@0] #9256=(4096)i64 pnnx.Expression pnnx_expr_10614 0 1 9259 expr=[64,64,-1] Tensor.index Tensor.index_368 2 1 relative_position_bias_table.87 9257 9258 $input=relative_position_bias_table.87 $expr=9257 #relative_position_bias_table.87=(225,6)f32 #9258=(4096,6)f32 pnnx.Expression pnnx_expr_10610 0 1 9261 expr=[2,0,1] Tensor.view Tensor.view_1449 2 1 9258 9259 relative_position_bias.87 $input=9258 $shape=9259 #9258=(4096,6)f32 #relative_position_bias.87=(64,64,6)f32 pnnx.Expression pnnx_expr_10609 0 1 22737 expr=0 torch.permute torch.permute_2698 2 1 relative_position_bias.87 9261 9262 $input=relative_position_bias.87 $dims=9261 #relative_position_bias.87=(64,64,6)f32 #9262=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_129 1 1 9262 relative_position_bias1.15 memory_format=torch.contiguous_format $input=9262 #9262=(6,64,64)f32 #relative_position_bias1.15=(6,64,64)f32 torch.transpose torch.transpose_3061 3 1 k.87 9221 9222 9253 $input=k.87 $dim0=9221 $dim1=9222 #k.87=(36,6,64,32)f32 #9253=(36,6,32,64)f32 torch.matmul torch.matmul_2288 2 1 q1.15 9253 attn.175 $input=q1.15 $other=9253 #q1.15=(36,6,64,32)f32 #9253=(36,6,32,64)f32 #attn.175=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3302 2 1 relative_position_bias1.15 22737 9264 $input=relative_position_bias1.15 $dim=22737 #relative_position_bias1.15=(6,64,64)f32 #9264=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10607 2 1 attn.175 9264 input.197 expr=add(@0,@1) #attn.175=(36,6,64,64)f32 #9264=(1,6,64,64)f32 #input.197=(36,6,64,64)f32 nn.Softmax pnnx_unique_167 1 1 input.197 9266 dim=-1 #input.197=(36,6,64,64)f32 #9266=(36,6,64,64)f32 nn.Dropout pnnx_unique_168 1 1 9266 9267 #9266=(36,6,64,64)f32 #9267=(36,6,64,64)f32 Tensor.select Tensor.select_778 3 1 qkv1.15 22728 22729 v.87 $input=qkv1.15 $dim=22728 $index=22729 #qkv1.15=(3,36,6,64,32)f32 #v.87=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10606 0 1 22739 expr=1 pnnx.Expression pnnx_expr_10605 0 1 22740 expr=2 torch.matmul torch.matmul_2289 2 1 9267 v.87 9268 $input=9267 $other=v.87 #9267=(36,6,64,64)f32 #v.87=(36,6,64,32)f32 #9268=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10604 1 1 x6.15 9270 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.15=(36,64,192)f32 torch.transpose torch.transpose_3062 3 1 9268 22739 
22740 9269 $input=9268 $dim0=22739 $dim1=22740 #9268=(36,6,64,32)f32 #9269=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_519 2 1 9269 9270 input1.17 $input=9269 $shape=9270 #9269=(36,64,6,32)f32 #input1.17=(36,64,192)f32 nn.Linear pnnx_unique_169 1 1 input1.17 9272 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.17=(36,64,192)f32 #9272=(36,64,192)f32 nn.Dropout pnnx_unique_170 1 1 9272 9273 #9272=(36,64,192)f32 #9273=(36,64,192)f32 pnnx.Expression pnnx_expr_10600 1 1 9141 9274 expr=[-1,8,8,int(size(@0,2))] #9141=(1,2304,192)f32 pnnx.Expression pnnx_expr_10587 1 1 157 9280 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10580 0 1 9282 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1450 2 1 9273 9274 windows.87 $input=9273 $shape=9274 #9273=(36,64,192)f32 #windows.87=(36,8,8,192)f32 Tensor.view Tensor.view_1451 2 1 windows.87 9280 x7.15 $input=windows.87 $shape=9280 #windows.87=(36,8,8,192)f32 #x7.15=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10577 1 1 157 9285 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2699 2 1 x7.15 9282 9283 $input=x7.15 $dims=9282 #x7.15=(1,6,6,8,8,192)f32 #9283=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_130 1 1 9283 9284 memory_format=torch.contiguous_format $input=9283 #9283=(1,6,8,6,8,192)f32 #9284=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10574 2 1 9141 157 9289 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9141=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1452 2 1 9284 9285 x8.15 $input=9284 $shape=9285 #9284=(1,6,8,6,8,192)f32 #x8.15=(1,48,48,192)f32 Tensor.view Tensor.view_1453 2 1 x8.15 9289 x9.15 $input=x8.15 $shape=9289 #x8.15=(1,48,48,192)f32 #x9.15=(1,2304,192)f32 pnnx.Expression pnnx_expr_10572 2 1 9141 x9.15 input.199 expr=add(@0,@1) #9141=(1,2304,192)f32 #x9.15=(1,2304,192)f32 #input.199=(1,2304,192)f32 nn.LayerNorm pnnx_unique_171 1 1 input.199 9293 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.199=(1,2304,192)f32 #9293=(1,2304,192)f32 nn.Linear pnnx_unique_172 1 1 9293 9298 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9293=(1,2304,192)f32 #9298=(1,2304,384)f32 nn.GELU pnnx_unique_173 1 1 9298 9299 #9298=(1,2304,384)f32 #9299=(1,2304,384)f32 nn.Dropout pnnx_unique_174 1 1 9299 9300 #9299=(1,2304,384)f32 #9300=(1,2304,384)f32 nn.Linear pnnx_unique_175 1 1 9300 9301 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9300=(1,2304,384)f32 #9301=(1,2304,192)f32 nn.Dropout pnnx_unique_176 1 1 9301 9302 #9301=(1,2304,192)f32 #9302=(1,2304,192)f32 pnnx.Expression pnnx_expr_10570 2 1 input.199 9302 9304 expr=add(@0,@1) #input.199=(1,2304,192)f32 #9302=(1,2304,192)f32 #9304=(1,2304,192)f32 pnnx.Attribute pnnx_unique_177 0 1 attn_mask.45 @attn_mask=(36,64,64)f32 #attn_mask.45=(36,64,64)f32 nn.LayerNorm pnnx_unique_178 1 1 9304 9333 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9304=(1,2304,192)f32 #9333=(1,2304,192)f32 pnnx.Expression pnnx_expr_10547 2 1 9304 157 9334 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9304=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10545 0 1 9336 expr=[-4,-4] pnnx.Expression pnnx_expr_10543 0 1 9337 expr=[1,2] Tensor.view Tensor.view_1454 2 1 9333 9334 x.89 $input=9333 $shape=9334 #9333=(1,2304,192)f32 
#x.89=(1,48,48,192)f32 torch.roll torch.roll_2462 3 1 x.89 9336 9337 x6.17 $input=x.89 $shifts=9336 $dims=9337 #x.89=(1,48,48,192)f32 #x6.17=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10521 1 1 x6.17 9354 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.17=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10516 0 1 9356 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1455 2 1 x6.17 9354 x7.17 $input=x6.17 $shape=9354 #x6.17=(1,48,48,192)f32 #x7.17=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10513 1 1 x6.17 9359 expr=[-1,8,8,int(size(@0,3))] #x6.17=(1,48,48,192)f32 torch.permute torch.permute_2700 2 1 x7.17 9356 9357 $input=x7.17 $dims=9356 #x7.17=(1,6,8,6,8,192)f32 #9357=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_131 1 1 9357 9358 memory_format=torch.contiguous_format $input=9357 #9357=(1,6,6,8,8,192)f32 #9358=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10511 1 1 9304 9361 expr=[-1,64,int(size(@0,2))] #9304=(1,2304,192)f32 pnnx.Expression pnnx_expr_10501 0 1 9372 expr=-2 pnnx.Expression pnnx_expr_10500 0 1 9373 expr=-1 pnnx.Attribute pnnx_unique_179 0 1 relative_position_bias_table.89 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.89=(225,6)f32 pnnx.Attribute pnnx_unique_180 0 1 relative_position_index.89 @relative_position_index=(64,64)i64 #relative_position_index.89=(64,64)i64 Tensor.view Tensor.view_1456 2 1 9358 9359 x_windows.89 $input=9358 $shape=9359 #9358=(1,6,6,8,8,192)f32 #x_windows.89=(36,8,8,192)f32 Tensor.view Tensor.view_1457 2 1 x_windows.89 9361 x8.17 $input=x_windows.89 $shape=9361 #x_windows.89=(36,8,8,192)f32 #x8.17=(36,64,192)f32 nn.Linear pnnx_unique_181 1 1 x8.17 9397 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.17=(36,64,192)f32 #9397=(36,64,576)f32 pnnx.Expression pnnx_expr_10481 1 1 x8.17 9400 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.17=(36,64,192)f32 pnnx.Expression pnnx_expr_10476 0 1 9402 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_520 2 1 9397 9400 9401 $input=9397 $shape=9400 #9397=(36,64,576)f32 #9401=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10475 0 1 22783 expr=0 pnnx.Expression pnnx_expr_10474 0 1 22784 expr=0 pnnx.Expression pnnx_expr_10473 0 1 22785 expr=0 pnnx.Expression pnnx_expr_10472 0 1 22786 expr=1 pnnx.Expression pnnx_expr_10471 0 1 22787 expr=0 pnnx.Expression pnnx_expr_10470 0 1 22788 expr=2 torch.permute torch.permute_2701 2 1 9401 9402 qkv1.17 $input=9401 $dims=9402 #9401=(36,64,3,6,32)f32 #qkv1.17=(3,36,6,64,32)f32 Tensor.select Tensor.select_779 3 1 qkv1.17 22783 22784 q.89 $input=qkv1.17 $dim=22783 $index=22784 #qkv1.17=(3,36,6,64,32)f32 #q.89=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10469 1 1 q.89 q1.17 expr=mul(@0,1.767767e-01) #q.89=(36,6,64,32)f32 #q1.17=(36,6,64,32)f32 Tensor.select Tensor.select_780 3 1 qkv1.17 22785 22786 k.89 $input=qkv1.17 $dim=22785 $index=22786 #qkv1.17=(3,36,6,64,32)f32 #k.89=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10467 0 1 9410 expr=[-1] Tensor.view Tensor.view_1458 2 1 relative_position_index.89 9410 9411 $input=relative_position_index.89 $shape=9410 #relative_position_index.89=(64,64)i64 #9411=(4096)i64 pnnx.Expression pnnx_expr_10466 1 1 9411 9412 expr=[@0] #9411=(4096)i64 pnnx.Expression pnnx_expr_10463 0 1 9414 expr=[64,64,-1] Tensor.index Tensor.index_369 2 1 relative_position_bias_table.89 9412 9413 $input=relative_position_bias_table.89 $expr=9412 #relative_position_bias_table.89=(225,6)f32 #9413=(4096,6)f32 pnnx.Expression pnnx_expr_10459 0 1 9416 
expr=[2,0,1] Tensor.view Tensor.view_1459 2 1 9413 9414 relative_position_bias.89 $input=9413 $shape=9414 #9413=(4096,6)f32 #relative_position_bias.89=(64,64,6)f32 pnnx.Expression pnnx_expr_10458 0 1 22796 expr=0 torch.permute torch.permute_2702 2 1 relative_position_bias.89 9416 9417 $input=relative_position_bias.89 $dims=9416 #relative_position_bias.89=(64,64,6)f32 #9417=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_132 1 1 9417 relative_position_bias1.17 memory_format=torch.contiguous_format $input=9417 #9417=(6,64,64)f32 #relative_position_bias1.17=(6,64,64)f32 torch.transpose torch.transpose_3063 3 1 k.89 9372 9373 9408 $input=k.89 $dim0=9372 $dim1=9373 #k.89=(36,6,64,32)f32 #9408=(36,6,32,64)f32 torch.matmul torch.matmul_2290 2 1 q1.17 9408 attn.179 $input=q1.17 $other=9408 #q1.17=(36,6,64,32)f32 #9408=(36,6,32,64)f32 #attn.179=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3303 2 1 relative_position_bias1.17 22796 9419 $input=relative_position_bias1.17 $dim=22796 #relative_position_bias1.17=(6,64,64)f32 #9419=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10456 2 1 attn.179 9419 attn2.9 expr=add(@0,@1) #attn.179=(36,6,64,64)f32 #9419=(1,6,64,64)f32 #attn2.9=(36,6,64,64)f32 pnnx.Expression pnnx_expr_10447 2 1 x8.17 attn_mask.45 9426 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.17=(36,64,192)f32 #attn_mask.45=(36,64,64)f32 pnnx.Expression pnnx_expr_10446 0 1 22801 expr=1 pnnx.Expression pnnx_expr_10445 0 1 22802 expr=0 Tensor.view Tensor.view_1460 2 1 attn2.9 9426 9427 $input=attn2.9 $shape=9426 #attn2.9=(36,6,64,64)f32 #9427=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3304 2 1 attn_mask.45 22801 9428 $input=attn_mask.45 $dim=22801 #attn_mask.45=(36,64,64)f32 #9428=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3305 2 1 9428 22802 9429 $input=9428 $dim=22802 #9428=(36,1,64,64)f32 #9429=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_10443 2 1 9427 9429 attn3.9 expr=add(@0,@1) #9427=(1,36,6,64,64)f32 #9429=(1,36,1,64,64)f32 #attn3.9=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_10440 1 1 x8.17 9431 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.17=(36,64,192)f32 Tensor.view Tensor.view_1461 2 1 attn3.9 9431 input.201 $input=attn3.9 $shape=9431 #attn3.9=(1,36,6,64,64)f32 #input.201=(36,6,64,64)f32 nn.Softmax pnnx_unique_182 1 1 input.201 9433 dim=-1 #input.201=(36,6,64,64)f32 #9433=(36,6,64,64)f32 nn.Dropout pnnx_unique_183 1 1 9433 9434 #9433=(36,6,64,64)f32 #9434=(36,6,64,64)f32 Tensor.select Tensor.select_781 3 1 qkv1.17 22787 22788 v.89 $input=qkv1.17 $dim=22787 $index=22788 #qkv1.17=(3,36,6,64,32)f32 #v.89=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10439 0 1 22806 expr=1 pnnx.Expression pnnx_expr_10438 0 1 22807 expr=2 torch.matmul torch.matmul_2291 2 1 9434 v.89 9435 $input=9434 $other=v.89 #9434=(36,6,64,64)f32 #v.89=(36,6,64,32)f32 #9435=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10437 1 1 x8.17 9437 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.17=(36,64,192)f32 torch.transpose torch.transpose_3064 3 1 9435 22806 22807 9436 $input=9435 $dim0=22806 $dim1=22807 #9435=(36,6,64,32)f32 #9436=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_521 2 1 9436 9437 input1.19 $input=9436 $shape=9437 #9436=(36,64,6,32)f32 #input1.19=(36,64,192)f32 nn.Linear pnnx_unique_184 1 1 input1.19 9439 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.19=(36,64,192)f32 #9439=(36,64,192)f32 nn.Dropout pnnx_unique_185 1 1 9439 9440 #9439=(36,64,192)f32 #9440=(36,64,192)f32 pnnx.Expression pnnx_expr_10433 1 1 9304 
9441 expr=[-1,8,8,int(size(@0,2))] #9304=(1,2304,192)f32 pnnx.Expression pnnx_expr_10420 1 1 157 9447 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10413 0 1 9449 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1462 2 1 9440 9441 windows.89 $input=9440 $shape=9441 #9440=(36,64,192)f32 #windows.89=(36,8,8,192)f32 Tensor.view Tensor.view_1463 2 1 windows.89 9447 x9.17 $input=windows.89 $shape=9447 #windows.89=(36,8,8,192)f32 #x9.17=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10410 1 1 157 9452 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2703 2 1 x9.17 9449 9450 $input=x9.17 $dims=9449 #x9.17=(1,6,6,8,8,192)f32 #9450=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_133 1 1 9450 9451 memory_format=torch.contiguous_format $input=9450 #9450=(1,6,8,6,8,192)f32 #9451=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10407 0 1 9454 expr=[4,4] pnnx.Expression pnnx_expr_10404 0 1 9455 expr=[1,2] Tensor.view Tensor.view_1464 2 1 9451 9452 shifted_x.45 $input=9451 $shape=9452 #9451=(1,6,8,6,8,192)f32 #shifted_x.45=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10401 2 1 9304 157 9459 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9304=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2463 3 1 shifted_x.45 9454 9455 x10.9 $input=shifted_x.45 $shifts=9454 $dims=9455 #shifted_x.45=(1,48,48,192)f32 #x10.9=(1,48,48,192)f32 Tensor.view Tensor.view_1465 2 1 x10.9 9459 x11.9 $input=x10.9 $shape=9459 #x10.9=(1,48,48,192)f32 #x11.9=(1,2304,192)f32 pnnx.Expression pnnx_expr_10399 2 1 9304 x11.9 input.203 expr=add(@0,@1) #9304=(1,2304,192)f32 #x11.9=(1,2304,192)f32 #input.203=(1,2304,192)f32 nn.LayerNorm pnnx_unique_186 1 1 input.203 9463 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.203=(1,2304,192)f32 #9463=(1,2304,192)f32 nn.Linear pnnx_unique_187 1 1 9463 9468 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9463=(1,2304,192)f32 #9468=(1,2304,384)f32 nn.GELU pnnx_unique_188 1 1 9468 9469 #9468=(1,2304,384)f32 #9469=(1,2304,384)f32 nn.Dropout pnnx_unique_189 1 1 9469 9470 #9469=(1,2304,384)f32 #9470=(1,2304,384)f32 nn.Linear pnnx_unique_190 1 1 9470 9471 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9470=(1,2304,384)f32 #9471=(1,2304,192)f32 nn.Dropout pnnx_unique_191 1 1 9471 9472 #9471=(1,2304,192)f32 #9472=(1,2304,192)f32 pnnx.Expression pnnx_expr_10397 2 1 input.203 9472 9474 expr=add(@0,@1) #input.203=(1,2304,192)f32 #9472=(1,2304,192)f32 #9474=(1,2304,192)f32 nn.LayerNorm pnnx_unique_192 1 1 9474 9501 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9474=(1,2304,192)f32 #9501=(1,2304,192)f32 pnnx.Expression pnnx_expr_10375 2 1 9474 157 9502 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9474=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1466 2 1 9501 9502 x.91 $input=9501 $shape=9502 #9501=(1,2304,192)f32 #x.91=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10354 1 1 x.91 9519 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.91=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10349 0 1 9521 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1467 2 1 x.91 9519 x5.47 $input=x.91 $shape=9519 #x.91=(1,48,48,192)f32 #x5.47=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10346 1 1 x.91 9524 expr=[-1,8,8,int(size(@0,3))] #x.91=(1,48,48,192)f32 
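Each block above partitions the 48x48 token map into 8x8 windows with a view/permute/contiguous/view chain (e.g. Tensor.view_1467 through Tensor.view_1469) and later inverts it (Tensor.view_1462 through Tensor.view_1464). A sketch of both directions, assuming standard Swin window partitioning; the function names are hypothetical.

```python
import torch

def window_partition(x, ws=8):
    # x: (B, H, W, C) -> (num_windows*B, ws*ws, C), mirroring the
    # view -> permute([0,1,3,2,4,5]) -> contiguous -> view chain above
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, ws * ws, C)

def window_reverse(windows, ws, H, W):
    # inverse: (num_windows*B, ws, ws, C) -> (B, H, W, C)
    B = windows.shape[0] // (H // ws * W // ws)
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
```

With H = W = 48 and ws = 8 this yields the (36, 64, 192) window tensors seen throughout the dump.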
torch.permute torch.permute_2704 2 1 x5.47 9521 9522 $input=x5.47 $dims=9521 #x5.47=(1,6,8,6,8,192)f32 #9522=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_134 1 1 9522 9523 memory_format=torch.contiguous_format $input=9522 #9522=(1,6,6,8,8,192)f32 #9523=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10344 1 1 9474 9526 expr=[-1,64,int(size(@0,2))] #9474=(1,2304,192)f32 pnnx.Expression pnnx_expr_10334 0 1 9537 expr=-2 pnnx.Expression pnnx_expr_10333 0 1 9538 expr=-1 pnnx.Attribute pnnx_unique_193 0 1 relative_position_bias_table.91 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.91=(225,6)f32 pnnx.Attribute pnnx_unique_194 0 1 relative_position_index.91 @relative_position_index=(64,64)i64 #relative_position_index.91=(64,64)i64 Tensor.view Tensor.view_1468 2 1 9523 9524 x_windows.91 $input=9523 $shape=9524 #9523=(1,6,6,8,8,192)f32 #x_windows.91=(36,8,8,192)f32 Tensor.view Tensor.view_1469 2 1 x_windows.91 9526 x6.19 $input=x_windows.91 $shape=9526 #x_windows.91=(36,8,8,192)f32 #x6.19=(36,64,192)f32 nn.Linear pnnx_unique_195 1 1 x6.19 9558 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.19=(36,64,192)f32 #9558=(36,64,576)f32 pnnx.Expression pnnx_expr_10318 1 1 x6.19 9561 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.19=(36,64,192)f32 pnnx.Expression pnnx_expr_10313 0 1 9563 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_522 2 1 9558 9561 9562 $input=9558 $shape=9561 #9558=(36,64,576)f32 #9562=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10312 0 1 22851 expr=0 pnnx.Expression pnnx_expr_10311 0 1 22852 expr=0 pnnx.Expression pnnx_expr_10310 0 1 22853 expr=0 pnnx.Expression pnnx_expr_10309 0 1 22854 expr=1 pnnx.Expression pnnx_expr_10308 0 1 22855 expr=0 pnnx.Expression pnnx_expr_10307 0 1 22856 expr=2 torch.permute torch.permute_2705 2 1 9562 9563 qkv1.19 $input=9562 $dims=9563 #9562=(36,64,3,6,32)f32 #qkv1.19=(3,36,6,64,32)f32 Tensor.select Tensor.select_782 3 1 qkv1.19 22851 22852 q.91 $input=qkv1.19 $dim=22851 $index=22852 #qkv1.19=(3,36,6,64,32)f32 #q.91=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10306 1 1 q.91 q1.19 expr=mul(@0,1.767767e-01) #q.91=(36,6,64,32)f32 #q1.19=(36,6,64,32)f32 Tensor.select Tensor.select_783 3 1 qkv1.19 22853 22854 k.91 $input=qkv1.19 $dim=22853 $index=22854 #qkv1.19=(3,36,6,64,32)f32 #k.91=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10304 0 1 9571 expr=[-1] Tensor.view Tensor.view_1470 2 1 relative_position_index.91 9571 9572 $input=relative_position_index.91 $shape=9571 #relative_position_index.91=(64,64)i64 #9572=(4096)i64 pnnx.Expression pnnx_expr_10303 1 1 9572 9573 expr=[@0] #9572=(4096)i64 pnnx.Expression pnnx_expr_10300 0 1 9575 expr=[64,64,-1] Tensor.index Tensor.index_370 2 1 relative_position_bias_table.91 9573 9574 $input=relative_position_bias_table.91 $expr=9573 #relative_position_bias_table.91=(225,6)f32 #9574=(4096,6)f32 pnnx.Expression pnnx_expr_10296 0 1 9577 expr=[2,0,1] Tensor.view Tensor.view_1471 2 1 9574 9575 relative_position_bias.91 $input=9574 $shape=9575 #9574=(4096,6)f32 #relative_position_bias.91=(64,64,6)f32 pnnx.Expression pnnx_expr_10295 0 1 22864 expr=0 torch.permute torch.permute_2706 2 1 relative_position_bias.91 9577 9578 $input=relative_position_bias.91 $dims=9577 #relative_position_bias.91=(64,64,6)f32 #9578=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_135 1 1 9578 relative_position_bias1.19 memory_format=torch.contiguous_format $input=9578 #9578=(6,64,64)f32 #relative_position_bias1.19=(6,64,64)f32 torch.transpose torch.transpose_3065 3 1 
k.91 9537 9538 9569 $input=k.91 $dim0=9537 $dim1=9538 #k.91=(36,6,64,32)f32 #9569=(36,6,32,64)f32 torch.matmul torch.matmul_2292 2 1 q1.19 9569 attn.183 $input=q1.19 $other=9569 #q1.19=(36,6,64,32)f32 #9569=(36,6,32,64)f32 #attn.183=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3306 2 1 relative_position_bias1.19 22864 9580 $input=relative_position_bias1.19 $dim=22864 #relative_position_bias1.19=(6,64,64)f32 #9580=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10293 2 1 attn.183 9580 input.205 expr=add(@0,@1) #attn.183=(36,6,64,64)f32 #9580=(1,6,64,64)f32 #input.205=(36,6,64,64)f32 nn.Softmax pnnx_unique_196 1 1 input.205 9582 dim=-1 #input.205=(36,6,64,64)f32 #9582=(36,6,64,64)f32 nn.Dropout pnnx_unique_197 1 1 9582 9583 #9582=(36,6,64,64)f32 #9583=(36,6,64,64)f32 Tensor.select Tensor.select_784 3 1 qkv1.19 22855 22856 v.91 $input=qkv1.19 $dim=22855 $index=22856 #qkv1.19=(3,36,6,64,32)f32 #v.91=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10292 0 1 22866 expr=1 pnnx.Expression pnnx_expr_10291 0 1 22867 expr=2 torch.matmul torch.matmul_2293 2 1 9583 v.91 9584 $input=9583 $other=v.91 #9583=(36,6,64,64)f32 #v.91=(36,6,64,32)f32 #9584=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10290 1 1 x6.19 9586 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.19=(36,64,192)f32 torch.transpose torch.transpose_3066 3 1 9584 22866 22867 9585 $input=9584 $dim0=22866 $dim1=22867 #9584=(36,6,64,32)f32 #9585=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_523 2 1 9585 9586 input1.21 $input=9585 $shape=9586 #9585=(36,64,6,32)f32 #input1.21=(36,64,192)f32 nn.Linear pnnx_unique_198 1 1 input1.21 9588 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.21=(36,64,192)f32 #9588=(36,64,192)f32 nn.Dropout pnnx_unique_199 1 1 9588 9589 #9588=(36,64,192)f32 #9589=(36,64,192)f32 pnnx.Expression pnnx_expr_10286 1 1 9474 9590 expr=[-1,8,8,int(size(@0,2))] #9474=(1,2304,192)f32 pnnx.Expression pnnx_expr_10273 1 1 157 9596 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10266 0 1 9598 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1472 2 1 9589 9590 windows.91 $input=9589 $shape=9590 #9589=(36,64,192)f32 #windows.91=(36,8,8,192)f32 Tensor.view Tensor.view_1473 2 1 windows.91 9596 x7.19 $input=windows.91 $shape=9596 #windows.91=(36,8,8,192)f32 #x7.19=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10263 1 1 157 9601 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2707 2 1 x7.19 9598 9599 $input=x7.19 $dims=9598 #x7.19=(1,6,6,8,8,192)f32 #9599=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_136 1 1 9599 9600 memory_format=torch.contiguous_format $input=9599 #9599=(1,6,8,6,8,192)f32 #9600=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10260 2 1 9474 157 9605 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9474=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1474 2 1 9600 9601 x8.19 $input=9600 $shape=9601 #9600=(1,6,8,6,8,192)f32 #x8.19=(1,48,48,192)f32 Tensor.view Tensor.view_1475 2 1 x8.19 9605 x9.19 $input=x8.19 $shape=9605 #x8.19=(1,48,48,192)f32 #x9.19=(1,2304,192)f32 pnnx.Expression pnnx_expr_10258 2 1 9474 x9.19 input.207 expr=add(@0,@1) #9474=(1,2304,192)f32 #x9.19=(1,2304,192)f32 #input.207=(1,2304,192)f32 nn.LayerNorm pnnx_unique_200 1 1 input.207 9609 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.207=(1,2304,192)f32 #9609=(1,2304,192)f32 nn.Linear pnnx_unique_201 1 1 9609 9614 bias=True 
in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9609=(1,2304,192)f32 #9614=(1,2304,384)f32 nn.GELU pnnx_unique_202 1 1 9614 9615 #9614=(1,2304,384)f32 #9615=(1,2304,384)f32 nn.Dropout pnnx_unique_203 1 1 9615 9616 #9615=(1,2304,384)f32 #9616=(1,2304,384)f32 nn.Linear pnnx_unique_204 1 1 9616 9617 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9616=(1,2304,384)f32 #9617=(1,2304,192)f32 nn.Dropout pnnx_unique_205 1 1 9617 9618 #9617=(1,2304,192)f32 #9618=(1,2304,192)f32 pnnx.Expression pnnx_expr_10256 2 1 input.207 9618 9620 expr=add(@0,@1) #input.207=(1,2304,192)f32 #9618=(1,2304,192)f32 #9620=(1,2304,192)f32 pnnx.Attribute pnnx_unique_206 0 1 attn_mask.47 @attn_mask=(36,64,64)f32 #attn_mask.47=(36,64,64)f32 nn.LayerNorm pnnx_unique_207 1 1 9620 9649 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9620=(1,2304,192)f32 #9649=(1,2304,192)f32 pnnx.Expression pnnx_expr_10233 2 1 9620 157 9650 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9620=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10231 0 1 9652 expr=[-4,-4] pnnx.Expression pnnx_expr_10229 0 1 9653 expr=[1,2] Tensor.view Tensor.view_1476 2 1 9649 9650 x.93 $input=9649 $shape=9650 #9649=(1,2304,192)f32 #x.93=(1,48,48,192)f32 torch.roll torch.roll_2464 3 1 x.93 9652 9653 x6.21 $input=x.93 $shifts=9652 $dims=9653 #x.93=(1,48,48,192)f32 #x6.21=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10207 1 1 x6.21 9670 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.21=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10202 0 1 9672 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1477 2 1 x6.21 9670 x7.21 $input=x6.21 $shape=9670 #x6.21=(1,48,48,192)f32 #x7.21=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10199 1 1 x6.21 9675 expr=[-1,8,8,int(size(@0,3))] #x6.21=(1,48,48,192)f32 torch.permute torch.permute_2708 2 1 x7.21 9672 9673 $input=x7.21 $dims=9672 #x7.21=(1,6,8,6,8,192)f32 #9673=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_137 1 1 9673 9674 memory_format=torch.contiguous_format $input=9673 #9673=(1,6,6,8,8,192)f32 #9674=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10197 1 1 9620 9677 expr=[-1,64,int(size(@0,2))] #9620=(1,2304,192)f32 pnnx.Expression pnnx_expr_10187 0 1 9688 expr=-2 pnnx.Expression pnnx_expr_10186 0 1 9689 expr=-1 pnnx.Attribute pnnx_unique_208 0 1 relative_position_bias_table.93 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.93=(225,6)f32 pnnx.Attribute pnnx_unique_209 0 1 relative_position_index.93 @relative_position_index=(64,64)i64 #relative_position_index.93=(64,64)i64 Tensor.view Tensor.view_1478 2 1 9674 9675 x_windows.93 $input=9674 $shape=9675 #9674=(1,6,6,8,8,192)f32 #x_windows.93=(36,8,8,192)f32 Tensor.view Tensor.view_1479 2 1 x_windows.93 9677 x8.21 $input=x_windows.93 $shape=9677 #x_windows.93=(36,8,8,192)f32 #x8.21=(36,64,192)f32 nn.Linear pnnx_unique_210 1 1 x8.21 9713 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.21=(36,64,192)f32 #9713=(36,64,576)f32 pnnx.Expression pnnx_expr_10167 1 1 x8.21 9716 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.21=(36,64,192)f32 pnnx.Expression pnnx_expr_10162 0 1 9718 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_524 2 1 9713 9716 9717 $input=9713 $shape=9716 #9713=(36,64,576)f32 #9717=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_10161 0 1 22910 expr=0 pnnx.Expression pnnx_expr_10160 0 1 22911 expr=0 
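The 192-to-576 nn.Linear followed by reshape to (36, 64, 3, 6, 32), permute([2, 0, 3, 1, 4]), and three Tensor.select ops is the fused QKV projection and head split; the recurring mul(@0, 1.767767e-01) is the query scaling, 1/sqrt(32). A sketch under those read-off dimensions; the function name is hypothetical.

```python
import torch
import torch.nn as nn

num_heads, head_dim, dim = 6, 32, 192
scale = head_dim ** -0.5            # = 0.1767767, matching mul(@0,1.767767e-01)
qkv_proj = nn.Linear(dim, 3 * dim)  # the 192 -> 576 nn.Linear in the dump

def split_qkv(x):
    # x: (num_windows, N, C) with N = 64 tokens per 8x8 window
    B_, N, C = x.shape
    qkv = qkv_proj(x).reshape(B_, N, 3, num_heads, head_dim)
    qkv = qkv.permute(2, 0, 3, 1, 4)   # (3, B_, heads, N, head_dim)
    q, k, v = qkv[0], qkv[1], qkv[2]   # the three Tensor.select ops
    return q * scale, k, v             # scores: (q*scale) @ k.transpose(-2, -1)
```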
pnnx.Expression pnnx_expr_10159 0 1 22912 expr=0 pnnx.Expression pnnx_expr_10158 0 1 22913 expr=1 pnnx.Expression pnnx_expr_10157 0 1 22914 expr=0 pnnx.Expression pnnx_expr_10156 0 1 22915 expr=2 torch.permute torch.permute_2709 2 1 9717 9718 qkv1.21 $input=9717 $dims=9718 #9717=(36,64,3,6,32)f32 #qkv1.21=(3,36,6,64,32)f32 Tensor.select Tensor.select_785 3 1 qkv1.21 22910 22911 q.93 $input=qkv1.21 $dim=22910 $index=22911 #qkv1.21=(3,36,6,64,32)f32 #q.93=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10155 1 1 q.93 q1.21 expr=mul(@0,1.767767e-01) #q.93=(36,6,64,32)f32 #q1.21=(36,6,64,32)f32 Tensor.select Tensor.select_786 3 1 qkv1.21 22912 22913 k.93 $input=qkv1.21 $dim=22912 $index=22913 #qkv1.21=(3,36,6,64,32)f32 #k.93=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10153 0 1 9726 expr=[-1] Tensor.view Tensor.view_1480 2 1 relative_position_index.93 9726 9727 $input=relative_position_index.93 $shape=9726 #relative_position_index.93=(64,64)i64 #9727=(4096)i64 pnnx.Expression pnnx_expr_10152 1 1 9727 9728 expr=[@0] #9727=(4096)i64 pnnx.Expression pnnx_expr_10149 0 1 9730 expr=[64,64,-1] Tensor.index Tensor.index_371 2 1 relative_position_bias_table.93 9728 9729 $input=relative_position_bias_table.93 $expr=9728 #relative_position_bias_table.93=(225,6)f32 #9729=(4096,6)f32 pnnx.Expression pnnx_expr_10145 0 1 9732 expr=[2,0,1] Tensor.view Tensor.view_1481 2 1 9729 9730 relative_position_bias.93 $input=9729 $shape=9730 #9729=(4096,6)f32 #relative_position_bias.93=(64,64,6)f32 pnnx.Expression pnnx_expr_10144 0 1 22923 expr=0 torch.permute torch.permute_2710 2 1 relative_position_bias.93 9732 9733 $input=relative_position_bias.93 $dims=9732 #relative_position_bias.93=(64,64,6)f32 #9733=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_138 1 1 9733 relative_position_bias1.21 memory_format=torch.contiguous_format $input=9733 #9733=(6,64,64)f32 #relative_position_bias1.21=(6,64,64)f32 torch.transpose torch.transpose_3067 3 1 k.93 9688 9689 9724 $input=k.93 $dim0=9688 $dim1=9689 #k.93=(36,6,64,32)f32 #9724=(36,6,32,64)f32 torch.matmul torch.matmul_2294 2 1 q1.21 9724 attn.187 $input=q1.21 $other=9724 #q1.21=(36,6,64,32)f32 #9724=(36,6,32,64)f32 #attn.187=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3307 2 1 relative_position_bias1.21 22923 9735 $input=relative_position_bias1.21 $dim=22923 #relative_position_bias1.21=(6,64,64)f32 #9735=(1,6,64,64)f32 pnnx.Expression pnnx_expr_10142 2 1 attn.187 9735 attn2.11 expr=add(@0,@1) #attn.187=(36,6,64,64)f32 #9735=(1,6,64,64)f32 #attn2.11=(36,6,64,64)f32 pnnx.Expression pnnx_expr_10133 2 1 x8.21 attn_mask.47 9742 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.21=(36,64,192)f32 #attn_mask.47=(36,64,64)f32 pnnx.Expression pnnx_expr_10132 0 1 22928 expr=1 pnnx.Expression pnnx_expr_10131 0 1 22929 expr=0 Tensor.view Tensor.view_1482 2 1 attn2.11 9742 9743 $input=attn2.11 $shape=9742 #attn2.11=(36,6,64,64)f32 #9743=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3308 2 1 attn_mask.47 22928 9744 $input=attn_mask.47 $dim=22928 #attn_mask.47=(36,64,64)f32 #9744=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3309 2 1 9744 22929 9745 $input=9744 $dim=22929 #9744=(36,1,64,64)f32 #9745=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_10129 2 1 9743 9745 attn3.11 expr=add(@0,@1) #9743=(1,36,6,64,64)f32 #9745=(1,36,1,64,64)f32 #attn3.11=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_10126 1 1 x8.21 9747 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.21=(36,64,192)f32 Tensor.view Tensor.view_1483 2 1 attn3.11 9747 input.209 $input=attn3.11 
$shape=9747 #attn3.11=(1,36,6,64,64)f32 #input.209=(36,6,64,64)f32 nn.Softmax pnnx_unique_211 1 1 input.209 9749 dim=-1 #input.209=(36,6,64,64)f32 #9749=(36,6,64,64)f32 nn.Dropout pnnx_unique_212 1 1 9749 9750 #9749=(36,6,64,64)f32 #9750=(36,6,64,64)f32 Tensor.select Tensor.select_787 3 1 qkv1.21 22914 22915 v.93 $input=qkv1.21 $dim=22914 $index=22915 #qkv1.21=(3,36,6,64,32)f32 #v.93=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10125 0 1 22933 expr=1 pnnx.Expression pnnx_expr_10124 0 1 22934 expr=2 torch.matmul torch.matmul_2295 2 1 9750 v.93 9751 $input=9750 $other=v.93 #9750=(36,6,64,64)f32 #v.93=(36,6,64,32)f32 #9751=(36,6,64,32)f32 pnnx.Expression pnnx_expr_10123 1 1 x8.21 9753 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.21=(36,64,192)f32 torch.transpose torch.transpose_3068 3 1 9751 22933 22934 9752 $input=9751 $dim0=22933 $dim1=22934 #9751=(36,6,64,32)f32 #9752=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_525 2 1 9752 9753 input1.23 $input=9752 $shape=9753 #9752=(36,64,6,32)f32 #input1.23=(36,64,192)f32 nn.Linear pnnx_unique_213 1 1 input1.23 9755 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.23=(36,64,192)f32 #9755=(36,64,192)f32 nn.Dropout pnnx_unique_214 1 1 9755 9756 #9755=(36,64,192)f32 #9756=(36,64,192)f32 pnnx.Expression pnnx_expr_10119 1 1 9620 9757 expr=[-1,8,8,int(size(@0,2))] #9620=(1,2304,192)f32 pnnx.Expression pnnx_expr_10106 1 1 157 9763 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_10099 0 1 9765 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1484 2 1 9756 9757 windows.93 $input=9756 $shape=9757 #9756=(36,64,192)f32 #windows.93=(36,8,8,192)f32 Tensor.view Tensor.view_1485 2 1 windows.93 9763 x9.21 $input=windows.93 $shape=9763 #windows.93=(36,8,8,192)f32 #x9.21=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10096 1 1 157 9768 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2711 2 1 x9.21 9765 9766 $input=x9.21 $dims=9765 #x9.21=(1,6,6,8,8,192)f32 #9766=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_139 1 1 9766 9767 memory_format=torch.contiguous_format $input=9766 #9766=(1,6,8,6,8,192)f32 #9767=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10093 0 1 9770 expr=[4,4] pnnx.Expression pnnx_expr_10090 0 1 9771 expr=[1,2] Tensor.view Tensor.view_1486 2 1 9767 9768 shifted_x.47 $input=9767 $shape=9768 #9767=(1,6,8,6,8,192)f32 #shifted_x.47=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10087 2 1 9620 157 9775 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9620=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2465 3 1 shifted_x.47 9770 9771 x10.11 $input=shifted_x.47 $shifts=9770 $dims=9771 #shifted_x.47=(1,48,48,192)f32 #x10.11=(1,48,48,192)f32 Tensor.view Tensor.view_1487 2 1 x10.11 9775 x11.11 $input=x10.11 $shape=9775 #x10.11=(1,48,48,192)f32 #x11.11=(1,2304,192)f32 pnnx.Expression pnnx_expr_10085 2 1 9620 x11.11 input.211 expr=add(@0,@1) #9620=(1,2304,192)f32 #x11.11=(1,2304,192)f32 #input.211=(1,2304,192)f32 nn.LayerNorm pnnx_unique_215 1 1 input.211 9779 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.211=(1,2304,192)f32 #9779=(1,2304,192)f32 nn.Linear pnnx_unique_216 1 1 9779 9784 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9779=(1,2304,192)f32 #9784=(1,2304,384)f32 nn.GELU pnnx_unique_217 1 1 9784 9785 #9784=(1,2304,384)f32 #9785=(1,2304,384)f32 nn.Dropout pnnx_unique_218 1 1 9785 
9786 #9785=(1,2304,384)f32 #9786=(1,2304,384)f32 nn.Linear pnnx_unique_219 1 1 9786 9787 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9786=(1,2304,384)f32 #9787=(1,2304,192)f32 nn.Dropout pnnx_unique_220 1 1 9787 9788 #9787=(1,2304,192)f32 #9788=(1,2304,192)f32 pnnx.Expression pnnx_expr_10083 2 1 input.211 9788 9790 expr=add(@0,@1) #input.211=(1,2304,192)f32 #9788=(1,2304,192)f32 #9790=(1,2304,192)f32 nn.LayerNorm pnnx_unique_221 1 1 9790 9817 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9790=(1,2304,192)f32 #9817=(1,2304,192)f32 pnnx.Expression pnnx_expr_10061 2 1 9790 157 9818 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9790=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1488 2 1 9817 9818 x.95 $input=9817 $shape=9818 #9817=(1,2304,192)f32 #x.95=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10040 1 1 x.95 9835 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.95=(1,48,48,192)f32 pnnx.Expression pnnx_expr_10035 0 1 9837 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1489 2 1 x.95 9835 x5.49 $input=x.95 $shape=9835 #x.95=(1,48,48,192)f32 #x5.49=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_10032 1 1 x.95 9840 expr=[-1,8,8,int(size(@0,3))] #x.95=(1,48,48,192)f32 torch.permute torch.permute_2712 2 1 x5.49 9837 9838 $input=x5.49 $dims=9837 #x5.49=(1,6,8,6,8,192)f32 #9838=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_140 1 1 9838 9839 memory_format=torch.contiguous_format $input=9838 #9838=(1,6,6,8,8,192)f32 #9839=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_10030 1 1 9790 9842 expr=[-1,64,int(size(@0,2))] #9790=(1,2304,192)f32 pnnx.Expression pnnx_expr_10020 0 1 9853 expr=-2 pnnx.Expression pnnx_expr_10019 0 1 9854 expr=-1 pnnx.Attribute pnnx_unique_222 0 1 relative_position_bias_table.95 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.95=(225,6)f32 pnnx.Attribute pnnx_unique_223 0 1 relative_position_index.95 @relative_position_index=(64,64)i64 #relative_position_index.95=(64,64)i64 Tensor.view Tensor.view_1490 2 1 9839 9840 x_windows.95 $input=9839 $shape=9840 #9839=(1,6,6,8,8,192)f32 #x_windows.95=(36,8,8,192)f32 Tensor.view Tensor.view_1491 2 1 x_windows.95 9842 x6.23 $input=x_windows.95 $shape=9842 #x_windows.95=(36,8,8,192)f32 #x6.23=(36,64,192)f32 nn.Linear pnnx_unique_224 1 1 x6.23 9874 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.23=(36,64,192)f32 #9874=(36,64,576)f32 pnnx.Expression pnnx_expr_10004 1 1 x6.23 9877 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.23=(36,64,192)f32 pnnx.Expression pnnx_expr_9999 0 1 9879 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_526 2 1 9874 9877 9878 $input=9874 $shape=9877 #9874=(36,64,576)f32 #9878=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_9998 0 1 22978 expr=0 pnnx.Expression pnnx_expr_9997 0 1 22979 expr=0 pnnx.Expression pnnx_expr_9996 0 1 22980 expr=0 pnnx.Expression pnnx_expr_9995 0 1 22981 expr=1 pnnx.Expression pnnx_expr_9994 0 1 22982 expr=0 pnnx.Expression pnnx_expr_9993 0 1 22983 expr=2 torch.permute torch.permute_2713 2 1 9878 9879 qkv1.23 $input=9878 $dims=9879 #9878=(36,64,3,6,32)f32 #qkv1.23=(3,36,6,64,32)f32 Tensor.select Tensor.select_788 3 1 qkv1.23 22978 22979 q.95 $input=qkv1.23 $dim=22978 $index=22979 #qkv1.23=(3,36,6,64,32)f32 #q.95=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9992 1 1 q.95 q1.23 expr=mul(@0,1.767767e-01) #q.95=(36,6,64,32)f32 #q1.23=(36,6,64,32)f32 
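The repeated index/view/permute/unsqueeze pattern on each block's relative_position_bias_table (225, 6) and relative_position_index (64, 64) is the relative-position-bias lookup that gets added to the attention scores. A sketch assuming the standard Swin construction, with 225 = (2*8 - 1)^2 relative offsets for an 8x8 window; the zero tensors stand in for the learned table and precomputed index buffer.

```python
import torch

num_heads, window = 6, 8
N = window * window
table = torch.zeros((2 * window - 1) ** 2, num_heads)  # learned in the real model
index = torch.zeros(N, N, dtype=torch.long)            # precomputed offset buffer

def relative_position_bias():
    bias = table[index.view(-1)]               # (4096, heads) gather (Tensor.index)
    bias = bias.view(N, N, -1)                 # (N, N, heads)
    bias = bias.permute(2, 0, 1).contiguous()  # (heads, N, N)
    return bias.unsqueeze(0)                   # (1, heads, N, N), broadcast over windows
```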
Tensor.select Tensor.select_789 3 1 qkv1.23 22980 22981 k.95 $input=qkv1.23 $dim=22980 $index=22981 #qkv1.23=(3,36,6,64,32)f32 #k.95=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9990 0 1 9887 expr=[-1] Tensor.view Tensor.view_1492 2 1 relative_position_index.95 9887 9888 $input=relative_position_index.95 $shape=9887 #relative_position_index.95=(64,64)i64 #9888=(4096)i64 pnnx.Expression pnnx_expr_9989 1 1 9888 9889 expr=[@0] #9888=(4096)i64 pnnx.Expression pnnx_expr_9986 0 1 9891 expr=[64,64,-1] Tensor.index Tensor.index_372 2 1 relative_position_bias_table.95 9889 9890 $input=relative_position_bias_table.95 $expr=9889 #relative_position_bias_table.95=(225,6)f32 #9890=(4096,6)f32 pnnx.Expression pnnx_expr_9982 0 1 9893 expr=[2,0,1] Tensor.view Tensor.view_1493 2 1 9890 9891 relative_position_bias.95 $input=9890 $shape=9891 #9890=(4096,6)f32 #relative_position_bias.95=(64,64,6)f32 pnnx.Expression pnnx_expr_9981 0 1 22991 expr=0 torch.permute torch.permute_2714 2 1 relative_position_bias.95 9893 9894 $input=relative_position_bias.95 $dims=9893 #relative_position_bias.95=(64,64,6)f32 #9894=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_141 1 1 9894 relative_position_bias1.23 memory_format=torch.contiguous_format $input=9894 #9894=(6,64,64)f32 #relative_position_bias1.23=(6,64,64)f32 torch.transpose torch.transpose_3069 3 1 k.95 9853 9854 9885 $input=k.95 $dim0=9853 $dim1=9854 #k.95=(36,6,64,32)f32 #9885=(36,6,32,64)f32 torch.matmul torch.matmul_2296 2 1 q1.23 9885 attn.191 $input=q1.23 $other=9885 #q1.23=(36,6,64,32)f32 #9885=(36,6,32,64)f32 #attn.191=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3310 2 1 relative_position_bias1.23 22991 9896 $input=relative_position_bias1.23 $dim=22991 #relative_position_bias1.23=(6,64,64)f32 #9896=(1,6,64,64)f32 pnnx.Expression pnnx_expr_9979 2 1 attn.191 9896 input.213 expr=add(@0,@1) #attn.191=(36,6,64,64)f32 #9896=(1,6,64,64)f32 #input.213=(36,6,64,64)f32 nn.Softmax pnnx_unique_225 1 1 input.213 9898 dim=-1 #input.213=(36,6,64,64)f32 #9898=(36,6,64,64)f32 nn.Dropout pnnx_unique_226 1 1 9898 9899 #9898=(36,6,64,64)f32 #9899=(36,6,64,64)f32 Tensor.select Tensor.select_790 3 1 qkv1.23 22982 22983 v.95 $input=qkv1.23 $dim=22982 $index=22983 #qkv1.23=(3,36,6,64,32)f32 #v.95=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9978 0 1 22993 expr=1 pnnx.Expression pnnx_expr_9977 0 1 22994 expr=2 torch.matmul torch.matmul_2297 2 1 9899 v.95 9900 $input=9899 $other=v.95 #9899=(36,6,64,64)f32 #v.95=(36,6,64,32)f32 #9900=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9976 1 1 x6.23 9902 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.23=(36,64,192)f32 torch.transpose torch.transpose_3070 3 1 9900 22993 22994 9901 $input=9900 $dim0=22993 $dim1=22994 #9900=(36,6,64,32)f32 #9901=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_527 2 1 9901 9902 input1.25 $input=9901 $shape=9902 #9901=(36,64,6,32)f32 #input1.25=(36,64,192)f32 nn.Linear pnnx_unique_227 1 1 input1.25 9904 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.25=(36,64,192)f32 #9904=(36,64,192)f32 nn.Dropout pnnx_unique_228 1 1 9904 9905 #9904=(36,64,192)f32 #9905=(36,64,192)f32 pnnx.Expression pnnx_expr_9972 1 1 9790 9906 expr=[-1,8,8,int(size(@0,2))] #9790=(1,2304,192)f32 pnnx.Expression pnnx_expr_9959 1 1 157 9912 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_9952 0 1 9914 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1494 2 1 9905 9906 windows.95 $input=9905 $shape=9906 #9905=(36,64,192)f32 
#windows.95=(36,8,8,192)f32 Tensor.view Tensor.view_1495 2 1 windows.95 9912 x7.23 $input=windows.95 $shape=9912 #windows.95=(36,8,8,192)f32 #x7.23=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_9949 1 1 157 9917 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2715 2 1 x7.23 9914 9915 $input=x7.23 $dims=9914 #x7.23=(1,6,6,8,8,192)f32 #9915=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_142 1 1 9915 9916 memory_format=torch.contiguous_format $input=9915 #9915=(1,6,8,6,8,192)f32 #9916=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_9946 2 1 9790 157 9921 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9790=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1496 2 1 9916 9917 x8.23 $input=9916 $shape=9917 #9916=(1,6,8,6,8,192)f32 #x8.23=(1,48,48,192)f32 Tensor.view Tensor.view_1497 2 1 x8.23 9921 x9.23 $input=x8.23 $shape=9921 #x8.23=(1,48,48,192)f32 #x9.23=(1,2304,192)f32 pnnx.Expression pnnx_expr_9944 2 1 9790 x9.23 input.215 expr=add(@0,@1) #9790=(1,2304,192)f32 #x9.23=(1,2304,192)f32 #input.215=(1,2304,192)f32 nn.LayerNorm pnnx_unique_229 1 1 input.215 9925 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.215=(1,2304,192)f32 #9925=(1,2304,192)f32 nn.Linear pnnx_unique_230 1 1 9925 9930 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #9925=(1,2304,192)f32 #9930=(1,2304,384)f32 nn.GELU pnnx_unique_231 1 1 9930 9931 #9930=(1,2304,384)f32 #9931=(1,2304,384)f32 nn.Dropout pnnx_unique_232 1 1 9931 9932 #9931=(1,2304,384)f32 #9932=(1,2304,384)f32 nn.Linear pnnx_unique_233 1 1 9932 9933 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #9932=(1,2304,384)f32 #9933=(1,2304,192)f32 nn.Dropout pnnx_unique_234 1 1 9933 9934 #9933=(1,2304,192)f32 #9934=(1,2304,192)f32 pnnx.Expression pnnx_expr_9942 2 1 input.215 9934 9936 expr=add(@0,@1) #input.215=(1,2304,192)f32 #9934=(1,2304,192)f32 #9936=(1,2304,192)f32 pnnx.Attribute pnnx_unique_235 0 1 attn_mask.49 @attn_mask=(36,64,64)f32 #attn_mask.49=(36,64,64)f32 nn.LayerNorm pnnx_unique_236 1 1 9936 9965 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #9936=(1,2304,192)f32 #9965=(1,2304,192)f32 pnnx.Expression pnnx_expr_9919 2 1 9936 157 9966 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #9936=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_9917 0 1 9968 expr=[-4,-4] pnnx.Expression pnnx_expr_9915 0 1 9969 expr=[1,2] Tensor.view Tensor.view_1498 2 1 9965 9966 x.97 $input=9965 $shape=9966 #9965=(1,2304,192)f32 #x.97=(1,48,48,192)f32 torch.roll torch.roll_2466 3 1 x.97 9968 9969 x6.25 $input=x.97 $shifts=9968 $dims=9969 #x.97=(1,48,48,192)f32 #x6.25=(1,48,48,192)f32 pnnx.Expression pnnx_expr_9893 1 1 x6.25 9986 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.25=(1,48,48,192)f32 pnnx.Expression pnnx_expr_9888 0 1 9988 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1499 2 1 x6.25 9986 x7.25 $input=x6.25 $shape=9986 #x6.25=(1,48,48,192)f32 #x7.25=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_9885 1 1 x6.25 9991 expr=[-1,8,8,int(size(@0,3))] #x6.25=(1,48,48,192)f32 torch.permute torch.permute_2716 2 1 x7.25 9988 9989 $input=x7.25 $dims=9988 #x7.25=(1,6,8,6,8,192)f32 #9989=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_143 1 1 9989 9990 memory_format=torch.contiguous_format $input=9989 #9989=(1,6,6,8,8,192)f32 #9990=(1,6,6,8,8,192)f32 
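Every second block carries a precomputed attn_mask of shape (36, 64, 64); around each shifted block the scores are viewed to (B/nW, nW, heads, N, N), the mask is unsqueezed twice and added, and the result is flattened back before the softmax (the view/unsqueeze/add/view/Softmax run above). A sketch of that application; the function name is hypothetical, and the mask values themselves come from the shifted-window layout, which the dump stores but does not construct.

```python
import torch
import torch.nn as nn

num_heads = 6
softmax = nn.Softmax(dim=-1)

def apply_window_mask(attn, mask):
    # attn: (num_windows*B, heads, N, N); mask: (num_windows, N, N),
    # matching attn_mask.* = (36, 64, 64) in the dump
    nW, N = mask.shape[0], mask.shape[-1]
    attn = attn.view(-1, nW, num_heads, N, N)
    attn = attn + mask.unsqueeze(1).unsqueeze(0)  # broadcast over batch and heads
    return softmax(attn.view(-1, num_heads, N, N))
```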
pnnx.Expression pnnx_expr_9883 1 1 9936 9993 expr=[-1,64,int(size(@0,2))] #9936=(1,2304,192)f32 pnnx.Expression pnnx_expr_9873 0 1 10004 expr=-2 pnnx.Expression pnnx_expr_9872 0 1 10005 expr=-1 pnnx.Attribute pnnx_unique_237 0 1 relative_position_bias_table.97 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.97=(225,6)f32 pnnx.Attribute pnnx_unique_238 0 1 relative_position_index.97 @relative_position_index=(64,64)i64 #relative_position_index.97=(64,64)i64 Tensor.view Tensor.view_1500 2 1 9990 9991 x_windows.97 $input=9990 $shape=9991 #9990=(1,6,6,8,8,192)f32 #x_windows.97=(36,8,8,192)f32 Tensor.view Tensor.view_1501 2 1 x_windows.97 9993 x8.25 $input=x_windows.97 $shape=9993 #x_windows.97=(36,8,8,192)f32 #x8.25=(36,64,192)f32 nn.Linear pnnx_unique_239 1 1 x8.25 10029 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.25=(36,64,192)f32 #10029=(36,64,576)f32 pnnx.Expression pnnx_expr_9853 1 1 x8.25 10032 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.25=(36,64,192)f32 pnnx.Expression pnnx_expr_9848 0 1 10034 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_528 2 1 10029 10032 10033 $input=10029 $shape=10032 #10029=(36,64,576)f32 #10033=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_9847 0 1 23037 expr=0 pnnx.Expression pnnx_expr_9846 0 1 23038 expr=0 pnnx.Expression pnnx_expr_9845 0 1 23039 expr=0 pnnx.Expression pnnx_expr_9844 0 1 23040 expr=1 pnnx.Expression pnnx_expr_9843 0 1 23041 expr=0 pnnx.Expression pnnx_expr_9842 0 1 23042 expr=2 torch.permute torch.permute_2717 2 1 10033 10034 qkv1.25 $input=10033 $dims=10034 #10033=(36,64,3,6,32)f32 #qkv1.25=(3,36,6,64,32)f32 Tensor.select Tensor.select_791 3 1 qkv1.25 23037 23038 q.97 $input=qkv1.25 $dim=23037 $index=23038 #qkv1.25=(3,36,6,64,32)f32 #q.97=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9841 1 1 q.97 q1.25 expr=mul(@0,1.767767e-01) #q.97=(36,6,64,32)f32 #q1.25=(36,6,64,32)f32 Tensor.select Tensor.select_792 3 1 qkv1.25 23039 23040 k.97 $input=qkv1.25 $dim=23039 $index=23040 #qkv1.25=(3,36,6,64,32)f32 #k.97=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9839 0 1 10042 expr=[-1] Tensor.view Tensor.view_1502 2 1 relative_position_index.97 10042 10043 $input=relative_position_index.97 $shape=10042 #relative_position_index.97=(64,64)i64 #10043=(4096)i64 pnnx.Expression pnnx_expr_9838 1 1 10043 10044 expr=[@0] #10043=(4096)i64 pnnx.Expression pnnx_expr_9835 0 1 10046 expr=[64,64,-1] Tensor.index Tensor.index_373 2 1 relative_position_bias_table.97 10044 10045 $input=relative_position_bias_table.97 $expr=10044 #relative_position_bias_table.97=(225,6)f32 #10045=(4096,6)f32 pnnx.Expression pnnx_expr_9831 0 1 10048 expr=[2,0,1] Tensor.view Tensor.view_1503 2 1 10045 10046 relative_position_bias.97 $input=10045 $shape=10046 #10045=(4096,6)f32 #relative_position_bias.97=(64,64,6)f32 pnnx.Expression pnnx_expr_9830 0 1 23050 expr=0 torch.permute torch.permute_2718 2 1 relative_position_bias.97 10048 10049 $input=relative_position_bias.97 $dims=10048 #relative_position_bias.97=(64,64,6)f32 #10049=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_144 1 1 10049 relative_position_bias1.25 memory_format=torch.contiguous_format $input=10049 #10049=(6,64,64)f32 #relative_position_bias1.25=(6,64,64)f32 torch.transpose torch.transpose_3071 3 1 k.97 10004 10005 10040 $input=k.97 $dim0=10004 $dim1=10005 #k.97=(36,6,64,32)f32 #10040=(36,6,32,64)f32 torch.matmul torch.matmul_2298 2 1 q1.25 10040 attn.195 $input=q1.25 $other=10040 #q1.25=(36,6,64,32)f32 #10040=(36,6,32,64)f32 
#attn.195=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3311 2 1 relative_position_bias1.25 23050 10051 $input=relative_position_bias1.25 $dim=23050 #relative_position_bias1.25=(6,64,64)f32 #10051=(1,6,64,64)f32 pnnx.Expression pnnx_expr_9828 2 1 attn.195 10051 attn2.13 expr=add(@0,@1) #attn.195=(36,6,64,64)f32 #10051=(1,6,64,64)f32 #attn2.13=(36,6,64,64)f32 pnnx.Expression pnnx_expr_9819 2 1 x8.25 attn_mask.49 10058 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.25=(36,64,192)f32 #attn_mask.49=(36,64,64)f32 pnnx.Expression pnnx_expr_9818 0 1 23055 expr=1 pnnx.Expression pnnx_expr_9817 0 1 23056 expr=0 Tensor.view Tensor.view_1504 2 1 attn2.13 10058 10059 $input=attn2.13 $shape=10058 #attn2.13=(36,6,64,64)f32 #10059=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3312 2 1 attn_mask.49 23055 10060 $input=attn_mask.49 $dim=23055 #attn_mask.49=(36,64,64)f32 #10060=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3313 2 1 10060 23056 10061 $input=10060 $dim=23056 #10060=(36,1,64,64)f32 #10061=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_9815 2 1 10059 10061 attn3.13 expr=add(@0,@1) #10059=(1,36,6,64,64)f32 #10061=(1,36,1,64,64)f32 #attn3.13=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_9812 1 1 x8.25 10063 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.25=(36,64,192)f32 Tensor.view Tensor.view_1505 2 1 attn3.13 10063 input.217 $input=attn3.13 $shape=10063 #attn3.13=(1,36,6,64,64)f32 #input.217=(36,6,64,64)f32 nn.Softmax pnnx_unique_240 1 1 input.217 10065 dim=-1 #input.217=(36,6,64,64)f32 #10065=(36,6,64,64)f32 nn.Dropout pnnx_unique_241 1 1 10065 10066 #10065=(36,6,64,64)f32 #10066=(36,6,64,64)f32 Tensor.select Tensor.select_793 3 1 qkv1.25 23041 23042 v.97 $input=qkv1.25 $dim=23041 $index=23042 #qkv1.25=(3,36,6,64,32)f32 #v.97=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9811 0 1 23060 expr=1 pnnx.Expression pnnx_expr_9810 0 1 23061 expr=2 torch.matmul torch.matmul_2299 2 1 10066 v.97 10067 $input=10066 $other=v.97 #10066=(36,6,64,64)f32 #v.97=(36,6,64,32)f32 #10067=(36,6,64,32)f32 pnnx.Expression pnnx_expr_9809 1 1 x8.25 10069 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.25=(36,64,192)f32 torch.transpose torch.transpose_3072 3 1 10067 23060 23061 10068 $input=10067 $dim0=23060 $dim1=23061 #10067=(36,6,64,32)f32 #10068=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_529 2 1 10068 10069 input1.27 $input=10068 $shape=10069 #10068=(36,64,6,32)f32 #input1.27=(36,64,192)f32 nn.Linear pnnx_unique_242 1 1 input1.27 10071 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.27=(36,64,192)f32 #10071=(36,64,192)f32 nn.Dropout pnnx_unique_243 1 1 10071 10072 #10071=(36,64,192)f32 #10072=(36,64,192)f32 pnnx.Expression pnnx_expr_9805 1 1 9936 10073 expr=[-1,8,8,int(size(@0,2))] #9936=(1,2304,192)f32 pnnx.Expression pnnx_expr_9792 1 1 157 10079 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_9785 0 1 10081 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1506 2 1 10072 10073 windows.97 $input=10072 $shape=10073 #10072=(36,64,192)f32 #windows.97=(36,8,8,192)f32 Tensor.view Tensor.view_1507 2 1 windows.97 10079 x9.25 $input=windows.97 $shape=10079 #windows.97=(36,8,8,192)f32 #x9.25=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_9782 1 1 157 10084 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2719 2 1 x9.25 10081 10082 $input=x9.25 $dims=10081 #x9.25=(1,6,6,8,8,192)f32 #10082=(1,6,8,6,8,192)f32 Tensor.contiguous 
Tensor.contiguous Tensor.contiguous_145 1 1 10082 10083 memory_format=torch.contiguous_format $input=10082 #10082=(1,6,8,6,8,192)f32 #10083=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9779 0 1 10086 expr=[4,4]
pnnx.Expression pnnx_expr_9776 0 1 10087 expr=[1,2]
Tensor.view Tensor.view_1508 2 1 10083 10084 shifted_x.49 $input=10083 $shape=10084 #10083=(1,6,8,6,8,192)f32 #shifted_x.49=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9773 2 1 9936 157 10091 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #9936=(1,2304,192)f32 #157=(1,192,48,48)f32
torch.roll torch.roll_2467 3 1 shifted_x.49 10086 10087 x10.13 $input=shifted_x.49 $shifts=10086 $dims=10087 #shifted_x.49=(1,48,48,192)f32 #x10.13=(1,48,48,192)f32
Tensor.view Tensor.view_1509 2 1 x10.13 10091 x11.13 $input=x10.13 $shape=10091 #x10.13=(1,48,48,192)f32 #x11.13=(1,2304,192)f32
pnnx.Expression pnnx_expr_9771 2 1 9936 x11.13 input.219 expr=add(@0,@1) #9936=(1,2304,192)f32 #x11.13=(1,2304,192)f32 #input.219=(1,2304,192)f32
nn.LayerNorm pnnx_unique_244 1 1 input.219 10095 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.219=(1,2304,192)f32 #10095=(1,2304,192)f32
nn.Linear pnnx_unique_245 1 1 10095 10100 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10095=(1,2304,192)f32 #10100=(1,2304,384)f32
nn.GELU pnnx_unique_246 1 1 10100 10101 #10100=(1,2304,384)f32 #10101=(1,2304,384)f32
nn.Dropout pnnx_unique_247 1 1 10101 10102 #10101=(1,2304,384)f32 #10102=(1,2304,384)f32
nn.Linear pnnx_unique_248 1 1 10102 10103 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10102=(1,2304,384)f32 #10103=(1,2304,192)f32
nn.Dropout pnnx_unique_249 1 1 10103 10104 #10103=(1,2304,192)f32 #10104=(1,2304,192)f32
pnnx.Expression pnnx_expr_9769 2 1 input.219 10104 10106 expr=add(@0,@1) #input.219=(1,2304,192)f32 #10104=(1,2304,192)f32 #10106=(1,2304,192)f32
pnnx.Expression pnnx_expr_9767 0 1 10108 expr=1
pnnx.Expression pnnx_expr_9766 0 1 10109 expr=2
pnnx.Expression pnnx_expr_9761 2 1 10106 157 10115 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #10106=(1,2304,192)f32 #157=(1,192,48,48)f32
torch.transpose torch.transpose_3073 3 1 10106 10108 10109 10114 $input=10106 $dim0=10108 $dim1=10109 #10106=(1,2304,192)f32 #10114=(1,192,2304)f32
Tensor.view Tensor.view_1510 2 1 10114 10115 input.221 $input=10114 $shape=10115 #10114=(1,192,2304)f32 #input.221=(1,192,48,48)f32
nn.Conv2d pnnx_unique_250 1 1 input.221 10117 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.221=(1,192,48,48)f32 #10117=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9760 0 1 10118 expr=-1
pnnx.Expression pnnx_expr_9759 0 1 10119 expr=2
pnnx.Expression pnnx_expr_9758 0 1 10120 expr=1
pnnx.Expression pnnx_expr_9757 0 1 23088 expr=2
torch.flatten torch.flatten_2192 3 1 10117 10119 10118 10121 $input=10117 $start_dim=10119 $end_dim=10118 #10117=(1,192,48,48)f32 #10121=(1,192,2304)f32
torch.transpose torch.transpose_3074 3 1 10121 10120 23088 10122 $input=10121 $dim0=10120 $dim1=23088 #10121=(1,192,2304)f32 #10122=(1,2304,192)f32
pnnx.Expression pnnx_expr_9756 2 1 10122 9141 10123 expr=add(@0,@1) #10122=(1,2304,192)f32 #9141=(1,2304,192)f32 #10123=(1,2304,192)f32
nn.LayerNorm pnnx_unique_251 1 1 10123 10167 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10123=(1,2304,192)f32 #10167=(1,2304,192)f32
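The transpose_3073 / view_1510 / conv / flatten_2192 / transpose_3074 tail just above closes a residual group: the (1,2304,192) token sequence is mapped back to a (1,192,48,48) feature map, passed through a 3x3 convolution (pnnx_unique_250), flattened back to tokens, and added to the group input (blob 9141). Roughly, as a sketch under those assumptions:

import torch.nn as nn

conv = nn.Conv2d(192, 192, kernel_size=3, padding=1)  # same hyperparameters as pnnx_unique_250

def residual_group_tail(tokens, group_input, h=48, w=48):
    # tokens, group_input: (B, h*w, C) = (1, 2304, 192)
    b, n, c = tokens.shape
    x = tokens.transpose(1, 2).view(b, c, h, w)   # tokens -> (1, 192, 48, 48) map
    x = conv(x)                                   # 3x3 conv at the end of the group
    x = x.flatten(2).transpose(1, 2)              # back to (1, 2304, 192)
    return x + group_input                        # residual over the whole group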
pnnx.Expression pnnx_expr_9733 2 1 10123 157 10168 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10123=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1511 2 1 10167 10168 x.99 $input=10167 $shape=10168 #10167=(1,2304,192)f32 #x.99=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9712 1 1 x.99 10185 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.99=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9707 0 1 10187 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1512 2 1 x.99 10185 x5.51 $input=x.99 $shape=10185 #x.99=(1,48,48,192)f32 #x5.51=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9704 1 1 x.99 10190 expr=[-1,8,8,int(size(@0,3))] #x.99=(1,48,48,192)f32
torch.permute torch.permute_2720 2 1 x5.51 10187 10188 $input=x5.51 $dims=10187 #x5.51=(1,6,8,6,8,192)f32 #10188=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_146 1 1 10188 10189 memory_format=torch.contiguous_format $input=10188 #10188=(1,6,6,8,8,192)f32 #10189=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9702 1 1 10123 10192 expr=[-1,64,int(size(@0,2))] #10123=(1,2304,192)f32
pnnx.Expression pnnx_expr_9692 0 1 10203 expr=-2
pnnx.Expression pnnx_expr_9691 0 1 10204 expr=-1
pnnx.Attribute pnnx_unique_252 0 1 relative_position_bias_table.99 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.99=(225,6)f32
pnnx.Attribute pnnx_unique_253 0 1 relative_position_index.99 @relative_position_index=(64,64)i64 #relative_position_index.99=(64,64)i64
Tensor.view Tensor.view_1513 2 1 10189 10190 x_windows.99 $input=10189 $shape=10190 #10189=(1,6,6,8,8,192)f32 #x_windows.99=(36,8,8,192)f32
Tensor.view Tensor.view_1514 2 1 x_windows.99 10192 x6.27 $input=x_windows.99 $shape=10192 #x_windows.99=(36,8,8,192)f32 #x6.27=(36,64,192)f32
nn.Linear pnnx_unique_254 1 1 x6.27 10224 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.27=(36,64,192)f32 #10224=(36,64,576)f32
pnnx.Expression pnnx_expr_9676 1 1 x6.27 10227 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.27=(36,64,192)f32
pnnx.Expression pnnx_expr_9671 0 1 10229 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_530 2 1 10224 10227 10228 $input=10224 $shape=10227 #10224=(36,64,576)f32 #10228=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_9670 0 1 23106 expr=0
pnnx.Expression pnnx_expr_9669 0 1 23107 expr=0
pnnx.Expression pnnx_expr_9668 0 1 23108 expr=0
pnnx.Expression pnnx_expr_9667 0 1 23109 expr=1
pnnx.Expression pnnx_expr_9666 0 1 23110 expr=0
pnnx.Expression pnnx_expr_9665 0 1 23111 expr=2
torch.permute torch.permute_2721 2 1 10228 10229 qkv1.27 $input=10228 $dims=10229 #10228=(36,64,3,6,32)f32 #qkv1.27=(3,36,6,64,32)f32
Tensor.select Tensor.select_794 3 1 qkv1.27 23106 23107 q.99 $input=qkv1.27 $dim=23106 $index=23107 #qkv1.27=(3,36,6,64,32)f32 #q.99=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9664 1 1 q.99 q1.27 expr=mul(@0,1.767767e-01) #q.99=(36,6,64,32)f32 #q1.27=(36,6,64,32)f32
Tensor.select Tensor.select_795 3 1 qkv1.27 23108 23109 k.99 $input=qkv1.27 $dim=23108 $index=23109 #qkv1.27=(3,36,6,64,32)f32 #k.99=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9662 0 1 10237 expr=[-1]
Tensor.view Tensor.view_1515 2 1 relative_position_index.99 10237 10238 $input=relative_position_index.99 $shape=10237 #relative_position_index.99=(64,64)i64 #10238=(4096)i64
pnnx.Expression pnnx_expr_9661 1 1 10238 10239 expr=[@0] #10238=(4096)i64
pnnx.Expression pnnx_expr_9658 0 1 10241 expr=[64,64,-1]
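The view_1512 / permute_2720 / contiguous_146 / view_1513 / view_1514 chain above is the usual Swin window partition: a (1,48,48,192) map becomes 36 non-overlapping 8x8 windows flattened to (36,64,192); the mirrored chain after each attention (windows -> x7 -> x8) is its inverse. A sketch of both directions, assuming window size 8:

def window_partition(x, ws: int = 8):
    # x: (B, H, W, C) = (1, 48, 48, 192)
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)         # (1, 6, 8, 6, 8, 192)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()       # (1, 6, 6, 8, 8, 192)
    return x.view(-1, ws * ws, C)                      # (36, 64, 192)

def window_reverse(windows, ws: int = 8, H: int = 48, W: int = 48):
    # windows: (num_windows*B, ws*ws, C); inverse of window_partition
    C = windows.shape[-1]
    B = windows.shape[0] // ((H // ws) * (W // ws))
    x = windows.view(B, H // ws, W // ws, ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()       # (B, 6, 8, 6, 8, C)
    return x.view(B, H, W, C)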
Tensor.index Tensor.index_374 2 1 relative_position_bias_table.99 10239 10240 $input=relative_position_bias_table.99 $expr=10239 #relative_position_bias_table.99=(225,6)f32 #10240=(4096,6)f32
pnnx.Expression pnnx_expr_9654 0 1 10243 expr=[2,0,1]
Tensor.view Tensor.view_1516 2 1 10240 10241 relative_position_bias.99 $input=10240 $shape=10241 #10240=(4096,6)f32 #relative_position_bias.99=(64,64,6)f32
pnnx.Expression pnnx_expr_9653 0 1 23119 expr=0
torch.permute torch.permute_2722 2 1 relative_position_bias.99 10243 10244 $input=relative_position_bias.99 $dims=10243 #relative_position_bias.99=(64,64,6)f32 #10244=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_147 1 1 10244 relative_position_bias1.27 memory_format=torch.contiguous_format $input=10244 #10244=(6,64,64)f32 #relative_position_bias1.27=(6,64,64)f32
torch.transpose torch.transpose_3075 3 1 k.99 10203 10204 10235 $input=k.99 $dim0=10203 $dim1=10204 #k.99=(36,6,64,32)f32 #10235=(36,6,32,64)f32
torch.matmul torch.matmul_2300 2 1 q1.27 10235 attn.199 $input=q1.27 $other=10235 #q1.27=(36,6,64,32)f32 #10235=(36,6,32,64)f32 #attn.199=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3314 2 1 relative_position_bias1.27 23119 10246 $input=relative_position_bias1.27 $dim=23119 #relative_position_bias1.27=(6,64,64)f32 #10246=(1,6,64,64)f32
pnnx.Expression pnnx_expr_9651 2 1 attn.199 10246 input.223 expr=add(@0,@1) #attn.199=(36,6,64,64)f32 #10246=(1,6,64,64)f32 #input.223=(36,6,64,64)f32
nn.Softmax pnnx_unique_255 1 1 input.223 10248 dim=-1 #input.223=(36,6,64,64)f32 #10248=(36,6,64,64)f32
nn.Dropout pnnx_unique_256 1 1 10248 10249 #10248=(36,6,64,64)f32 #10249=(36,6,64,64)f32
Tensor.select Tensor.select_796 3 1 qkv1.27 23110 23111 v.99 $input=qkv1.27 $dim=23110 $index=23111 #qkv1.27=(3,36,6,64,32)f32 #v.99=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9650 0 1 23121 expr=1
pnnx.Expression pnnx_expr_9649 0 1 23122 expr=2
torch.matmul torch.matmul_2301 2 1 10249 v.99 10250 $input=10249 $other=v.99 #10249=(36,6,64,64)f32 #v.99=(36,6,64,32)f32 #10250=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9648 1 1 x6.27 10252 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.27=(36,64,192)f32
torch.transpose torch.transpose_3076 3 1 10250 23121 23122 10251 $input=10250 $dim0=23121 $dim1=23122 #10250=(36,6,64,32)f32 #10251=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_531 2 1 10251 10252 input1.29 $input=10251 $shape=10252 #10251=(36,64,6,32)f32 #input1.29=(36,64,192)f32
nn.Linear pnnx_unique_257 1 1 input1.29 10254 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.29=(36,64,192)f32 #10254=(36,64,192)f32
nn.Dropout pnnx_unique_258 1 1 10254 10255 #10254=(36,64,192)f32 #10255=(36,64,192)f32
pnnx.Expression pnnx_expr_9644 1 1 10123 10256 expr=[-1,8,8,int(size(@0,2))] #10123=(1,2304,192)f32
pnnx.Expression pnnx_expr_9631 1 1 157 10262 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9624 0 1 10264 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1517 2 1 10255 10256 windows.99 $input=10255 $shape=10256 #10255=(36,64,192)f32 #windows.99=(36,8,8,192)f32
Tensor.view Tensor.view_1518 2 1 windows.99 10262 x7.27 $input=windows.99 $shape=10262 #windows.99=(36,8,8,192)f32 #x7.27=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9621 1 1 157 10267 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2723 2 1 x7.27 10264 10265 $input=x7.27 $dims=10264 #x7.27=(1,6,6,8,8,192)f32 #10265=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_148 1 1 10265 10266 memory_format=torch.contiguous_format $input=10265 #10265=(1,6,8,6,8,192)f32 #10266=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9618 2 1 10123 157 10271 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10123=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1519 2 1 10266 10267 x8.27 $input=10266 $shape=10267 #10266=(1,6,8,6,8,192)f32 #x8.27=(1,48,48,192)f32
Tensor.view Tensor.view_1520 2 1 x8.27 10271 x9.27 $input=x8.27 $shape=10271 #x8.27=(1,48,48,192)f32 #x9.27=(1,2304,192)f32
pnnx.Expression pnnx_expr_9616 2 1 10123 x9.27 input.225 expr=add(@0,@1) #10123=(1,2304,192)f32 #x9.27=(1,2304,192)f32 #input.225=(1,2304,192)f32
nn.LayerNorm pnnx_unique_259 1 1 input.225 10275 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.225=(1,2304,192)f32 #10275=(1,2304,192)f32
nn.Linear pnnx_unique_260 1 1 10275 10280 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10275=(1,2304,192)f32 #10280=(1,2304,384)f32
nn.GELU pnnx_unique_261 1 1 10280 10281 #10280=(1,2304,384)f32 #10281=(1,2304,384)f32
nn.Dropout pnnx_unique_262 1 1 10281 10282 #10281=(1,2304,384)f32 #10282=(1,2304,384)f32
nn.Linear pnnx_unique_263 1 1 10282 10283 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10282=(1,2304,384)f32 #10283=(1,2304,192)f32
nn.Dropout pnnx_unique_264 1 1 10283 10284 #10283=(1,2304,192)f32 #10284=(1,2304,192)f32
pnnx.Expression pnnx_expr_9614 2 1 input.225 10284 10286 expr=add(@0,@1) #input.225=(1,2304,192)f32 #10284=(1,2304,192)f32 #10286=(1,2304,192)f32
pnnx.Attribute pnnx_unique_265 0 1 attn_mask.51 @attn_mask=(36,64,64)f32 #attn_mask.51=(36,64,64)f32
nn.LayerNorm pnnx_unique_266 1 1 10286 10315 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10286=(1,2304,192)f32 #10315=(1,2304,192)f32
pnnx.Expression pnnx_expr_9591 2 1 10286 157 10316 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10286=(1,2304,192)f32 #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9589 0 1 10318 expr=[-4,-4]
pnnx.Expression pnnx_expr_9587 0 1 10319 expr=[1,2]
Tensor.view Tensor.view_1521 2 1 10315 10316 x.101 $input=10315 $shape=10316 #10315=(1,2304,192)f32 #x.101=(1,48,48,192)f32
torch.roll torch.roll_2468 3 1 x.101 10318 10319 x6.29 $input=x.101 $shifts=10318 $dims=10319 #x.101=(1,48,48,192)f32 #x6.29=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9565 1 1 x6.29 10336 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.29=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9560 0 1 10338 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1522 2 1 x6.29 10336 x7.29 $input=x6.29 $shape=10336 #x6.29=(1,48,48,192)f32 #x7.29=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9557 1 1 x6.29 10341 expr=[-1,8,8,int(size(@0,3))] #x6.29=(1,48,48,192)f32
torch.permute torch.permute_2724 2 1 x7.29 10338 10339 $input=x7.29 $dims=10338 #x7.29=(1,6,8,6,8,192)f32 #10339=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_149 1 1 10339 10340 memory_format=torch.contiguous_format $input=10339 #10339=(1,6,6,8,8,192)f32 #10340=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9555 1 1 10286 10343 expr=[-1,64,int(size(@0,2))] #10286=(1,2304,192)f32
pnnx.Expression pnnx_expr_9545 0 1 10354 expr=-2
pnnx.Expression pnnx_expr_9544 0 1 10355 expr=-1
pnnx.Attribute pnnx_unique_267 0 1 relative_position_bias_table.101 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.101=(225,6)f32
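torch.roll_2468 with shifts=[-4,-4] on dims=[1,2] is the cyclic shift that makes this block SW-MSA: the feature map is rolled by half the 8-pixel window size before partitioning, and rolled back by [4,4] after the windows are merged (roll_2469 further down). Windows that wrap around the border are exactly why the precomputed attn_mask attributes exist. As a sketch:

import torch

def cyclic_shift(x, shift: int = 4):
    # x: (B, H, W, C); shift = window_size // 2
    return torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))

def reverse_cyclic_shift(x, shift: int = 4):
    # undoes cyclic_shift after attention and window merging
    return torch.roll(x, shifts=(shift, shift), dims=(1, 2))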
pnnx.Attribute pnnx_unique_268 0 1 relative_position_index.101 @relative_position_index=(64,64)i64 #relative_position_index.101=(64,64)i64
Tensor.view Tensor.view_1523 2 1 10340 10341 x_windows.101 $input=10340 $shape=10341 #10340=(1,6,6,8,8,192)f32 #x_windows.101=(36,8,8,192)f32
Tensor.view Tensor.view_1524 2 1 x_windows.101 10343 x8.29 $input=x_windows.101 $shape=10343 #x_windows.101=(36,8,8,192)f32 #x8.29=(36,64,192)f32
nn.Linear pnnx_unique_269 1 1 x8.29 10379 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.29=(36,64,192)f32 #10379=(36,64,576)f32
pnnx.Expression pnnx_expr_9525 1 1 x8.29 10382 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.29=(36,64,192)f32
pnnx.Expression pnnx_expr_9520 0 1 10384 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_532 2 1 10379 10382 10383 $input=10379 $shape=10382 #10379=(36,64,576)f32 #10383=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_9519 0 1 23165 expr=0
pnnx.Expression pnnx_expr_9518 0 1 23166 expr=0
pnnx.Expression pnnx_expr_9517 0 1 23167 expr=0
pnnx.Expression pnnx_expr_9516 0 1 23168 expr=1
pnnx.Expression pnnx_expr_9515 0 1 23169 expr=0
pnnx.Expression pnnx_expr_9514 0 1 23170 expr=2
torch.permute torch.permute_2725 2 1 10383 10384 qkv1.29 $input=10383 $dims=10384 #10383=(36,64,3,6,32)f32 #qkv1.29=(3,36,6,64,32)f32
Tensor.select Tensor.select_797 3 1 qkv1.29 23165 23166 q.101 $input=qkv1.29 $dim=23165 $index=23166 #qkv1.29=(3,36,6,64,32)f32 #q.101=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9513 1 1 q.101 q1.29 expr=mul(@0,1.767767e-01) #q.101=(36,6,64,32)f32 #q1.29=(36,6,64,32)f32
Tensor.select Tensor.select_798 3 1 qkv1.29 23167 23168 k.101 $input=qkv1.29 $dim=23167 $index=23168 #qkv1.29=(3,36,6,64,32)f32 #k.101=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9511 0 1 10392 expr=[-1]
Tensor.view Tensor.view_1525 2 1 relative_position_index.101 10392 10393 $input=relative_position_index.101 $shape=10392 #relative_position_index.101=(64,64)i64 #10393=(4096)i64
pnnx.Expression pnnx_expr_9510 1 1 10393 10394 expr=[@0] #10393=(4096)i64
pnnx.Expression pnnx_expr_9507 0 1 10396 expr=[64,64,-1]
Tensor.index Tensor.index_375 2 1 relative_position_bias_table.101 10394 10395 $input=relative_position_bias_table.101 $expr=10394 #relative_position_bias_table.101=(225,6)f32 #10395=(4096,6)f32
pnnx.Expression pnnx_expr_9503 0 1 10398 expr=[2,0,1]
Tensor.view Tensor.view_1526 2 1 10395 10396 relative_position_bias.101 $input=10395 $shape=10396 #10395=(4096,6)f32 #relative_position_bias.101=(64,64,6)f32
pnnx.Expression pnnx_expr_9502 0 1 23178 expr=0
torch.permute torch.permute_2726 2 1 relative_position_bias.101 10398 10399 $input=relative_position_bias.101 $dims=10398 #relative_position_bias.101=(64,64,6)f32 #10399=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_150 1 1 10399 relative_position_bias1.29 memory_format=torch.contiguous_format $input=10399 #10399=(6,64,64)f32 #relative_position_bias1.29=(6,64,64)f32
torch.transpose torch.transpose_3077 3 1 k.101 10354 10355 10390 $input=k.101 $dim0=10354 $dim1=10355 #k.101=(36,6,64,32)f32 #10390=(36,6,32,64)f32
torch.matmul torch.matmul_2302 2 1 q1.29 10390 attn.203 $input=q1.29 $other=10390 #q1.29=(36,6,64,32)f32 #10390=(36,6,32,64)f32 #attn.203=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3315 2 1 relative_position_bias1.29 23178 10401 $input=relative_position_bias1.29 $dim=23178 #relative_position_bias1.29=(6,64,64)f32 #10401=(1,6,64,64)f32
pnnx.Expression pnnx_expr_9500 2 1 attn.203 10401 attn2.15 expr=add(@0,@1) #attn.203=(36,6,64,64)f32 #10401=(1,6,64,64)f32 #attn2.15=(36,6,64,64)f32
pnnx.Expression pnnx_expr_9491 2 1 x8.29 attn_mask.51 10408 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.29=(36,64,192)f32 #attn_mask.51=(36,64,64)f32
pnnx.Expression pnnx_expr_9490 0 1 23183 expr=1
pnnx.Expression pnnx_expr_9489 0 1 23184 expr=0
Tensor.view Tensor.view_1527 2 1 attn2.15 10408 10409 $input=attn2.15 $shape=10408 #attn2.15=(36,6,64,64)f32 #10409=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3316 2 1 attn_mask.51 23183 10410 $input=attn_mask.51 $dim=23183 #attn_mask.51=(36,64,64)f32 #10410=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3317 2 1 10410 23184 10411 $input=10410 $dim=23184 #10410=(36,1,64,64)f32 #10411=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_9487 2 1 10409 10411 attn3.15 expr=add(@0,@1) #10409=(1,36,6,64,64)f32 #10411=(1,36,1,64,64)f32 #attn3.15=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_9484 1 1 x8.29 10413 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.29=(36,64,192)f32
Tensor.view Tensor.view_1528 2 1 attn3.15 10413 input.227 $input=attn3.15 $shape=10413 #attn3.15=(1,36,6,64,64)f32 #input.227=(36,6,64,64)f32
nn.Softmax pnnx_unique_270 1 1 input.227 10415 dim=-1 #input.227=(36,6,64,64)f32 #10415=(36,6,64,64)f32
nn.Dropout pnnx_unique_271 1 1 10415 10416 #10415=(36,6,64,64)f32 #10416=(36,6,64,64)f32
Tensor.select Tensor.select_799 3 1 qkv1.29 23169 23170 v.101 $input=qkv1.29 $dim=23169 $index=23170 #qkv1.29=(3,36,6,64,32)f32 #v.101=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9483 0 1 23188 expr=1
pnnx.Expression pnnx_expr_9482 0 1 23189 expr=2
torch.matmul torch.matmul_2303 2 1 10416 v.101 10417 $input=10416 $other=v.101 #10416=(36,6,64,64)f32 #v.101=(36,6,64,32)f32 #10417=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9481 1 1 x8.29 10419 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.29=(36,64,192)f32
torch.transpose torch.transpose_3078 3 1 10417 23188 23189 10418 $input=10417 $dim0=23188 $dim1=23189 #10417=(36,6,64,32)f32 #10418=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_533 2 1 10418 10419 input1.31 $input=10418 $shape=10419 #10418=(36,64,6,32)f32 #input1.31=(36,64,192)f32
nn.Linear pnnx_unique_272 1 1 input1.31 10421 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.31=(36,64,192)f32 #10421=(36,64,192)f32
nn.Dropout pnnx_unique_273 1 1 10421 10422 #10421=(36,64,192)f32 #10422=(36,64,192)f32
pnnx.Expression pnnx_expr_9477 1 1 10286 10423 expr=[-1,8,8,int(size(@0,2))] #10286=(1,2304,192)f32
pnnx.Expression pnnx_expr_9464 1 1 157 10429 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9457 0 1 10431 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1529 2 1 10422 10423 windows.101 $input=10422 $shape=10423 #10422=(36,64,192)f32 #windows.101=(36,8,8,192)f32
Tensor.view Tensor.view_1530 2 1 windows.101 10429 x9.29 $input=windows.101 $shape=10429 #windows.101=(36,8,8,192)f32 #x9.29=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9454 1 1 157 10434 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2727 2 1 x9.29 10431 10432 $input=x9.29 $dims=10431 #x9.29=(1,6,6,8,8,192)f32 #10432=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_151 1 1 10432 10433 memory_format=torch.contiguous_format $input=10432 #10432=(1,6,8,6,8,192)f32 #10433=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9451 0 1 10436 expr=[4,4]
pnnx.Expression pnnx_expr_9448 0 1 10437 expr=[1,2]
Tensor.view Tensor.view_1531 2 1 10433 10434 shifted_x.51 $input=10433 $shape=10434 #10433=(1,6,8,6,8,192)f32 #shifted_x.51=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9445 2 1 10286 157 10441 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10286=(1,2304,192)f32 #157=(1,192,48,48)f32
torch.roll torch.roll_2469 3 1 shifted_x.51 10436 10437 x10.15 $input=shifted_x.51 $shifts=10436 $dims=10437 #shifted_x.51=(1,48,48,192)f32 #x10.15=(1,48,48,192)f32
Tensor.view Tensor.view_1532 2 1 x10.15 10441 x11.15 $input=x10.15 $shape=10441 #x10.15=(1,48,48,192)f32 #x11.15=(1,2304,192)f32
pnnx.Expression pnnx_expr_9443 2 1 10286 x11.15 input.229 expr=add(@0,@1) #10286=(1,2304,192)f32 #x11.15=(1,2304,192)f32 #input.229=(1,2304,192)f32
nn.LayerNorm pnnx_unique_274 1 1 input.229 10445 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.229=(1,2304,192)f32 #10445=(1,2304,192)f32
nn.Linear pnnx_unique_275 1 1 10445 10450 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10445=(1,2304,192)f32 #10450=(1,2304,384)f32
nn.GELU pnnx_unique_276 1 1 10450 10451 #10450=(1,2304,384)f32 #10451=(1,2304,384)f32
nn.Dropout pnnx_unique_277 1 1 10451 10452 #10451=(1,2304,384)f32 #10452=(1,2304,384)f32
nn.Linear pnnx_unique_278 1 1 10452 10453 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10452=(1,2304,384)f32 #10453=(1,2304,192)f32
nn.Dropout pnnx_unique_279 1 1 10453 10454 #10453=(1,2304,192)f32 #10454=(1,2304,192)f32
pnnx.Expression pnnx_expr_9441 2 1 input.229 10454 10456 expr=add(@0,@1) #input.229=(1,2304,192)f32 #10454=(1,2304,192)f32 #10456=(1,2304,192)f32
nn.LayerNorm pnnx_unique_280 1 1 10456 10483 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10456=(1,2304,192)f32 #10483=(1,2304,192)f32
pnnx.Expression pnnx_expr_9419 2 1 10456 157 10484 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10456=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1533 2 1 10483 10484 x.103 $input=10483 $shape=10484 #10483=(1,2304,192)f32 #x.103=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9398 1 1 x.103 10501 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.103=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9393 0 1 10503 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1534 2 1 x.103 10501 x5.53 $input=x.103 $shape=10501 #x.103=(1,48,48,192)f32 #x5.53=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9390 1 1 x.103 10506 expr=[-1,8,8,int(size(@0,3))] #x.103=(1,48,48,192)f32
torch.permute torch.permute_2728 2 1 x5.53 10503 10504 $input=x5.53 $dims=10503 #x5.53=(1,6,8,6,8,192)f32 #10504=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_152 1 1 10504 10505 memory_format=torch.contiguous_format $input=10504 #10504=(1,6,6,8,8,192)f32 #10505=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9388 1 1 10456 10508 expr=[-1,64,int(size(@0,2))] #10456=(1,2304,192)f32
pnnx.Expression pnnx_expr_9378 0 1 10519 expr=-2
pnnx.Expression pnnx_expr_9377 0 1 10520 expr=-1
pnnx.Attribute pnnx_unique_281 0 1 relative_position_bias_table.103 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.103=(225,6)f32
pnnx.Attribute pnnx_unique_282 0 1 relative_position_index.103 @relative_position_index=(64,64)i64 #relative_position_index.103=(64,64)i64
Tensor.view Tensor.view_1535 2 1 10505 10506 x_windows.103 $input=10505 $shape=10506 #10505=(1,6,6,8,8,192)f32 #x_windows.103=(36,8,8,192)f32
Tensor.view Tensor.view_1536 2 1 x_windows.103 10508 x6.31 $input=x_windows.103 $shape=10508 #x_windows.103=(36,8,8,192)f32 #x6.31=(36,64,192)f32
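Each block closes with the LayerNorm -> Linear(192->384) -> GELU -> Dropout -> Linear(384->192) -> Dropout feed-forward seen above (pnnx_unique_274..279), added residually to the block input (pnnx_expr_9441). An equivalent module, as a sketch with an MLP ratio of 2; the traced Dropouts carry no probability, so 0.0 is assumed:

import torch.nn as nn

class Mlp(nn.Module):
    # mirrors pnnx_unique_274..279
    def __init__(self, dim: int = 192, hidden: int = 384, drop: float = 0.0):
        super().__init__()
        self.norm = nn.LayerNorm(dim, eps=1e-5)
        self.fc1 = nn.Linear(dim, hidden)
        self.act = nn.GELU()
        self.drop1 = nn.Dropout(drop)
        self.fc2 = nn.Linear(hidden, dim)
        self.drop2 = nn.Dropout(drop)

    def forward(self, x):
        # x: (B, N, C); the residual add matches pnnx_expr_9441
        return x + self.drop2(self.fc2(self.drop1(self.act(self.fc1(self.norm(x))))))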
nn.Linear pnnx_unique_283 1 1 x6.31 10540 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.31=(36,64,192)f32 #10540=(36,64,576)f32
pnnx.Expression pnnx_expr_9362 1 1 x6.31 10543 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.31=(36,64,192)f32
pnnx.Expression pnnx_expr_9357 0 1 10545 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_534 2 1 10540 10543 10544 $input=10540 $shape=10543 #10540=(36,64,576)f32 #10544=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_9356 0 1 23233 expr=0
pnnx.Expression pnnx_expr_9355 0 1 23234 expr=0
pnnx.Expression pnnx_expr_9354 0 1 23235 expr=0
pnnx.Expression pnnx_expr_9353 0 1 23236 expr=1
pnnx.Expression pnnx_expr_9352 0 1 23237 expr=0
pnnx.Expression pnnx_expr_9351 0 1 23238 expr=2
torch.permute torch.permute_2729 2 1 10544 10545 qkv1.31 $input=10544 $dims=10545 #10544=(36,64,3,6,32)f32 #qkv1.31=(3,36,6,64,32)f32
Tensor.select Tensor.select_800 3 1 qkv1.31 23233 23234 q.103 $input=qkv1.31 $dim=23233 $index=23234 #qkv1.31=(3,36,6,64,32)f32 #q.103=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9350 1 1 q.103 q1.31 expr=mul(@0,1.767767e-01) #q.103=(36,6,64,32)f32 #q1.31=(36,6,64,32)f32
Tensor.select Tensor.select_801 3 1 qkv1.31 23235 23236 k.103 $input=qkv1.31 $dim=23235 $index=23236 #qkv1.31=(3,36,6,64,32)f32 #k.103=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9348 0 1 10553 expr=[-1]
Tensor.view Tensor.view_1537 2 1 relative_position_index.103 10553 10554 $input=relative_position_index.103 $shape=10553 #relative_position_index.103=(64,64)i64 #10554=(4096)i64
pnnx.Expression pnnx_expr_9347 1 1 10554 10555 expr=[@0] #10554=(4096)i64
pnnx.Expression pnnx_expr_9344 0 1 10557 expr=[64,64,-1]
Tensor.index Tensor.index_376 2 1 relative_position_bias_table.103 10555 10556 $input=relative_position_bias_table.103 $expr=10555 #relative_position_bias_table.103=(225,6)f32 #10556=(4096,6)f32
pnnx.Expression pnnx_expr_9340 0 1 10559 expr=[2,0,1]
Tensor.view Tensor.view_1538 2 1 10556 10557 relative_position_bias.103 $input=10556 $shape=10557 #10556=(4096,6)f32 #relative_position_bias.103=(64,64,6)f32
pnnx.Expression pnnx_expr_9339 0 1 23246 expr=0
torch.permute torch.permute_2730 2 1 relative_position_bias.103 10559 10560 $input=relative_position_bias.103 $dims=10559 #relative_position_bias.103=(64,64,6)f32 #10560=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_153 1 1 10560 relative_position_bias1.31 memory_format=torch.contiguous_format $input=10560 #10560=(6,64,64)f32 #relative_position_bias1.31=(6,64,64)f32
torch.transpose torch.transpose_3079 3 1 k.103 10519 10520 10551 $input=k.103 $dim0=10519 $dim1=10520 #k.103=(36,6,64,32)f32 #10551=(36,6,32,64)f32
torch.matmul torch.matmul_2304 2 1 q1.31 10551 attn.207 $input=q1.31 $other=10551 #q1.31=(36,6,64,32)f32 #10551=(36,6,32,64)f32 #attn.207=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3318 2 1 relative_position_bias1.31 23246 10562 $input=relative_position_bias1.31 $dim=23246 #relative_position_bias1.31=(6,64,64)f32 #10562=(1,6,64,64)f32
pnnx.Expression pnnx_expr_9337 2 1 attn.207 10562 input.231 expr=add(@0,@1) #attn.207=(36,6,64,64)f32 #10562=(1,6,64,64)f32 #input.231=(36,6,64,64)f32
nn.Softmax pnnx_unique_284 1 1 input.231 10564 dim=-1 #input.231=(36,6,64,64)f32 #10564=(36,6,64,64)f32
nn.Dropout pnnx_unique_285 1 1 10564 10565 #10564=(36,6,64,64)f32 #10565=(36,6,64,64)f32
Tensor.select Tensor.select_802 3 1 qkv1.31 23237 23238 v.103 $input=qkv1.31 $dim=23237 $index=23238 #qkv1.31=(3,36,6,64,32)f32 #v.103=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9336 0 1 23248 expr=1
pnnx.Expression pnnx_expr_9335 0 1 23249 expr=2
torch.matmul torch.matmul_2305 2 1 10565 v.103 10566 $input=10565 $other=v.103 #10565=(36,6,64,64)f32 #v.103=(36,6,64,32)f32 #10566=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9334 1 1 x6.31 10568 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.31=(36,64,192)f32
torch.transpose torch.transpose_3080 3 1 10566 23248 23249 10567 $input=10566 $dim0=23248 $dim1=23249 #10566=(36,6,64,32)f32 #10567=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_535 2 1 10567 10568 input1.33 $input=10567 $shape=10568 #10567=(36,64,6,32)f32 #input1.33=(36,64,192)f32
nn.Linear pnnx_unique_286 1 1 input1.33 10570 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.33=(36,64,192)f32 #10570=(36,64,192)f32
nn.Dropout pnnx_unique_287 1 1 10570 10571 #10570=(36,64,192)f32 #10571=(36,64,192)f32
pnnx.Expression pnnx_expr_9330 1 1 10456 10572 expr=[-1,8,8,int(size(@0,2))] #10456=(1,2304,192)f32
pnnx.Expression pnnx_expr_9317 1 1 157 10578 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9310 0 1 10580 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1539 2 1 10571 10572 windows.103 $input=10571 $shape=10572 #10571=(36,64,192)f32 #windows.103=(36,8,8,192)f32
Tensor.view Tensor.view_1540 2 1 windows.103 10578 x7.31 $input=windows.103 $shape=10578 #windows.103=(36,8,8,192)f32 #x7.31=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9307 1 1 157 10583 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2731 2 1 x7.31 10580 10581 $input=x7.31 $dims=10580 #x7.31=(1,6,6,8,8,192)f32 #10581=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_154 1 1 10581 10582 memory_format=torch.contiguous_format $input=10581 #10581=(1,6,8,6,8,192)f32 #10582=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9304 2 1 10456 157 10587 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10456=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1541 2 1 10582 10583 x8.31 $input=10582 $shape=10583 #10582=(1,6,8,6,8,192)f32 #x8.31=(1,48,48,192)f32
Tensor.view Tensor.view_1542 2 1 x8.31 10587 x9.31 $input=x8.31 $shape=10587 #x8.31=(1,48,48,192)f32 #x9.31=(1,2304,192)f32
pnnx.Expression pnnx_expr_9302 2 1 10456 x9.31 input.233 expr=add(@0,@1) #10456=(1,2304,192)f32 #x9.31=(1,2304,192)f32 #input.233=(1,2304,192)f32
nn.LayerNorm pnnx_unique_288 1 1 input.233 10591 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.233=(1,2304,192)f32 #10591=(1,2304,192)f32
nn.Linear pnnx_unique_289 1 1 10591 10596 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10591=(1,2304,192)f32 #10596=(1,2304,384)f32
nn.GELU pnnx_unique_290 1 1 10596 10597 #10596=(1,2304,384)f32 #10597=(1,2304,384)f32
nn.Dropout pnnx_unique_291 1 1 10597 10598 #10597=(1,2304,384)f32 #10598=(1,2304,384)f32
nn.Linear pnnx_unique_292 1 1 10598 10599 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10598=(1,2304,384)f32 #10599=(1,2304,192)f32
nn.Dropout pnnx_unique_293 1 1 10599 10600 #10599=(1,2304,192)f32 #10600=(1,2304,192)f32
pnnx.Expression pnnx_expr_9300 2 1 input.233 10600 10602 expr=add(@0,@1) #input.233=(1,2304,192)f32 #10600=(1,2304,192)f32 #10602=(1,2304,192)f32
pnnx.Attribute pnnx_unique_294 0 1 attn_mask.53 @attn_mask=(36,64,64)f32 #attn_mask.53=(36,64,64)f32
nn.LayerNorm pnnx_unique_295 1 1 10602 10631 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10602=(1,2304,192)f32 #10631=(1,2304,192)f32
pnnx.Expression pnnx_expr_9277 2 1 10602 157 10632 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10602=(1,2304,192)f32 #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9275 0 1 10634 expr=[-4,-4]
pnnx.Expression pnnx_expr_9273 0 1 10635 expr=[1,2]
Tensor.view Tensor.view_1543 2 1 10631 10632 x.105 $input=10631 $shape=10632 #10631=(1,2304,192)f32 #x.105=(1,48,48,192)f32
torch.roll torch.roll_2470 3 1 x.105 10634 10635 x6.33 $input=x.105 $shifts=10634 $dims=10635 #x.105=(1,48,48,192)f32 #x6.33=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9251 1 1 x6.33 10652 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.33=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9246 0 1 10654 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1544 2 1 x6.33 10652 x7.33 $input=x6.33 $shape=10652 #x6.33=(1,48,48,192)f32 #x7.33=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9243 1 1 x6.33 10657 expr=[-1,8,8,int(size(@0,3))] #x6.33=(1,48,48,192)f32
torch.permute torch.permute_2732 2 1 x7.33 10654 10655 $input=x7.33 $dims=10654 #x7.33=(1,6,8,6,8,192)f32 #10655=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_155 1 1 10655 10656 memory_format=torch.contiguous_format $input=10655 #10655=(1,6,6,8,8,192)f32 #10656=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9241 1 1 10602 10659 expr=[-1,64,int(size(@0,2))] #10602=(1,2304,192)f32
pnnx.Expression pnnx_expr_9231 0 1 10670 expr=-2
pnnx.Expression pnnx_expr_9230 0 1 10671 expr=-1
pnnx.Attribute pnnx_unique_296 0 1 relative_position_bias_table.105 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.105=(225,6)f32
pnnx.Attribute pnnx_unique_297 0 1 relative_position_index.105 @relative_position_index=(64,64)i64 #relative_position_index.105=(64,64)i64
Tensor.view Tensor.view_1545 2 1 10656 10657 x_windows.105 $input=10656 $shape=10657 #10656=(1,6,6,8,8,192)f32 #x_windows.105=(36,8,8,192)f32
Tensor.view Tensor.view_1546 2 1 x_windows.105 10659 x8.33 $input=x_windows.105 $shape=10659 #x_windows.105=(36,8,8,192)f32 #x8.33=(36,64,192)f32
nn.Linear pnnx_unique_298 1 1 x8.33 10695 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.33=(36,64,192)f32 #10695=(36,64,576)f32
pnnx.Expression pnnx_expr_9211 1 1 x8.33 10698 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.33=(36,64,192)f32
pnnx.Expression pnnx_expr_9206 0 1 10700 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_536 2 1 10695 10698 10699 $input=10695 $shape=10698 #10695=(36,64,576)f32 #10699=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_9205 0 1 23292 expr=0
pnnx.Expression pnnx_expr_9204 0 1 23293 expr=0
pnnx.Expression pnnx_expr_9203 0 1 23294 expr=0
pnnx.Expression pnnx_expr_9202 0 1 23295 expr=1
pnnx.Expression pnnx_expr_9201 0 1 23296 expr=0
pnnx.Expression pnnx_expr_9200 0 1 23297 expr=2
torch.permute torch.permute_2733 2 1 10699 10700 qkv1.33 $input=10699 $dims=10700 #10699=(36,64,3,6,32)f32 #qkv1.33=(3,36,6,64,32)f32
Tensor.select Tensor.select_803 3 1 qkv1.33 23292 23293 q.105 $input=qkv1.33 $dim=23292 $index=23293 #qkv1.33=(3,36,6,64,32)f32 #q.105=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9199 1 1 q.105 q1.33 expr=mul(@0,1.767767e-01) #q.105=(36,6,64,32)f32 #q1.33=(36,6,64,32)f32
Tensor.select Tensor.select_804 3 1 qkv1.33 23294 23295 k.105 $input=qkv1.33 $dim=23294 $index=23295 #qkv1.33=(3,36,6,64,32)f32 #k.105=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9197 0 1 10708 expr=[-1]
Tensor.view Tensor.view_1547 2 1 relative_position_index.105 10708 10709 $input=relative_position_index.105 $shape=10708 #relative_position_index.105=(64,64)i64 #10709=(4096)i64
pnnx.Expression pnnx_expr_9196 1 1 10709 10710 expr=[@0] #10709=(4096)i64
pnnx.Expression pnnx_expr_9193 0 1 10712 expr=[64,64,-1]
Tensor.index Tensor.index_377 2 1 relative_position_bias_table.105 10710 10711 $input=relative_position_bias_table.105 $expr=10710 #relative_position_bias_table.105=(225,6)f32 #10711=(4096,6)f32
pnnx.Expression pnnx_expr_9189 0 1 10714 expr=[2,0,1]
Tensor.view Tensor.view_1548 2 1 10711 10712 relative_position_bias.105 $input=10711 $shape=10712 #10711=(4096,6)f32 #relative_position_bias.105=(64,64,6)f32
pnnx.Expression pnnx_expr_9188 0 1 23305 expr=0
torch.permute torch.permute_2734 2 1 relative_position_bias.105 10714 10715 $input=relative_position_bias.105 $dims=10714 #relative_position_bias.105=(64,64,6)f32 #10715=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_156 1 1 10715 relative_position_bias1.33 memory_format=torch.contiguous_format $input=10715 #10715=(6,64,64)f32 #relative_position_bias1.33=(6,64,64)f32
torch.transpose torch.transpose_3081 3 1 k.105 10670 10671 10706 $input=k.105 $dim0=10670 $dim1=10671 #k.105=(36,6,64,32)f32 #10706=(36,6,32,64)f32
torch.matmul torch.matmul_2306 2 1 q1.33 10706 attn.211 $input=q1.33 $other=10706 #q1.33=(36,6,64,32)f32 #10706=(36,6,32,64)f32 #attn.211=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3319 2 1 relative_position_bias1.33 23305 10717 $input=relative_position_bias1.33 $dim=23305 #relative_position_bias1.33=(6,64,64)f32 #10717=(1,6,64,64)f32
pnnx.Expression pnnx_expr_9186 2 1 attn.211 10717 attn2.17 expr=add(@0,@1) #attn.211=(36,6,64,64)f32 #10717=(1,6,64,64)f32 #attn2.17=(36,6,64,64)f32
pnnx.Expression pnnx_expr_9177 2 1 x8.33 attn_mask.53 10724 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.33=(36,64,192)f32 #attn_mask.53=(36,64,64)f32
pnnx.Expression pnnx_expr_9176 0 1 23310 expr=1
pnnx.Expression pnnx_expr_9175 0 1 23311 expr=0
Tensor.view Tensor.view_1549 2 1 attn2.17 10724 10725 $input=attn2.17 $shape=10724 #attn2.17=(36,6,64,64)f32 #10725=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3320 2 1 attn_mask.53 23310 10726 $input=attn_mask.53 $dim=23310 #attn_mask.53=(36,64,64)f32 #10726=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3321 2 1 10726 23311 10727 $input=10726 $dim=23311 #10726=(36,1,64,64)f32 #10727=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_9173 2 1 10725 10727 attn3.17 expr=add(@0,@1) #10725=(1,36,6,64,64)f32 #10727=(1,36,1,64,64)f32 #attn3.17=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_9170 1 1 x8.33 10729 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.33=(36,64,192)f32
Tensor.view Tensor.view_1550 2 1 attn3.17 10729 input.235 $input=attn3.17 $shape=10729 #attn3.17=(1,36,6,64,64)f32 #input.235=(36,6,64,64)f32
nn.Softmax pnnx_unique_299 1 1 input.235 10731 dim=-1 #input.235=(36,6,64,64)f32 #10731=(36,6,64,64)f32
nn.Dropout pnnx_unique_300 1 1 10731 10732 #10731=(36,6,64,64)f32 #10732=(36,6,64,64)f32
Tensor.select Tensor.select_805 3 1 qkv1.33 23296 23297 v.105 $input=qkv1.33 $dim=23296 $index=23297 #qkv1.33=(3,36,6,64,32)f32 #v.105=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9169 0 1 23315 expr=1
pnnx.Expression pnnx_expr_9168 0 1 23316 expr=2
torch.matmul torch.matmul_2307 2 1 10732 v.105 10733 $input=10732 $other=v.105 #10732=(36,6,64,64)f32 #v.105=(36,6,64,32)f32 #10733=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9167 1 1 x8.33 10735 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.33=(36,64,192)f32
torch.transpose torch.transpose_3082 3 1 10733 23315 23316 10734 $input=10733 $dim0=23315 $dim1=23316 #10733=(36,6,64,32)f32 #10734=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_537 2 1 10734 10735 input1.35 $input=10734 $shape=10735 #10734=(36,64,6,32)f32 #input1.35=(36,64,192)f32
nn.Linear pnnx_unique_301 1 1 input1.35 10737 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.35=(36,64,192)f32 #10737=(36,64,192)f32
nn.Dropout pnnx_unique_302 1 1 10737 10738 #10737=(36,64,192)f32 #10738=(36,64,192)f32
pnnx.Expression pnnx_expr_9163 1 1 10602 10739 expr=[-1,8,8,int(size(@0,2))] #10602=(1,2304,192)f32
pnnx.Expression pnnx_expr_9150 1 1 157 10745 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_9143 0 1 10747 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1551 2 1 10738 10739 windows.105 $input=10738 $shape=10739 #10738=(36,64,192)f32 #windows.105=(36,8,8,192)f32
Tensor.view Tensor.view_1552 2 1 windows.105 10745 x9.33 $input=windows.105 $shape=10745 #windows.105=(36,8,8,192)f32 #x9.33=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9140 1 1 157 10750 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2735 2 1 x9.33 10747 10748 $input=x9.33 $dims=10747 #x9.33=(1,6,6,8,8,192)f32 #10748=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_157 1 1 10748 10749 memory_format=torch.contiguous_format $input=10748 #10748=(1,6,8,6,8,192)f32 #10749=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9137 0 1 10752 expr=[4,4]
pnnx.Expression pnnx_expr_9134 0 1 10753 expr=[1,2]
Tensor.view Tensor.view_1553 2 1 10749 10750 shifted_x.53 $input=10749 $shape=10750 #10749=(1,6,8,6,8,192)f32 #shifted_x.53=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9131 2 1 10602 157 10757 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10602=(1,2304,192)f32 #157=(1,192,48,48)f32
torch.roll torch.roll_2471 3 1 shifted_x.53 10752 10753 x10.17 $input=shifted_x.53 $shifts=10752 $dims=10753 #shifted_x.53=(1,48,48,192)f32 #x10.17=(1,48,48,192)f32
Tensor.view Tensor.view_1554 2 1 x10.17 10757 x11.17 $input=x10.17 $shape=10757 #x10.17=(1,48,48,192)f32 #x11.17=(1,2304,192)f32
pnnx.Expression pnnx_expr_9129 2 1 10602 x11.17 input.237 expr=add(@0,@1) #10602=(1,2304,192)f32 #x11.17=(1,2304,192)f32 #input.237=(1,2304,192)f32
nn.LayerNorm pnnx_unique_303 1 1 input.237 10761 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.237=(1,2304,192)f32 #10761=(1,2304,192)f32
nn.Linear pnnx_unique_304 1 1 10761 10766 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10761=(1,2304,192)f32 #10766=(1,2304,384)f32
nn.GELU pnnx_unique_305 1 1 10766 10767 #10766=(1,2304,384)f32 #10767=(1,2304,384)f32
nn.Dropout pnnx_unique_306 1 1 10767 10768 #10767=(1,2304,384)f32 #10768=(1,2304,384)f32
nn.Linear pnnx_unique_307 1 1 10768 10769 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10768=(1,2304,384)f32 #10769=(1,2304,192)f32
nn.Dropout pnnx_unique_308 1 1 10769 10770 #10769=(1,2304,192)f32 #10770=(1,2304,192)f32
pnnx.Expression pnnx_expr_9127 2 1 input.237 10770 10772 expr=add(@0,@1) #input.237=(1,2304,192)f32 #10770=(1,2304,192)f32 #10772=(1,2304,192)f32
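Taken together, every block in this dump (this one ending at blob 10772) follows the same SwinIR-style pattern; only the attribute suffixes, blob ids, and the presence of the roll and attn_mask (every second block) change. A compact, hedged reconstruction assuming the helper sketches above:

import torch

def swin_block(x, attn, mlp, norm, h=48, w=48, shift=4, shifted=False):
    # x: (B, h*w, C) tokens; attn/mlp/norm stand in for the block's traced modules
    B, N, C = x.shape
    shortcut = x
    y = norm(x).view(B, h, w, C)                               # tokens -> map
    if shifted:
        y = torch.roll(y, shifts=(-shift, -shift), dims=(1, 2))
    wins = window_partition(y)                                 # (nW*B, 64, C)
    wins = attn(wins)                                          # window attention incl. bias/mask
    y = window_reverse(wins)                                   # (B, h, w, C)
    if shifted:
        y = torch.roll(y, shifts=(shift, shift), dims=(1, 2))  # undo the shift
    x = shortcut + y.reshape(B, h * w, C)                      # first residual
    return mlp(x)                                              # Mlp above adds the second residual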
nn.LayerNorm pnnx_unique_309 1 1 10772 10799 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10772=(1,2304,192)f32 #10799=(1,2304,192)f32
pnnx.Expression pnnx_expr_9105 2 1 10772 157 10800 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10772=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1555 2 1 10799 10800 x.107 $input=10799 $shape=10800 #10799=(1,2304,192)f32 #x.107=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9084 1 1 x.107 10817 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.107=(1,48,48,192)f32
pnnx.Expression pnnx_expr_9079 0 1 10819 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1556 2 1 x.107 10817 x5.55 $input=x.107 $shape=10817 #x.107=(1,48,48,192)f32 #x5.55=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_9076 1 1 x.107 10822 expr=[-1,8,8,int(size(@0,3))] #x.107=(1,48,48,192)f32
torch.permute torch.permute_2736 2 1 x5.55 10819 10820 $input=x5.55 $dims=10819 #x5.55=(1,6,8,6,8,192)f32 #10820=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_158 1 1 10820 10821 memory_format=torch.contiguous_format $input=10820 #10820=(1,6,6,8,8,192)f32 #10821=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_9074 1 1 10772 10824 expr=[-1,64,int(size(@0,2))] #10772=(1,2304,192)f32
pnnx.Expression pnnx_expr_9064 0 1 10835 expr=-2
pnnx.Expression pnnx_expr_9063 0 1 10836 expr=-1
pnnx.Attribute pnnx_unique_310 0 1 relative_position_bias_table.107 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.107=(225,6)f32
pnnx.Attribute pnnx_unique_311 0 1 relative_position_index.107 @relative_position_index=(64,64)i64 #relative_position_index.107=(64,64)i64
Tensor.view Tensor.view_1557 2 1 10821 10822 x_windows.107 $input=10821 $shape=10822 #10821=(1,6,6,8,8,192)f32 #x_windows.107=(36,8,8,192)f32
Tensor.view Tensor.view_1558 2 1 x_windows.107 10824 x6.35 $input=x_windows.107 $shape=10824 #x_windows.107=(36,8,8,192)f32 #x6.35=(36,64,192)f32
nn.Linear pnnx_unique_312 1 1 x6.35 10856 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.35=(36,64,192)f32 #10856=(36,64,576)f32
pnnx.Expression pnnx_expr_9048 1 1 x6.35 10859 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.35=(36,64,192)f32
pnnx.Expression pnnx_expr_9043 0 1 10861 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_538 2 1 10856 10859 10860 $input=10856 $shape=10859 #10856=(36,64,576)f32 #10860=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_9042 0 1 23360 expr=0
pnnx.Expression pnnx_expr_9041 0 1 23361 expr=0
pnnx.Expression pnnx_expr_9040 0 1 23362 expr=0
pnnx.Expression pnnx_expr_9039 0 1 23363 expr=1
pnnx.Expression pnnx_expr_9038 0 1 23364 expr=0
pnnx.Expression pnnx_expr_9037 0 1 23365 expr=2
torch.permute torch.permute_2737 2 1 10860 10861 qkv1.35 $input=10860 $dims=10861 #10860=(36,64,3,6,32)f32 #qkv1.35=(3,36,6,64,32)f32
Tensor.select Tensor.select_806 3 1 qkv1.35 23360 23361 q.107 $input=qkv1.35 $dim=23360 $index=23361 #qkv1.35=(3,36,6,64,32)f32 #q.107=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9036 1 1 q.107 q1.35 expr=mul(@0,1.767767e-01) #q.107=(36,6,64,32)f32 #q1.35=(36,6,64,32)f32
Tensor.select Tensor.select_807 3 1 qkv1.35 23362 23363 k.107 $input=qkv1.35 $dim=23362 $index=23363 #qkv1.35=(3,36,6,64,32)f32 #k.107=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9034 0 1 10869 expr=[-1]
Tensor.view Tensor.view_1559 2 1 relative_position_index.107 10869 10870 $input=relative_position_index.107 $shape=10869 #relative_position_index.107=(64,64)i64 #10870=(4096)i64
pnnx.Expression pnnx_expr_9033 1 1 10870 10871 expr=[@0] #10870=(4096)i64
pnnx.Expression pnnx_expr_9030 0 1 10873 expr=[64,64,-1]
Tensor.index Tensor.index_378 2 1 relative_position_bias_table.107 10871 10872 $input=relative_position_bias_table.107 $expr=10871 #relative_position_bias_table.107=(225,6)f32 #10872=(4096,6)f32
pnnx.Expression pnnx_expr_9026 0 1 10875 expr=[2,0,1]
Tensor.view Tensor.view_1560 2 1 10872 10873 relative_position_bias.107 $input=10872 $shape=10873 #10872=(4096,6)f32 #relative_position_bias.107=(64,64,6)f32
pnnx.Expression pnnx_expr_9025 0 1 23373 expr=0
torch.permute torch.permute_2738 2 1 relative_position_bias.107 10875 10876 $input=relative_position_bias.107 $dims=10875 #relative_position_bias.107=(64,64,6)f32 #10876=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_159 1 1 10876 relative_position_bias1.35 memory_format=torch.contiguous_format $input=10876 #10876=(6,64,64)f32 #relative_position_bias1.35=(6,64,64)f32
torch.transpose torch.transpose_3083 3 1 k.107 10835 10836 10867 $input=k.107 $dim0=10835 $dim1=10836 #k.107=(36,6,64,32)f32 #10867=(36,6,32,64)f32
torch.matmul torch.matmul_2308 2 1 q1.35 10867 attn.215 $input=q1.35 $other=10867 #q1.35=(36,6,64,32)f32 #10867=(36,6,32,64)f32 #attn.215=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3322 2 1 relative_position_bias1.35 23373 10878 $input=relative_position_bias1.35 $dim=23373 #relative_position_bias1.35=(6,64,64)f32 #10878=(1,6,64,64)f32
pnnx.Expression pnnx_expr_9023 2 1 attn.215 10878 input.239 expr=add(@0,@1) #attn.215=(36,6,64,64)f32 #10878=(1,6,64,64)f32 #input.239=(36,6,64,64)f32
nn.Softmax pnnx_unique_313 1 1 input.239 10880 dim=-1 #input.239=(36,6,64,64)f32 #10880=(36,6,64,64)f32
nn.Dropout pnnx_unique_314 1 1 10880 10881 #10880=(36,6,64,64)f32 #10881=(36,6,64,64)f32
Tensor.select Tensor.select_808 3 1 qkv1.35 23364 23365 v.107 $input=qkv1.35 $dim=23364 $index=23365 #qkv1.35=(3,36,6,64,32)f32 #v.107=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9022 0 1 23375 expr=1
pnnx.Expression pnnx_expr_9021 0 1 23376 expr=2
torch.matmul torch.matmul_2309 2 1 10881 v.107 10882 $input=10881 $other=v.107 #10881=(36,6,64,64)f32 #v.107=(36,6,64,32)f32 #10882=(36,6,64,32)f32
pnnx.Expression pnnx_expr_9020 1 1 x6.35 10884 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.35=(36,64,192)f32
torch.transpose torch.transpose_3084 3 1 10882 23375 23376 10883 $input=10882 $dim0=23375 $dim1=23376 #10882=(36,6,64,32)f32 #10883=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_539 2 1 10883 10884 input1.37 $input=10883 $shape=10884 #10883=(36,64,6,32)f32 #input1.37=(36,64,192)f32
nn.Linear pnnx_unique_315 1 1 input1.37 10886 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.37=(36,64,192)f32 #10886=(36,64,192)f32
nn.Dropout pnnx_unique_316 1 1 10886 10887 #10886=(36,64,192)f32 #10887=(36,64,192)f32
pnnx.Expression pnnx_expr_9016 1 1 10772 10888 expr=[-1,8,8,int(size(@0,2))] #10772=(1,2304,192)f32
pnnx.Expression pnnx_expr_9003 1 1 157 10894 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_8996 0 1 10896 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1561 2 1 10887 10888 windows.107 $input=10887 $shape=10888 #10887=(36,64,192)f32 #windows.107=(36,8,8,192)f32
Tensor.view Tensor.view_1562 2 1 windows.107 10894 x7.35 $input=windows.107 $shape=10894 #windows.107=(36,8,8,192)f32 #x7.35=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_8993 1 1 157 10899 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2739 2 1 x7.35 10896 10897 $input=x7.35 $dims=10896 #x7.35=(1,6,6,8,8,192)f32 #10897=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_160 1 1 10897 10898 memory_format=torch.contiguous_format $input=10897 #10897=(1,6,8,6,8,192)f32 #10898=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_8990 2 1 10772 157 10903 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10772=(1,2304,192)f32 #157=(1,192,48,48)f32
Tensor.view Tensor.view_1563 2 1 10898 10899 x8.35 $input=10898 $shape=10899 #10898=(1,6,8,6,8,192)f32 #x8.35=(1,48,48,192)f32
Tensor.view Tensor.view_1564 2 1 x8.35 10903 x9.35 $input=x8.35 $shape=10903 #x8.35=(1,48,48,192)f32 #x9.35=(1,2304,192)f32
pnnx.Expression pnnx_expr_8988 2 1 10772 x9.35 input.241 expr=add(@0,@1) #10772=(1,2304,192)f32 #x9.35=(1,2304,192)f32 #input.241=(1,2304,192)f32
nn.LayerNorm pnnx_unique_317 1 1 input.241 10907 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.241=(1,2304,192)f32 #10907=(1,2304,192)f32
nn.Linear pnnx_unique_318 1 1 10907 10912 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #10907=(1,2304,192)f32 #10912=(1,2304,384)f32
nn.GELU pnnx_unique_319 1 1 10912 10913 #10912=(1,2304,384)f32 #10913=(1,2304,384)f32
nn.Dropout pnnx_unique_320 1 1 10913 10914 #10913=(1,2304,384)f32 #10914=(1,2304,384)f32
nn.Linear pnnx_unique_321 1 1 10914 10915 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #10914=(1,2304,384)f32 #10915=(1,2304,192)f32
nn.Dropout pnnx_unique_322 1 1 10915 10916 #10915=(1,2304,192)f32 #10916=(1,2304,192)f32
pnnx.Expression pnnx_expr_8986 2 1 input.241 10916 10918 expr=add(@0,@1) #input.241=(1,2304,192)f32 #10916=(1,2304,192)f32 #10918=(1,2304,192)f32
pnnx.Attribute pnnx_unique_323 0 1 attn_mask.55 @attn_mask=(36,64,64)f32 #attn_mask.55=(36,64,64)f32
nn.LayerNorm pnnx_unique_324 1 1 10918 10947 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #10918=(1,2304,192)f32 #10947=(1,2304,192)f32
pnnx.Expression pnnx_expr_8963 2 1 10918 157 10948 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #10918=(1,2304,192)f32 #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_8961 0 1 10950 expr=[-4,-4]
pnnx.Expression pnnx_expr_8959 0 1 10951 expr=[1,2]
Tensor.view Tensor.view_1565 2 1 10947 10948 x.109 $input=10947 $shape=10948 #10947=(1,2304,192)f32 #x.109=(1,48,48,192)f32
torch.roll torch.roll_2472 3 1 x.109 10950 10951 x6.37 $input=x.109 $shifts=10950 $dims=10951 #x.109=(1,48,48,192)f32 #x6.37=(1,48,48,192)f32
pnnx.Expression pnnx_expr_8937 1 1 x6.37 10968 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.37=(1,48,48,192)f32
pnnx.Expression pnnx_expr_8932 0 1 10970 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1566 2 1 x6.37 10968 x7.37 $input=x6.37 $shape=10968 #x6.37=(1,48,48,192)f32 #x7.37=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_8929 1 1 x6.37 10973 expr=[-1,8,8,int(size(@0,3))] #x6.37=(1,48,48,192)f32
torch.permute torch.permute_2740 2 1 x7.37 10970 10971 $input=x7.37 $dims=10970 #x7.37=(1,6,8,6,8,192)f32 #10971=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_161 1 1 10971 10972 memory_format=torch.contiguous_format $input=10971 #10971=(1,6,6,8,8,192)f32 #10972=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_8927 1 1 10918 10975 expr=[-1,64,int(size(@0,2))] #10918=(1,2304,192)f32
pnnx.Expression pnnx_expr_8917 0 1 10986 expr=-2
pnnx.Expression pnnx_expr_8916 0 1 10987 expr=-1
pnnx.Attribute pnnx_unique_325 0 1 relative_position_bias_table.109 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.109=(225,6)f32
pnnx.Attribute pnnx_unique_326 0 1 relative_position_index.109 @relative_position_index=(64,64)i64 #relative_position_index.109=(64,64)i64
Tensor.view Tensor.view_1567 2 1 10972 10973 x_windows.109 $input=10972 $shape=10973 #10972=(1,6,6,8,8,192)f32 #x_windows.109=(36,8,8,192)f32
Tensor.view Tensor.view_1568 2 1 x_windows.109 10975 x8.37 $input=x_windows.109 $shape=10975 #x_windows.109=(36,8,8,192)f32 #x8.37=(36,64,192)f32
nn.Linear pnnx_unique_327 1 1 x8.37 11011 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.37=(36,64,192)f32 #11011=(36,64,576)f32
pnnx.Expression pnnx_expr_8897 1 1 x8.37 11014 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.37=(36,64,192)f32
pnnx.Expression pnnx_expr_8892 0 1 11016 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_540 2 1 11011 11014 11015 $input=11011 $shape=11014 #11011=(36,64,576)f32 #11015=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_8891 0 1 23419 expr=0
pnnx.Expression pnnx_expr_8890 0 1 23420 expr=0
pnnx.Expression pnnx_expr_8889 0 1 23421 expr=0
pnnx.Expression pnnx_expr_8888 0 1 23422 expr=1
pnnx.Expression pnnx_expr_8887 0 1 23423 expr=0
pnnx.Expression pnnx_expr_8886 0 1 23424 expr=2
torch.permute torch.permute_2741 2 1 11015 11016 qkv1.37 $input=11015 $dims=11016 #11015=(36,64,3,6,32)f32 #qkv1.37=(3,36,6,64,32)f32
Tensor.select Tensor.select_809 3 1 qkv1.37 23419 23420 q.109 $input=qkv1.37 $dim=23419 $index=23420 #qkv1.37=(3,36,6,64,32)f32 #q.109=(36,6,64,32)f32
pnnx.Expression pnnx_expr_8885 1 1 q.109 q1.37 expr=mul(@0,1.767767e-01) #q.109=(36,6,64,32)f32 #q1.37=(36,6,64,32)f32
Tensor.select Tensor.select_810 3 1 qkv1.37 23421 23422 k.109 $input=qkv1.37 $dim=23421 $index=23422 #qkv1.37=(3,36,6,64,32)f32 #k.109=(36,6,64,32)f32
pnnx.Expression pnnx_expr_8883 0 1 11024 expr=[-1]
Tensor.view Tensor.view_1569 2 1 relative_position_index.109 11024 11025 $input=relative_position_index.109 $shape=11024 #relative_position_index.109=(64,64)i64 #11025=(4096)i64
pnnx.Expression pnnx_expr_8882 1 1 11025 11026 expr=[@0] #11025=(4096)i64
pnnx.Expression pnnx_expr_8879 0 1 11028 expr=[64,64,-1]
Tensor.index Tensor.index_379 2 1 relative_position_bias_table.109 11026 11027 $input=relative_position_bias_table.109 $expr=11026 #relative_position_bias_table.109=(225,6)f32 #11027=(4096,6)f32
pnnx.Expression pnnx_expr_8875 0 1 11030 expr=[2,0,1]
Tensor.view Tensor.view_1570 2 1 11027 11028 relative_position_bias.109 $input=11027 $shape=11028 #11027=(4096,6)f32 #relative_position_bias.109=(64,64,6)f32
pnnx.Expression pnnx_expr_8874 0 1 23432 expr=0
torch.permute torch.permute_2742 2 1 relative_position_bias.109 11030 11031 $input=relative_position_bias.109 $dims=11030 #relative_position_bias.109=(64,64,6)f32 #11031=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_162 1 1 11031 relative_position_bias1.37 memory_format=torch.contiguous_format $input=11031 #11031=(6,64,64)f32 #relative_position_bias1.37=(6,64,64)f32
torch.transpose torch.transpose_3085 3 1 k.109 10986 10987 11022 $input=k.109 $dim0=10986 $dim1=10987 #k.109=(36,6,64,32)f32 #11022=(36,6,32,64)f32
torch.matmul torch.matmul_2310 2 1 q1.37 11022 attn.219 $input=q1.37 $other=11022 #q1.37=(36,6,64,32)f32 #11022=(36,6,32,64)f32 #attn.219=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3323 2 1 relative_position_bias1.37 23432 11033 $input=relative_position_bias1.37 $dim=23432 #relative_position_bias1.37=(6,64,64)f32 #11033=(1,6,64,64)f32
pnnx.Expression pnnx_expr_8872 2 1 attn.219 11033 attn2.19 expr=add(@0,@1) #attn.219=(36,6,64,64)f32 #11033=(1,6,64,64)f32 #attn2.19=(36,6,64,64)f32
pnnx.Expression pnnx_expr_8863 2 1 x8.37 attn_mask.55 11040 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.37=(36,64,192)f32 #attn_mask.55=(36,64,64)f32
pnnx.Expression pnnx_expr_8862 0 1 23437 expr=1
pnnx.Expression pnnx_expr_8861 0 1 23438 expr=0
Tensor.view Tensor.view_1571 2 1 attn2.19 11040 11041 $input=attn2.19 $shape=11040 #attn2.19=(36,6,64,64)f32 #11041=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3324 2 1 attn_mask.55 23437 11042 $input=attn_mask.55 $dim=23437 #attn_mask.55=(36,64,64)f32 #11042=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3325 2 1 11042 23438 11043 $input=11042 $dim=23438 #11042=(36,1,64,64)f32 #11043=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_8859 2 1 11041 11043 attn3.19 expr=add(@0,@1) #11041=(1,36,6,64,64)f32 #11043=(1,36,1,64,64)f32 #attn3.19=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_8856 1 1 x8.37 11045 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.37=(36,64,192)f32
Tensor.view Tensor.view_1572 2 1 attn3.19 11045 input.243 $input=attn3.19 $shape=11045 #attn3.19=(1,36,6,64,64)f32 #input.243=(36,6,64,64)f32
nn.Softmax pnnx_unique_328 1 1 input.243 11047 dim=-1 #input.243=(36,6,64,64)f32 #11047=(36,6,64,64)f32
nn.Dropout pnnx_unique_329 1 1 11047 11048 #11047=(36,6,64,64)f32 #11048=(36,6,64,64)f32
Tensor.select Tensor.select_811 3 1 qkv1.37 23423 23424 v.109 $input=qkv1.37 $dim=23423 $index=23424 #qkv1.37=(3,36,6,64,32)f32 #v.109=(36,6,64,32)f32
pnnx.Expression pnnx_expr_8855 0 1 23442 expr=1
pnnx.Expression pnnx_expr_8854 0 1 23443 expr=2
torch.matmul torch.matmul_2311 2 1 11048 v.109 11049 $input=11048 $other=v.109 #11048=(36,6,64,64)f32 #v.109=(36,6,64,32)f32 #11049=(36,6,64,32)f32
pnnx.Expression pnnx_expr_8853 1 1 x8.37 11051 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.37=(36,64,192)f32
torch.transpose torch.transpose_3086 3 1 11049 23442 23443 11050 $input=11049 $dim0=23442 $dim1=23443 #11049=(36,6,64,32)f32 #11050=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_541 2 1 11050 11051 input1.39 $input=11050 $shape=11051 #11050=(36,64,6,32)f32 #input1.39=(36,64,192)f32
nn.Linear pnnx_unique_330 1 1 input1.39 11053 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.39=(36,64,192)f32 #11053=(36,64,192)f32
nn.Dropout pnnx_unique_331 1 1 11053 11054 #11053=(36,64,192)f32 #11054=(36,64,192)f32
pnnx.Expression pnnx_expr_8849 1 1 10918 11055 expr=[-1,8,8,int(size(@0,2))] #10918=(1,2304,192)f32
pnnx.Expression pnnx_expr_8836 1 1 157 11061 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32
pnnx.Expression pnnx_expr_8829 0 1 11063 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1573 2 1 11054 11055 windows.109 $input=11054 $shape=11055 #11054=(36,64,192)f32 #windows.109=(36,8,8,192)f32
Tensor.view Tensor.view_1574 2 1 windows.109 11061 x9.37 $input=windows.109 $shape=11061 #windows.109=(36,8,8,192)f32 #x9.37=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_8826 1 1 157 11066 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32
torch.permute torch.permute_2743 2 1 x9.37 11063 11064 $input=x9.37 $dims=11063 #x9.37=(1,6,6,8,8,192)f32 #11064=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_163 1 1 11064 11065 memory_format=torch.contiguous_format $input=11064 #11064=(1,6,8,6,8,192)f32 #11065=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_8823 0 1 11068 expr=[4,4]
pnnx.Expression pnnx_expr_8820 0 1 11069 expr=[1,2]
Tensor.view Tensor.view_1575 2 1 11065 11066 shifted_x.55 $input=11065 $shape=11066 #11065=(1,6,8,6,8,192)f32 #shifted_x.55=(1,48,48,192)f32
pnnx.Expression pnnx_expr_8817 2 1 10918 157 11073 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #10918=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2473 3 1 shifted_x.55 11068 11069 x10.19 $input=shifted_x.55 $shifts=11068 $dims=11069 #shifted_x.55=(1,48,48,192)f32 #x10.19=(1,48,48,192)f32 Tensor.view Tensor.view_1576 2 1 x10.19 11073 x11.19 $input=x10.19 $shape=11073 #x10.19=(1,48,48,192)f32 #x11.19=(1,2304,192)f32 pnnx.Expression pnnx_expr_8815 2 1 10918 x11.19 input.245 expr=add(@0,@1) #10918=(1,2304,192)f32 #x11.19=(1,2304,192)f32 #input.245=(1,2304,192)f32 nn.LayerNorm pnnx_unique_332 1 1 input.245 11077 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.245=(1,2304,192)f32 #11077=(1,2304,192)f32 nn.Linear pnnx_unique_333 1 1 11077 11082 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11077=(1,2304,192)f32 #11082=(1,2304,384)f32 nn.GELU pnnx_unique_334 1 1 11082 11083 #11082=(1,2304,384)f32 #11083=(1,2304,384)f32 nn.Dropout pnnx_unique_335 1 1 11083 11084 #11083=(1,2304,384)f32 #11084=(1,2304,384)f32 nn.Linear pnnx_unique_336 1 1 11084 11085 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11084=(1,2304,384)f32 #11085=(1,2304,192)f32 nn.Dropout pnnx_unique_337 1 1 11085 11086 #11085=(1,2304,192)f32 #11086=(1,2304,192)f32 pnnx.Expression pnnx_expr_8813 2 1 input.245 11086 11088 expr=add(@0,@1) #input.245=(1,2304,192)f32 #11086=(1,2304,192)f32 #11088=(1,2304,192)f32 pnnx.Expression pnnx_expr_8811 0 1 11090 expr=1 pnnx.Expression pnnx_expr_8810 0 1 11091 expr=2 pnnx.Expression pnnx_expr_8805 2 1 11088 157 11097 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #11088=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3087 3 1 11088 11090 11091 11096 $input=11088 $dim0=11090 $dim1=11091 #11088=(1,2304,192)f32 #11096=(1,192,2304)f32 Tensor.view Tensor.view_1577 2 1 11096 11097 input.247 $input=11096 $shape=11097 #11096=(1,192,2304)f32 #input.247=(1,192,48,48)f32 nn.Conv2d pnnx_unique_338 1 1 input.247 11099 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.247=(1,192,48,48)f32 #11099=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8804 0 1 11100 expr=-1 pnnx.Expression pnnx_expr_8803 0 1 11101 expr=2 pnnx.Expression pnnx_expr_8802 0 1 11102 expr=1 pnnx.Expression pnnx_expr_8801 0 1 23470 expr=2 torch.flatten torch.flatten_2193 3 1 11099 11101 11100 11103 $input=11099 $start_dim=11101 $end_dim=11100 #11099=(1,192,48,48)f32 #11103=(1,192,2304)f32 torch.transpose torch.transpose_3088 3 1 11103 11102 23470 11104 $input=11103 $dim0=11102 $dim1=23470 #11103=(1,192,2304)f32 #11104=(1,2304,192)f32 pnnx.Expression pnnx_expr_8800 2 1 11104 10123 11105 expr=add(@0,@1) #11104=(1,2304,192)f32 #10123=(1,2304,192)f32 #11105=(1,2304,192)f32 nn.LayerNorm pnnx_unique_339 1 1 11105 11149 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11105=(1,2304,192)f32 #11149=(1,2304,192)f32 pnnx.Expression pnnx_expr_8777 2 1 11105 157 11150 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11105=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1578 2 1 11149 11150 x.111 $input=11149 $shape=11150 #11149=(1,2304,192)f32 #x.111=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8756 1 1 x.111 11167 
expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.111=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8751 0 1 11169 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1579 2 1 x.111 11167 x5.57 $input=x.111 $shape=11167 #x.111=(1,48,48,192)f32 #x5.57=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8748 1 1 x.111 11172 expr=[-1,8,8,int(size(@0,3))] #x.111=(1,48,48,192)f32 torch.permute torch.permute_2744 2 1 x5.57 11169 11170 $input=x5.57 $dims=11169 #x5.57=(1,6,8,6,8,192)f32 #11170=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_164 1 1 11170 11171 memory_format=torch.contiguous_format $input=11170 #11170=(1,6,6,8,8,192)f32 #11171=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8746 1 1 11105 11174 expr=[-1,64,int(size(@0,2))] #11105=(1,2304,192)f32 pnnx.Expression pnnx_expr_8736 0 1 11185 expr=-2 pnnx.Expression pnnx_expr_8735 0 1 11186 expr=-1 pnnx.Attribute pnnx_unique_340 0 1 relative_position_bias_table.111 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.111=(225,6)f32 pnnx.Attribute pnnx_unique_341 0 1 relative_position_index.111 @relative_position_index=(64,64)i64 #relative_position_index.111=(64,64)i64 Tensor.view Tensor.view_1580 2 1 11171 11172 x_windows.111 $input=11171 $shape=11172 #11171=(1,6,6,8,8,192)f32 #x_windows.111=(36,8,8,192)f32 Tensor.view Tensor.view_1581 2 1 x_windows.111 11174 x6.39 $input=x_windows.111 $shape=11174 #x_windows.111=(36,8,8,192)f32 #x6.39=(36,64,192)f32 nn.Linear pnnx_unique_342 1 1 x6.39 11206 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.39=(36,64,192)f32 #11206=(36,64,576)f32 pnnx.Expression pnnx_expr_8720 1 1 x6.39 11209 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.39=(36,64,192)f32 pnnx.Expression pnnx_expr_8715 0 1 11211 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_542 2 1 11206 11209 11210 $input=11206 $shape=11209 #11206=(36,64,576)f32 #11210=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_8714 0 1 23488 expr=0 pnnx.Expression pnnx_expr_8713 0 1 23489 expr=0 pnnx.Expression pnnx_expr_8712 0 1 23490 expr=0 pnnx.Expression pnnx_expr_8711 0 1 23491 expr=1 pnnx.Expression pnnx_expr_8710 0 1 23492 expr=0 pnnx.Expression pnnx_expr_8709 0 1 23493 expr=2 torch.permute torch.permute_2745 2 1 11210 11211 qkv1.39 $input=11210 $dims=11211 #11210=(36,64,3,6,32)f32 #qkv1.39=(3,36,6,64,32)f32 Tensor.select Tensor.select_812 3 1 qkv1.39 23488 23489 q.111 $input=qkv1.39 $dim=23488 $index=23489 #qkv1.39=(3,36,6,64,32)f32 #q.111=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8708 1 1 q.111 q1.39 expr=mul(@0,1.767767e-01) #q.111=(36,6,64,32)f32 #q1.39=(36,6,64,32)f32 Tensor.select Tensor.select_813 3 1 qkv1.39 23490 23491 k.111 $input=qkv1.39 $dim=23490 $index=23491 #qkv1.39=(3,36,6,64,32)f32 #k.111=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8706 0 1 11219 expr=[-1] Tensor.view Tensor.view_1582 2 1 relative_position_index.111 11219 11220 $input=relative_position_index.111 $shape=11219 #relative_position_index.111=(64,64)i64 #11220=(4096)i64 pnnx.Expression pnnx_expr_8705 1 1 11220 11221 expr=[@0] #11220=(4096)i64 pnnx.Expression pnnx_expr_8702 0 1 11223 expr=[64,64,-1] Tensor.index Tensor.index_380 2 1 relative_position_bias_table.111 11221 11222 $input=relative_position_bias_table.111 $expr=11221 #relative_position_bias_table.111=(225,6)f32 #11222=(4096,6)f32 pnnx.Expression pnnx_expr_8698 0 1 11225 expr=[2,0,1] Tensor.view Tensor.view_1583 2 1 11222 11223 relative_position_bias.111 $input=11222 $shape=11223 #11222=(4096,6)f32 
#relative_position_bias.111=(64,64,6)f32 pnnx.Expression pnnx_expr_8697 0 1 23501 expr=0 torch.permute torch.permute_2746 2 1 relative_position_bias.111 11225 11226 $input=relative_position_bias.111 $dims=11225 #relative_position_bias.111=(64,64,6)f32 #11226=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_165 1 1 11226 relative_position_bias1.39 memory_format=torch.contiguous_format $input=11226 #11226=(6,64,64)f32 #relative_position_bias1.39=(6,64,64)f32 torch.transpose torch.transpose_3089 3 1 k.111 11185 11186 11217 $input=k.111 $dim0=11185 $dim1=11186 #k.111=(36,6,64,32)f32 #11217=(36,6,32,64)f32 torch.matmul torch.matmul_2312 2 1 q1.39 11217 attn.223 $input=q1.39 $other=11217 #q1.39=(36,6,64,32)f32 #11217=(36,6,32,64)f32 #attn.223=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3326 2 1 relative_position_bias1.39 23501 11228 $input=relative_position_bias1.39 $dim=23501 #relative_position_bias1.39=(6,64,64)f32 #11228=(1,6,64,64)f32 pnnx.Expression pnnx_expr_8695 2 1 attn.223 11228 input.249 expr=add(@0,@1) #attn.223=(36,6,64,64)f32 #11228=(1,6,64,64)f32 #input.249=(36,6,64,64)f32 nn.Softmax pnnx_unique_343 1 1 input.249 11230 dim=-1 #input.249=(36,6,64,64)f32 #11230=(36,6,64,64)f32 nn.Dropout pnnx_unique_344 1 1 11230 11231 #11230=(36,6,64,64)f32 #11231=(36,6,64,64)f32 Tensor.select Tensor.select_814 3 1 qkv1.39 23492 23493 v.111 $input=qkv1.39 $dim=23492 $index=23493 #qkv1.39=(3,36,6,64,32)f32 #v.111=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8694 0 1 23503 expr=1 pnnx.Expression pnnx_expr_8693 0 1 23504 expr=2 torch.matmul torch.matmul_2313 2 1 11231 v.111 11232 $input=11231 $other=v.111 #11231=(36,6,64,64)f32 #v.111=(36,6,64,32)f32 #11232=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8692 1 1 x6.39 11234 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.39=(36,64,192)f32 torch.transpose torch.transpose_3090 3 1 11232 23503 23504 11233 $input=11232 $dim0=23503 $dim1=23504 #11232=(36,6,64,32)f32 #11233=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_543 2 1 11233 11234 input1.41 $input=11233 $shape=11234 #11233=(36,64,6,32)f32 #input1.41=(36,64,192)f32 nn.Linear pnnx_unique_345 1 1 input1.41 11236 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.41=(36,64,192)f32 #11236=(36,64,192)f32 nn.Dropout pnnx_unique_346 1 1 11236 11237 #11236=(36,64,192)f32 #11237=(36,64,192)f32 pnnx.Expression pnnx_expr_8688 1 1 11105 11238 expr=[-1,8,8,int(size(@0,2))] #11105=(1,2304,192)f32 pnnx.Expression pnnx_expr_8675 1 1 157 11244 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8668 0 1 11246 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1584 2 1 11237 11238 windows.111 $input=11237 $shape=11238 #11237=(36,64,192)f32 #windows.111=(36,8,8,192)f32 Tensor.view Tensor.view_1585 2 1 windows.111 11244 x7.39 $input=windows.111 $shape=11244 #windows.111=(36,8,8,192)f32 #x7.39=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8665 1 1 157 11249 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2747 2 1 x7.39 11246 11247 $input=x7.39 $dims=11246 #x7.39=(1,6,6,8,8,192)f32 #11247=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_166 1 1 11247 11248 memory_format=torch.contiguous_format $input=11247 #11247=(1,6,8,6,8,192)f32 #11248=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8662 2 1 11105 157 11253 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11105=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1586 2 1 11248 11249 x8.39 
$input=11248 $shape=11249 #11248=(1,6,8,6,8,192)f32 #x8.39=(1,48,48,192)f32 Tensor.view Tensor.view_1587 2 1 x8.39 11253 x9.39 $input=x8.39 $shape=11253 #x8.39=(1,48,48,192)f32 #x9.39=(1,2304,192)f32 pnnx.Expression pnnx_expr_8660 2 1 11105 x9.39 input.251 expr=add(@0,@1) #11105=(1,2304,192)f32 #x9.39=(1,2304,192)f32 #input.251=(1,2304,192)f32 nn.LayerNorm pnnx_unique_347 1 1 input.251 11257 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.251=(1,2304,192)f32 #11257=(1,2304,192)f32 nn.Linear pnnx_unique_348 1 1 11257 11262 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11257=(1,2304,192)f32 #11262=(1,2304,384)f32 nn.GELU pnnx_unique_349 1 1 11262 11263 #11262=(1,2304,384)f32 #11263=(1,2304,384)f32 nn.Dropout pnnx_unique_350 1 1 11263 11264 #11263=(1,2304,384)f32 #11264=(1,2304,384)f32 nn.Linear pnnx_unique_351 1 1 11264 11265 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11264=(1,2304,384)f32 #11265=(1,2304,192)f32 nn.Dropout pnnx_unique_352 1 1 11265 11266 #11265=(1,2304,192)f32 #11266=(1,2304,192)f32 pnnx.Expression pnnx_expr_8658 2 1 input.251 11266 11268 expr=add(@0,@1) #input.251=(1,2304,192)f32 #11266=(1,2304,192)f32 #11268=(1,2304,192)f32 pnnx.Attribute pnnx_unique_353 0 1 attn_mask.57 @attn_mask=(36,64,64)f32 #attn_mask.57=(36,64,64)f32 nn.LayerNorm pnnx_unique_354 1 1 11268 11297 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11268=(1,2304,192)f32 #11297=(1,2304,192)f32 pnnx.Expression pnnx_expr_8635 2 1 11268 157 11298 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11268=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8633 0 1 11300 expr=[-4,-4] pnnx.Expression pnnx_expr_8631 0 1 11301 expr=[1,2] Tensor.view Tensor.view_1588 2 1 11297 11298 x.113 $input=11297 $shape=11298 #11297=(1,2304,192)f32 #x.113=(1,48,48,192)f32 torch.roll torch.roll_2474 3 1 x.113 11300 11301 x6.41 $input=x.113 $shifts=11300 $dims=11301 #x.113=(1,48,48,192)f32 #x6.41=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8609 1 1 x6.41 11318 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.41=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8604 0 1 11320 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1589 2 1 x6.41 11318 x7.41 $input=x6.41 $shape=11318 #x6.41=(1,48,48,192)f32 #x7.41=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8601 1 1 x6.41 11323 expr=[-1,8,8,int(size(@0,3))] #x6.41=(1,48,48,192)f32 torch.permute torch.permute_2748 2 1 x7.41 11320 11321 $input=x7.41 $dims=11320 #x7.41=(1,6,8,6,8,192)f32 #11321=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_167 1 1 11321 11322 memory_format=torch.contiguous_format $input=11321 #11321=(1,6,6,8,8,192)f32 #11322=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8599 1 1 11268 11325 expr=[-1,64,int(size(@0,2))] #11268=(1,2304,192)f32 pnnx.Expression pnnx_expr_8589 0 1 11336 expr=-2 pnnx.Expression pnnx_expr_8588 0 1 11337 expr=-1 pnnx.Attribute pnnx_unique_355 0 1 relative_position_bias_table.113 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.113=(225,6)f32 pnnx.Attribute pnnx_unique_356 0 1 relative_position_index.113 @relative_position_index=(64,64)i64 #relative_position_index.113=(64,64)i64 Tensor.view Tensor.view_1590 2 1 11322 11323 x_windows.113 $input=11322 $shape=11323 #11322=(1,6,6,8,8,192)f32 #x_windows.113=(36,8,8,192)f32 Tensor.view Tensor.view_1591 2 1 x_windows.113 11325 x8.41 
$input=x_windows.113 $shape=11325 #x_windows.113=(36,8,8,192)f32 #x8.41=(36,64,192)f32 nn.Linear pnnx_unique_357 1 1 x8.41 11361 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.41=(36,64,192)f32 #11361=(36,64,576)f32 pnnx.Expression pnnx_expr_8569 1 1 x8.41 11364 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.41=(36,64,192)f32 pnnx.Expression pnnx_expr_8564 0 1 11366 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_544 2 1 11361 11364 11365 $input=11361 $shape=11364 #11361=(36,64,576)f32 #11365=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_8563 0 1 23547 expr=0 pnnx.Expression pnnx_expr_8562 0 1 23548 expr=0 pnnx.Expression pnnx_expr_8561 0 1 23549 expr=0 pnnx.Expression pnnx_expr_8560 0 1 23550 expr=1 pnnx.Expression pnnx_expr_8559 0 1 23551 expr=0 pnnx.Expression pnnx_expr_8558 0 1 23552 expr=2 torch.permute torch.permute_2749 2 1 11365 11366 qkv1.41 $input=11365 $dims=11366 #11365=(36,64,3,6,32)f32 #qkv1.41=(3,36,6,64,32)f32 Tensor.select Tensor.select_815 3 1 qkv1.41 23547 23548 q.113 $input=qkv1.41 $dim=23547 $index=23548 #qkv1.41=(3,36,6,64,32)f32 #q.113=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8557 1 1 q.113 q1.41 expr=mul(@0,1.767767e-01) #q.113=(36,6,64,32)f32 #q1.41=(36,6,64,32)f32 Tensor.select Tensor.select_816 3 1 qkv1.41 23549 23550 k.113 $input=qkv1.41 $dim=23549 $index=23550 #qkv1.41=(3,36,6,64,32)f32 #k.113=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8555 0 1 11374 expr=[-1] Tensor.view Tensor.view_1592 2 1 relative_position_index.113 11374 11375 $input=relative_position_index.113 $shape=11374 #relative_position_index.113=(64,64)i64 #11375=(4096)i64 pnnx.Expression pnnx_expr_8554 1 1 11375 11376 expr=[@0] #11375=(4096)i64 pnnx.Expression pnnx_expr_8551 0 1 11378 expr=[64,64,-1] Tensor.index Tensor.index_381 2 1 relative_position_bias_table.113 11376 11377 $input=relative_position_bias_table.113 $expr=11376 #relative_position_bias_table.113=(225,6)f32 #11377=(4096,6)f32 pnnx.Expression pnnx_expr_8547 0 1 11380 expr=[2,0,1] Tensor.view Tensor.view_1593 2 1 11377 11378 relative_position_bias.113 $input=11377 $shape=11378 #11377=(4096,6)f32 #relative_position_bias.113=(64,64,6)f32 pnnx.Expression pnnx_expr_8546 0 1 23560 expr=0 torch.permute torch.permute_2750 2 1 relative_position_bias.113 11380 11381 $input=relative_position_bias.113 $dims=11380 #relative_position_bias.113=(64,64,6)f32 #11381=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_168 1 1 11381 relative_position_bias1.41 memory_format=torch.contiguous_format $input=11381 #11381=(6,64,64)f32 #relative_position_bias1.41=(6,64,64)f32 torch.transpose torch.transpose_3091 3 1 k.113 11336 11337 11372 $input=k.113 $dim0=11336 $dim1=11337 #k.113=(36,6,64,32)f32 #11372=(36,6,32,64)f32 torch.matmul torch.matmul_2314 2 1 q1.41 11372 attn.227 $input=q1.41 $other=11372 #q1.41=(36,6,64,32)f32 #11372=(36,6,32,64)f32 #attn.227=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3327 2 1 relative_position_bias1.41 23560 11383 $input=relative_position_bias1.41 $dim=23560 #relative_position_bias1.41=(6,64,64)f32 #11383=(1,6,64,64)f32 pnnx.Expression pnnx_expr_8544 2 1 attn.227 11383 attn2.21 expr=add(@0,@1) #attn.227=(36,6,64,64)f32 #11383=(1,6,64,64)f32 #attn2.21=(36,6,64,64)f32 pnnx.Expression pnnx_expr_8535 2 1 x8.41 attn_mask.57 11390 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.41=(36,64,192)f32 #attn_mask.57=(36,64,64)f32 pnnx.Expression pnnx_expr_8534 0 1 23565 expr=1 pnnx.Expression pnnx_expr_8533 0 1 23566 expr=0 Tensor.view 
Tensor.view_1594 2 1 attn2.21 11390 11391 $input=attn2.21 $shape=11390 #attn2.21=(36,6,64,64)f32 #11391=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3328 2 1 attn_mask.57 23565 11392 $input=attn_mask.57 $dim=23565 #attn_mask.57=(36,64,64)f32 #11392=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3329 2 1 11392 23566 11393 $input=11392 $dim=23566 #11392=(36,1,64,64)f32 #11393=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_8531 2 1 11391 11393 attn3.21 expr=add(@0,@1) #11391=(1,36,6,64,64)f32 #11393=(1,36,1,64,64)f32 #attn3.21=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_8528 1 1 x8.41 11395 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.41=(36,64,192)f32 Tensor.view Tensor.view_1595 2 1 attn3.21 11395 input.253 $input=attn3.21 $shape=11395 #attn3.21=(1,36,6,64,64)f32 #input.253=(36,6,64,64)f32 nn.Softmax pnnx_unique_358 1 1 input.253 11397 dim=-1 #input.253=(36,6,64,64)f32 #11397=(36,6,64,64)f32 nn.Dropout pnnx_unique_359 1 1 11397 11398 #11397=(36,6,64,64)f32 #11398=(36,6,64,64)f32 Tensor.select Tensor.select_817 3 1 qkv1.41 23551 23552 v.113 $input=qkv1.41 $dim=23551 $index=23552 #qkv1.41=(3,36,6,64,32)f32 #v.113=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8527 0 1 23570 expr=1 pnnx.Expression pnnx_expr_8526 0 1 23571 expr=2 torch.matmul torch.matmul_2315 2 1 11398 v.113 11399 $input=11398 $other=v.113 #11398=(36,6,64,64)f32 #v.113=(36,6,64,32)f32 #11399=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8525 1 1 x8.41 11401 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.41=(36,64,192)f32 torch.transpose torch.transpose_3092 3 1 11399 23570 23571 11400 $input=11399 $dim0=23570 $dim1=23571 #11399=(36,6,64,32)f32 #11400=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_545 2 1 11400 11401 input1.43 $input=11400 $shape=11401 #11400=(36,64,6,32)f32 #input1.43=(36,64,192)f32 nn.Linear pnnx_unique_360 1 1 input1.43 11403 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.43=(36,64,192)f32 #11403=(36,64,192)f32 nn.Dropout pnnx_unique_361 1 1 11403 11404 #11403=(36,64,192)f32 #11404=(36,64,192)f32 pnnx.Expression pnnx_expr_8521 1 1 11268 11405 expr=[-1,8,8,int(size(@0,2))] #11268=(1,2304,192)f32 pnnx.Expression pnnx_expr_8508 1 1 157 11411 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8501 0 1 11413 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1596 2 1 11404 11405 windows.113 $input=11404 $shape=11405 #11404=(36,64,192)f32 #windows.113=(36,8,8,192)f32 Tensor.view Tensor.view_1597 2 1 windows.113 11411 x9.41 $input=windows.113 $shape=11411 #windows.113=(36,8,8,192)f32 #x9.41=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8498 1 1 157 11416 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2751 2 1 x9.41 11413 11414 $input=x9.41 $dims=11413 #x9.41=(1,6,6,8,8,192)f32 #11414=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_169 1 1 11414 11415 memory_format=torch.contiguous_format $input=11414 #11414=(1,6,8,6,8,192)f32 #11415=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8495 0 1 11418 expr=[4,4] pnnx.Expression pnnx_expr_8492 0 1 11419 expr=[1,2] Tensor.view Tensor.view_1598 2 1 11415 11416 shifted_x.57 $input=11415 $shape=11416 #11415=(1,6,8,6,8,192)f32 #shifted_x.57=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8489 2 1 11268 157 11423 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11268=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2475 3 1 shifted_x.57 11418 11419 x10.21 $input=shifted_x.57 $shifts=11418 $dims=11419 
#shifted_x.57=(1,48,48,192)f32 #x10.21=(1,48,48,192)f32 Tensor.view Tensor.view_1599 2 1 x10.21 11423 x11.21 $input=x10.21 $shape=11423 #x10.21=(1,48,48,192)f32 #x11.21=(1,2304,192)f32 pnnx.Expression pnnx_expr_8487 2 1 11268 x11.21 input.255 expr=add(@0,@1) #11268=(1,2304,192)f32 #x11.21=(1,2304,192)f32 #input.255=(1,2304,192)f32 nn.LayerNorm pnnx_unique_362 1 1 input.255 11427 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.255=(1,2304,192)f32 #11427=(1,2304,192)f32 nn.Linear pnnx_unique_363 1 1 11427 11432 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11427=(1,2304,192)f32 #11432=(1,2304,384)f32 nn.GELU pnnx_unique_364 1 1 11432 11433 #11432=(1,2304,384)f32 #11433=(1,2304,384)f32 nn.Dropout pnnx_unique_365 1 1 11433 11434 #11433=(1,2304,384)f32 #11434=(1,2304,384)f32 nn.Linear pnnx_unique_366 1 1 11434 11435 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11434=(1,2304,384)f32 #11435=(1,2304,192)f32 nn.Dropout pnnx_unique_367 1 1 11435 11436 #11435=(1,2304,192)f32 #11436=(1,2304,192)f32 pnnx.Expression pnnx_expr_8485 2 1 input.255 11436 11438 expr=add(@0,@1) #input.255=(1,2304,192)f32 #11436=(1,2304,192)f32 #11438=(1,2304,192)f32 nn.LayerNorm pnnx_unique_368 1 1 11438 11465 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11438=(1,2304,192)f32 #11465=(1,2304,192)f32 pnnx.Expression pnnx_expr_8463 2 1 11438 157 11466 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11438=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1600 2 1 11465 11466 x.115 $input=11465 $shape=11466 #11465=(1,2304,192)f32 #x.115=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8442 1 1 x.115 11483 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.115=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8437 0 1 11485 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1601 2 1 x.115 11483 x5.59 $input=x.115 $shape=11483 #x.115=(1,48,48,192)f32 #x5.59=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8434 1 1 x.115 11488 expr=[-1,8,8,int(size(@0,3))] #x.115=(1,48,48,192)f32 torch.permute torch.permute_2752 2 1 x5.59 11485 11486 $input=x5.59 $dims=11485 #x5.59=(1,6,8,6,8,192)f32 #11486=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_170 1 1 11486 11487 memory_format=torch.contiguous_format $input=11486 #11486=(1,6,6,8,8,192)f32 #11487=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8432 1 1 11438 11490 expr=[-1,64,int(size(@0,2))] #11438=(1,2304,192)f32 pnnx.Expression pnnx_expr_8422 0 1 11501 expr=-2 pnnx.Expression pnnx_expr_8421 0 1 11502 expr=-1 pnnx.Attribute pnnx_unique_369 0 1 relative_position_bias_table.115 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.115=(225,6)f32 pnnx.Attribute pnnx_unique_370 0 1 relative_position_index.115 @relative_position_index=(64,64)i64 #relative_position_index.115=(64,64)i64 Tensor.view Tensor.view_1602 2 1 11487 11488 x_windows.115 $input=11487 $shape=11488 #11487=(1,6,6,8,8,192)f32 #x_windows.115=(36,8,8,192)f32 Tensor.view Tensor.view_1603 2 1 x_windows.115 11490 x6.43 $input=x_windows.115 $shape=11490 #x_windows.115=(36,8,8,192)f32 #x6.43=(36,64,192)f32 nn.Linear pnnx_unique_371 1 1 x6.43 11522 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.43=(36,64,192)f32 #11522=(36,64,576)f32 pnnx.Expression pnnx_expr_8406 1 1 x6.43 11525 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] 
#x6.43=(36,64,192)f32 pnnx.Expression pnnx_expr_8401 0 1 11527 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_546 2 1 11522 11525 11526 $input=11522 $shape=11525 #11522=(36,64,576)f32 #11526=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_8400 0 1 23615 expr=0 pnnx.Expression pnnx_expr_8399 0 1 23616 expr=0 pnnx.Expression pnnx_expr_8398 0 1 23617 expr=0 pnnx.Expression pnnx_expr_8397 0 1 23618 expr=1 pnnx.Expression pnnx_expr_8396 0 1 23619 expr=0 pnnx.Expression pnnx_expr_8395 0 1 23620 expr=2 torch.permute torch.permute_2753 2 1 11526 11527 qkv1.43 $input=11526 $dims=11527 #11526=(36,64,3,6,32)f32 #qkv1.43=(3,36,6,64,32)f32 Tensor.select Tensor.select_818 3 1 qkv1.43 23615 23616 q.115 $input=qkv1.43 $dim=23615 $index=23616 #qkv1.43=(3,36,6,64,32)f32 #q.115=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8394 1 1 q.115 q1.43 expr=mul(@0,1.767767e-01) #q.115=(36,6,64,32)f32 #q1.43=(36,6,64,32)f32 Tensor.select Tensor.select_819 3 1 qkv1.43 23617 23618 k.115 $input=qkv1.43 $dim=23617 $index=23618 #qkv1.43=(3,36,6,64,32)f32 #k.115=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8392 0 1 11535 expr=[-1] Tensor.view Tensor.view_1604 2 1 relative_position_index.115 11535 11536 $input=relative_position_index.115 $shape=11535 #relative_position_index.115=(64,64)i64 #11536=(4096)i64 pnnx.Expression pnnx_expr_8391 1 1 11536 11537 expr=[@0] #11536=(4096)i64 pnnx.Expression pnnx_expr_8388 0 1 11539 expr=[64,64,-1] Tensor.index Tensor.index_382 2 1 relative_position_bias_table.115 11537 11538 $input=relative_position_bias_table.115 $expr=11537 #relative_position_bias_table.115=(225,6)f32 #11538=(4096,6)f32 pnnx.Expression pnnx_expr_8384 0 1 11541 expr=[2,0,1] Tensor.view Tensor.view_1605 2 1 11538 11539 relative_position_bias.115 $input=11538 $shape=11539 #11538=(4096,6)f32 #relative_position_bias.115=(64,64,6)f32 pnnx.Expression pnnx_expr_8383 0 1 23628 expr=0 torch.permute torch.permute_2754 2 1 relative_position_bias.115 11541 11542 $input=relative_position_bias.115 $dims=11541 #relative_position_bias.115=(64,64,6)f32 #11542=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_171 1 1 11542 relative_position_bias1.43 memory_format=torch.contiguous_format $input=11542 #11542=(6,64,64)f32 #relative_position_bias1.43=(6,64,64)f32 torch.transpose torch.transpose_3093 3 1 k.115 11501 11502 11533 $input=k.115 $dim0=11501 $dim1=11502 #k.115=(36,6,64,32)f32 #11533=(36,6,32,64)f32 torch.matmul torch.matmul_2316 2 1 q1.43 11533 attn.231 $input=q1.43 $other=11533 #q1.43=(36,6,64,32)f32 #11533=(36,6,32,64)f32 #attn.231=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3330 2 1 relative_position_bias1.43 23628 11544 $input=relative_position_bias1.43 $dim=23628 #relative_position_bias1.43=(6,64,64)f32 #11544=(1,6,64,64)f32 pnnx.Expression pnnx_expr_8381 2 1 attn.231 11544 input.257 expr=add(@0,@1) #attn.231=(36,6,64,64)f32 #11544=(1,6,64,64)f32 #input.257=(36,6,64,64)f32 nn.Softmax pnnx_unique_372 1 1 input.257 11546 dim=-1 #input.257=(36,6,64,64)f32 #11546=(36,6,64,64)f32 nn.Dropout pnnx_unique_373 1 1 11546 11547 #11546=(36,6,64,64)f32 #11547=(36,6,64,64)f32 Tensor.select Tensor.select_820 3 1 qkv1.43 23619 23620 v.115 $input=qkv1.43 $dim=23619 $index=23620 #qkv1.43=(3,36,6,64,32)f32 #v.115=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8380 0 1 23630 expr=1 pnnx.Expression pnnx_expr_8379 0 1 23631 expr=2 torch.matmul torch.matmul_2317 2 1 11547 v.115 11548 $input=11547 $other=v.115 #11547=(36,6,64,64)f32 #v.115=(36,6,64,32)f32 #11548=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8378 1 1 x6.43 11550 
expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.43=(36,64,192)f32 torch.transpose torch.transpose_3094 3 1 11548 23630 23631 11549 $input=11548 $dim0=23630 $dim1=23631 #11548=(36,6,64,32)f32 #11549=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_547 2 1 11549 11550 input1.45 $input=11549 $shape=11550 #11549=(36,64,6,32)f32 #input1.45=(36,64,192)f32 nn.Linear pnnx_unique_374 1 1 input1.45 11552 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.45=(36,64,192)f32 #11552=(36,64,192)f32 nn.Dropout pnnx_unique_375 1 1 11552 11553 #11552=(36,64,192)f32 #11553=(36,64,192)f32 pnnx.Expression pnnx_expr_8374 1 1 11438 11554 expr=[-1,8,8,int(size(@0,2))] #11438=(1,2304,192)f32 pnnx.Expression pnnx_expr_8361 1 1 157 11560 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8354 0 1 11562 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1606 2 1 11553 11554 windows.115 $input=11553 $shape=11554 #11553=(36,64,192)f32 #windows.115=(36,8,8,192)f32 Tensor.view Tensor.view_1607 2 1 windows.115 11560 x7.43 $input=windows.115 $shape=11560 #windows.115=(36,8,8,192)f32 #x7.43=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8351 1 1 157 11565 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2755 2 1 x7.43 11562 11563 $input=x7.43 $dims=11562 #x7.43=(1,6,6,8,8,192)f32 #11563=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_172 1 1 11563 11564 memory_format=torch.contiguous_format $input=11563 #11563=(1,6,8,6,8,192)f32 #11564=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8348 2 1 11438 157 11569 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11438=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1608 2 1 11564 11565 x8.43 $input=11564 $shape=11565 #11564=(1,6,8,6,8,192)f32 #x8.43=(1,48,48,192)f32 Tensor.view Tensor.view_1609 2 1 x8.43 11569 x9.43 $input=x8.43 $shape=11569 #x8.43=(1,48,48,192)f32 #x9.43=(1,2304,192)f32 pnnx.Expression pnnx_expr_8346 2 1 11438 x9.43 input.259 expr=add(@0,@1) #11438=(1,2304,192)f32 #x9.43=(1,2304,192)f32 #input.259=(1,2304,192)f32 nn.LayerNorm pnnx_unique_376 1 1 input.259 11573 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.259=(1,2304,192)f32 #11573=(1,2304,192)f32 nn.Linear pnnx_unique_377 1 1 11573 11578 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11573=(1,2304,192)f32 #11578=(1,2304,384)f32 nn.GELU pnnx_unique_378 1 1 11578 11579 #11578=(1,2304,384)f32 #11579=(1,2304,384)f32 nn.Dropout pnnx_unique_379 1 1 11579 11580 #11579=(1,2304,384)f32 #11580=(1,2304,384)f32 nn.Linear pnnx_unique_380 1 1 11580 11581 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11580=(1,2304,384)f32 #11581=(1,2304,192)f32 nn.Dropout pnnx_unique_381 1 1 11581 11582 #11581=(1,2304,192)f32 #11582=(1,2304,192)f32 pnnx.Expression pnnx_expr_8344 2 1 input.259 11582 11584 expr=add(@0,@1) #input.259=(1,2304,192)f32 #11582=(1,2304,192)f32 #11584=(1,2304,192)f32 pnnx.Attribute pnnx_unique_382 0 1 attn_mask.59 @attn_mask=(36,64,64)f32 #attn_mask.59=(36,64,64)f32 nn.LayerNorm pnnx_unique_383 1 1 11584 11613 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11584=(1,2304,192)f32 #11613=(1,2304,192)f32 pnnx.Expression pnnx_expr_8321 2 1 11584 157 11614 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11584=(1,2304,192)f32 #157=(1,192,48,48)f32 
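The ops that follow implement the cyclic-shift variant of the block: the (1,48,48,192) feature map is rolled by (-4,-4) (half the 8×8 window) before being partitioned into 36 windows of 64 tokens, and rolled back by (4,4) after the windows are merged. A sketch of that partition/reverse round trip under those shape assumptions (helper names are illustrative, attention between the two halves omitted):

import torch

def window_partition(x, ws=8):
    # (1,48,48,192) -> (1,6,8,6,8,192) -> (1,6,6,8,8,192) -> (36,64,192),
    # mirroring the view/permute/contiguous/view chain traced above
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C).permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, ws * ws, C)

def window_reverse(windows, H, W, ws=8):
    # inverse chain: (36,64,C) -> (1,6,6,8,8,C) -> (1,6,8,6,8,C) -> (1,48,48,C)
    B = windows.shape[0] // ((H // ws) * (W // ws))
    x = windows.view(B, H // ws, W // ws, ws, ws, -1).permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)

# shifted blocks roll by half a window before partitioning and roll back after:
x = torch.randn(1, 48, 48, 192)
shifted = torch.roll(x, shifts=(-4, -4), dims=(1, 2))   # cf. torch.roll_2476
wins = window_partition(shifted)                        # (36, 64, 192)
merged = window_reverse(wins, 48, 48)                   # attention step omitted
out = torch.roll(merged, shifts=(4, 4), dims=(1, 2))    # cf. torch.roll_2477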
pnnx.Expression pnnx_expr_8319 0 1 11616 expr=[-4,-4] pnnx.Expression pnnx_expr_8317 0 1 11617 expr=[1,2] Tensor.view Tensor.view_1610 2 1 11613 11614 x.117 $input=11613 $shape=11614 #11613=(1,2304,192)f32 #x.117=(1,48,48,192)f32 torch.roll torch.roll_2476 3 1 x.117 11616 11617 x6.45 $input=x.117 $shifts=11616 $dims=11617 #x.117=(1,48,48,192)f32 #x6.45=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8295 1 1 x6.45 11634 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.45=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8290 0 1 11636 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1611 2 1 x6.45 11634 x7.45 $input=x6.45 $shape=11634 #x6.45=(1,48,48,192)f32 #x7.45=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8287 1 1 x6.45 11639 expr=[-1,8,8,int(size(@0,3))] #x6.45=(1,48,48,192)f32 torch.permute torch.permute_2756 2 1 x7.45 11636 11637 $input=x7.45 $dims=11636 #x7.45=(1,6,8,6,8,192)f32 #11637=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_173 1 1 11637 11638 memory_format=torch.contiguous_format $input=11637 #11637=(1,6,6,8,8,192)f32 #11638=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8285 1 1 11584 11641 expr=[-1,64,int(size(@0,2))] #11584=(1,2304,192)f32 pnnx.Expression pnnx_expr_8275 0 1 11652 expr=-2 pnnx.Expression pnnx_expr_8274 0 1 11653 expr=-1 pnnx.Attribute pnnx_unique_384 0 1 relative_position_bias_table.117 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.117=(225,6)f32 pnnx.Attribute pnnx_unique_385 0 1 relative_position_index.117 @relative_position_index=(64,64)i64 #relative_position_index.117=(64,64)i64 Tensor.view Tensor.view_1612 2 1 11638 11639 x_windows.117 $input=11638 $shape=11639 #11638=(1,6,6,8,8,192)f32 #x_windows.117=(36,8,8,192)f32 Tensor.view Tensor.view_1613 2 1 x_windows.117 11641 x8.45 $input=x_windows.117 $shape=11641 #x_windows.117=(36,8,8,192)f32 #x8.45=(36,64,192)f32 nn.Linear pnnx_unique_386 1 1 x8.45 11677 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.45=(36,64,192)f32 #11677=(36,64,576)f32 pnnx.Expression pnnx_expr_8255 1 1 x8.45 11680 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.45=(36,64,192)f32 pnnx.Expression pnnx_expr_8250 0 1 11682 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_548 2 1 11677 11680 11681 $input=11677 $shape=11680 #11677=(36,64,576)f32 #11681=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_8249 0 1 23674 expr=0 pnnx.Expression pnnx_expr_8248 0 1 23675 expr=0 pnnx.Expression pnnx_expr_8247 0 1 23676 expr=0 pnnx.Expression pnnx_expr_8246 0 1 23677 expr=1 pnnx.Expression pnnx_expr_8245 0 1 23678 expr=0 pnnx.Expression pnnx_expr_8244 0 1 23679 expr=2 torch.permute torch.permute_2757 2 1 11681 11682 qkv1.45 $input=11681 $dims=11682 #11681=(36,64,3,6,32)f32 #qkv1.45=(3,36,6,64,32)f32 Tensor.select Tensor.select_821 3 1 qkv1.45 23674 23675 q.117 $input=qkv1.45 $dim=23674 $index=23675 #qkv1.45=(3,36,6,64,32)f32 #q.117=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8243 1 1 q.117 q1.45 expr=mul(@0,1.767767e-01) #q.117=(36,6,64,32)f32 #q1.45=(36,6,64,32)f32 Tensor.select Tensor.select_822 3 1 qkv1.45 23676 23677 k.117 $input=qkv1.45 $dim=23676 $index=23677 #qkv1.45=(3,36,6,64,32)f32 #k.117=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8241 0 1 11690 expr=[-1] Tensor.view Tensor.view_1614 2 1 relative_position_index.117 11690 11691 $input=relative_position_index.117 $shape=11690 #relative_position_index.117=(64,64)i64 #11691=(4096)i64 pnnx.Expression pnnx_expr_8240 1 1 11691 11692 expr=[@0] #11691=(4096)i64 pnnx.Expression pnnx_expr_8237 0 
1 11694 expr=[64,64,-1] Tensor.index Tensor.index_383 2 1 relative_position_bias_table.117 11692 11693 $input=relative_position_bias_table.117 $expr=11692 #relative_position_bias_table.117=(225,6)f32 #11693=(4096,6)f32 pnnx.Expression pnnx_expr_8233 0 1 11696 expr=[2,0,1] Tensor.view Tensor.view_1615 2 1 11693 11694 relative_position_bias.117 $input=11693 $shape=11694 #11693=(4096,6)f32 #relative_position_bias.117=(64,64,6)f32 pnnx.Expression pnnx_expr_8232 0 1 23687 expr=0 torch.permute torch.permute_2758 2 1 relative_position_bias.117 11696 11697 $input=relative_position_bias.117 $dims=11696 #relative_position_bias.117=(64,64,6)f32 #11697=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_174 1 1 11697 relative_position_bias1.45 memory_format=torch.contiguous_format $input=11697 #11697=(6,64,64)f32 #relative_position_bias1.45=(6,64,64)f32 torch.transpose torch.transpose_3095 3 1 k.117 11652 11653 11688 $input=k.117 $dim0=11652 $dim1=11653 #k.117=(36,6,64,32)f32 #11688=(36,6,32,64)f32 torch.matmul torch.matmul_2318 2 1 q1.45 11688 attn.235 $input=q1.45 $other=11688 #q1.45=(36,6,64,32)f32 #11688=(36,6,32,64)f32 #attn.235=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3331 2 1 relative_position_bias1.45 23687 11699 $input=relative_position_bias1.45 $dim=23687 #relative_position_bias1.45=(6,64,64)f32 #11699=(1,6,64,64)f32 pnnx.Expression pnnx_expr_8230 2 1 attn.235 11699 attn2.23 expr=add(@0,@1) #attn.235=(36,6,64,64)f32 #11699=(1,6,64,64)f32 #attn2.23=(36,6,64,64)f32 pnnx.Expression pnnx_expr_8221 2 1 x8.45 attn_mask.59 11706 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.45=(36,64,192)f32 #attn_mask.59=(36,64,64)f32 pnnx.Expression pnnx_expr_8220 0 1 23692 expr=1 pnnx.Expression pnnx_expr_8219 0 1 23693 expr=0 Tensor.view Tensor.view_1616 2 1 attn2.23 11706 11707 $input=attn2.23 $shape=11706 #attn2.23=(36,6,64,64)f32 #11707=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3332 2 1 attn_mask.59 23692 11708 $input=attn_mask.59 $dim=23692 #attn_mask.59=(36,64,64)f32 #11708=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3333 2 1 11708 23693 11709 $input=11708 $dim=23693 #11708=(36,1,64,64)f32 #11709=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_8217 2 1 11707 11709 attn3.23 expr=add(@0,@1) #11707=(1,36,6,64,64)f32 #11709=(1,36,1,64,64)f32 #attn3.23=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_8214 1 1 x8.45 11711 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.45=(36,64,192)f32 Tensor.view Tensor.view_1617 2 1 attn3.23 11711 input.261 $input=attn3.23 $shape=11711 #attn3.23=(1,36,6,64,64)f32 #input.261=(36,6,64,64)f32 nn.Softmax pnnx_unique_387 1 1 input.261 11713 dim=-1 #input.261=(36,6,64,64)f32 #11713=(36,6,64,64)f32 nn.Dropout pnnx_unique_388 1 1 11713 11714 #11713=(36,6,64,64)f32 #11714=(36,6,64,64)f32 Tensor.select Tensor.select_823 3 1 qkv1.45 23678 23679 v.117 $input=qkv1.45 $dim=23678 $index=23679 #qkv1.45=(3,36,6,64,32)f32 #v.117=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8213 0 1 23697 expr=1 pnnx.Expression pnnx_expr_8212 0 1 23698 expr=2 torch.matmul torch.matmul_2319 2 1 11714 v.117 11715 $input=11714 $other=v.117 #11714=(36,6,64,64)f32 #v.117=(36,6,64,32)f32 #11715=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8211 1 1 x8.45 11717 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.45=(36,64,192)f32 torch.transpose torch.transpose_3096 3 1 11715 23697 23698 11716 $input=11715 $dim0=23697 $dim1=23698 #11715=(36,6,64,32)f32 #11716=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_549 2 1 11716 11717 input1.47 $input=11716 $shape=11717 
#11716=(36,64,6,32)f32 #input1.47=(36,64,192)f32 nn.Linear pnnx_unique_389 1 1 input1.47 11719 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.47=(36,64,192)f32 #11719=(36,64,192)f32 nn.Dropout pnnx_unique_390 1 1 11719 11720 #11719=(36,64,192)f32 #11720=(36,64,192)f32 pnnx.Expression pnnx_expr_8207 1 1 11584 11721 expr=[-1,8,8,int(size(@0,2))] #11584=(1,2304,192)f32 pnnx.Expression pnnx_expr_8194 1 1 157 11727 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8187 0 1 11729 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1618 2 1 11720 11721 windows.117 $input=11720 $shape=11721 #11720=(36,64,192)f32 #windows.117=(36,8,8,192)f32 Tensor.view Tensor.view_1619 2 1 windows.117 11727 x9.45 $input=windows.117 $shape=11727 #windows.117=(36,8,8,192)f32 #x9.45=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8184 1 1 157 11732 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2759 2 1 x9.45 11729 11730 $input=x9.45 $dims=11729 #x9.45=(1,6,6,8,8,192)f32 #11730=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_175 1 1 11730 11731 memory_format=torch.contiguous_format $input=11730 #11730=(1,6,8,6,8,192)f32 #11731=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8181 0 1 11734 expr=[4,4] pnnx.Expression pnnx_expr_8178 0 1 11735 expr=[1,2] Tensor.view Tensor.view_1620 2 1 11731 11732 shifted_x.59 $input=11731 $shape=11732 #11731=(1,6,8,6,8,192)f32 #shifted_x.59=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8175 2 1 11584 157 11739 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11584=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2477 3 1 shifted_x.59 11734 11735 x10.23 $input=shifted_x.59 $shifts=11734 $dims=11735 #shifted_x.59=(1,48,48,192)f32 #x10.23=(1,48,48,192)f32 Tensor.view Tensor.view_1621 2 1 x10.23 11739 x11.23 $input=x10.23 $shape=11739 #x10.23=(1,48,48,192)f32 #x11.23=(1,2304,192)f32 pnnx.Expression pnnx_expr_8173 2 1 11584 x11.23 input.263 expr=add(@0,@1) #11584=(1,2304,192)f32 #x11.23=(1,2304,192)f32 #input.263=(1,2304,192)f32 nn.LayerNorm pnnx_unique_391 1 1 input.263 11743 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.263=(1,2304,192)f32 #11743=(1,2304,192)f32 nn.Linear pnnx_unique_392 1 1 11743 11748 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11743=(1,2304,192)f32 #11748=(1,2304,384)f32 nn.GELU pnnx_unique_393 1 1 11748 11749 #11748=(1,2304,384)f32 #11749=(1,2304,384)f32 nn.Dropout pnnx_unique_394 1 1 11749 11750 #11749=(1,2304,384)f32 #11750=(1,2304,384)f32 nn.Linear pnnx_unique_395 1 1 11750 11751 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11750=(1,2304,384)f32 #11751=(1,2304,192)f32 nn.Dropout pnnx_unique_396 1 1 11751 11752 #11751=(1,2304,192)f32 #11752=(1,2304,192)f32 pnnx.Expression pnnx_expr_8171 2 1 input.263 11752 11754 expr=add(@0,@1) #input.263=(1,2304,192)f32 #11752=(1,2304,192)f32 #11754=(1,2304,192)f32 nn.LayerNorm pnnx_unique_397 1 1 11754 11781 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11754=(1,2304,192)f32 #11781=(1,2304,192)f32 pnnx.Expression pnnx_expr_8149 2 1 11754 157 11782 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11754=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1622 2 1 11781 11782 x.119 $input=11781 $shape=11782 #11781=(1,2304,192)f32 #x.119=(1,48,48,192)f32 
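Each block above closes with the same pre-norm MLP tail: LayerNorm → Linear 192→384 → GELU → Dropout → Linear 384→192 → Dropout, added back onto the sub-block input (e.g. pnnx_unique_391..396 producing 11754 from input.263). A minimal sketch under those assumptions (class name and the zero dropout rate are illustrative):

import torch
import torch.nn as nn

class MlpTail(nn.Module):
    # pre-norm MLP with residual, matching the traced pnnx_unique_391..396 chain
    def __init__(self, dim=192, hidden=384, drop=0.0):
        super().__init__()
        self.norm2 = nn.LayerNorm(dim, eps=1e-5)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden), nn.GELU(), nn.Dropout(drop),
            nn.Linear(hidden, dim), nn.Dropout(drop),
        )

    def forward(self, x):               # x: (1, 2304, 192), e.g. input.263
        return x + self.mlp(self.norm2(x))

out = MlpTail()(torch.randn(1, 2304, 192))   # -> (1, 2304, 192), cf. 11754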
pnnx.Expression pnnx_expr_8128 1 1 x.119 11799 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.119=(1,48,48,192)f32 pnnx.Expression pnnx_expr_8123 0 1 11801 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1623 2 1 x.119 11799 x5.61 $input=x.119 $shape=11799 #x.119=(1,48,48,192)f32 #x5.61=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8120 1 1 x.119 11804 expr=[-1,8,8,int(size(@0,3))] #x.119=(1,48,48,192)f32 torch.permute torch.permute_2760 2 1 x5.61 11801 11802 $input=x5.61 $dims=11801 #x5.61=(1,6,8,6,8,192)f32 #11802=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_176 1 1 11802 11803 memory_format=torch.contiguous_format $input=11802 #11802=(1,6,6,8,8,192)f32 #11803=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8118 1 1 11754 11806 expr=[-1,64,int(size(@0,2))] #11754=(1,2304,192)f32 pnnx.Expression pnnx_expr_8108 0 1 11817 expr=-2 pnnx.Expression pnnx_expr_8107 0 1 11818 expr=-1 pnnx.Attribute pnnx_unique_398 0 1 relative_position_bias_table.119 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.119=(225,6)f32 pnnx.Attribute pnnx_unique_399 0 1 relative_position_index.119 @relative_position_index=(64,64)i64 #relative_position_index.119=(64,64)i64 Tensor.view Tensor.view_1624 2 1 11803 11804 x_windows.119 $input=11803 $shape=11804 #11803=(1,6,6,8,8,192)f32 #x_windows.119=(36,8,8,192)f32 Tensor.view Tensor.view_1625 2 1 x_windows.119 11806 x6.47 $input=x_windows.119 $shape=11806 #x_windows.119=(36,8,8,192)f32 #x6.47=(36,64,192)f32 nn.Linear pnnx_unique_400 1 1 x6.47 11838 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.47=(36,64,192)f32 #11838=(36,64,576)f32 pnnx.Expression pnnx_expr_8092 1 1 x6.47 11841 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.47=(36,64,192)f32 pnnx.Expression pnnx_expr_8087 0 1 11843 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_550 2 1 11838 11841 11842 $input=11838 $shape=11841 #11838=(36,64,576)f32 #11842=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_8086 0 1 23742 expr=0 pnnx.Expression pnnx_expr_8085 0 1 23743 expr=0 pnnx.Expression pnnx_expr_8084 0 1 23744 expr=0 pnnx.Expression pnnx_expr_8083 0 1 23745 expr=1 pnnx.Expression pnnx_expr_8082 0 1 23746 expr=0 pnnx.Expression pnnx_expr_8081 0 1 23747 expr=2 torch.permute torch.permute_2761 2 1 11842 11843 qkv1.47 $input=11842 $dims=11843 #11842=(36,64,3,6,32)f32 #qkv1.47=(3,36,6,64,32)f32 Tensor.select Tensor.select_824 3 1 qkv1.47 23742 23743 q.119 $input=qkv1.47 $dim=23742 $index=23743 #qkv1.47=(3,36,6,64,32)f32 #q.119=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8080 1 1 q.119 q1.47 expr=mul(@0,1.767767e-01) #q.119=(36,6,64,32)f32 #q1.47=(36,6,64,32)f32 Tensor.select Tensor.select_825 3 1 qkv1.47 23744 23745 k.119 $input=qkv1.47 $dim=23744 $index=23745 #qkv1.47=(3,36,6,64,32)f32 #k.119=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8078 0 1 11851 expr=[-1] Tensor.view Tensor.view_1626 2 1 relative_position_index.119 11851 11852 $input=relative_position_index.119 $shape=11851 #relative_position_index.119=(64,64)i64 #11852=(4096)i64 pnnx.Expression pnnx_expr_8077 1 1 11852 11853 expr=[@0] #11852=(4096)i64 pnnx.Expression pnnx_expr_8074 0 1 11855 expr=[64,64,-1] Tensor.index Tensor.index_384 2 1 relative_position_bias_table.119 11853 11854 $input=relative_position_bias_table.119 $expr=11853 #relative_position_bias_table.119=(225,6)f32 #11854=(4096,6)f32 pnnx.Expression pnnx_expr_8070 0 1 11857 expr=[2,0,1] Tensor.view Tensor.view_1627 2 1 11854 11855 relative_position_bias.119 $input=11854 $shape=11855 
#11854=(4096,6)f32 #relative_position_bias.119=(64,64,6)f32 pnnx.Expression pnnx_expr_8069 0 1 23755 expr=0 torch.permute torch.permute_2762 2 1 relative_position_bias.119 11857 11858 $input=relative_position_bias.119 $dims=11857 #relative_position_bias.119=(64,64,6)f32 #11858=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_177 1 1 11858 relative_position_bias1.47 memory_format=torch.contiguous_format $input=11858 #11858=(6,64,64)f32 #relative_position_bias1.47=(6,64,64)f32 torch.transpose torch.transpose_3097 3 1 k.119 11817 11818 11849 $input=k.119 $dim0=11817 $dim1=11818 #k.119=(36,6,64,32)f32 #11849=(36,6,32,64)f32 torch.matmul torch.matmul_2320 2 1 q1.47 11849 attn.239 $input=q1.47 $other=11849 #q1.47=(36,6,64,32)f32 #11849=(36,6,32,64)f32 #attn.239=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3334 2 1 relative_position_bias1.47 23755 11860 $input=relative_position_bias1.47 $dim=23755 #relative_position_bias1.47=(6,64,64)f32 #11860=(1,6,64,64)f32 pnnx.Expression pnnx_expr_8067 2 1 attn.239 11860 input.265 expr=add(@0,@1) #attn.239=(36,6,64,64)f32 #11860=(1,6,64,64)f32 #input.265=(36,6,64,64)f32 nn.Softmax pnnx_unique_401 1 1 input.265 11862 dim=-1 #input.265=(36,6,64,64)f32 #11862=(36,6,64,64)f32 nn.Dropout pnnx_unique_402 1 1 11862 11863 #11862=(36,6,64,64)f32 #11863=(36,6,64,64)f32 Tensor.select Tensor.select_826 3 1 qkv1.47 23746 23747 v.119 $input=qkv1.47 $dim=23746 $index=23747 #qkv1.47=(3,36,6,64,32)f32 #v.119=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8066 0 1 23757 expr=1 pnnx.Expression pnnx_expr_8065 0 1 23758 expr=2 torch.matmul torch.matmul_2321 2 1 11863 v.119 11864 $input=11863 $other=v.119 #11863=(36,6,64,64)f32 #v.119=(36,6,64,32)f32 #11864=(36,6,64,32)f32 pnnx.Expression pnnx_expr_8064 1 1 x6.47 11866 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.47=(36,64,192)f32 torch.transpose torch.transpose_3098 3 1 11864 23757 23758 11865 $input=11864 $dim0=23757 $dim1=23758 #11864=(36,6,64,32)f32 #11865=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_551 2 1 11865 11866 input1.49 $input=11865 $shape=11866 #11865=(36,64,6,32)f32 #input1.49=(36,64,192)f32 nn.Linear pnnx_unique_403 1 1 input1.49 11868 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.49=(36,64,192)f32 #11868=(36,64,192)f32 nn.Dropout pnnx_unique_404 1 1 11868 11869 #11868=(36,64,192)f32 #11869=(36,64,192)f32 pnnx.Expression pnnx_expr_8060 1 1 11754 11870 expr=[-1,8,8,int(size(@0,2))] #11754=(1,2304,192)f32 pnnx.Expression pnnx_expr_8047 1 1 157 11876 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8040 0 1 11878 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1628 2 1 11869 11870 windows.119 $input=11869 $shape=11870 #11869=(36,64,192)f32 #windows.119=(36,8,8,192)f32 Tensor.view Tensor.view_1629 2 1 windows.119 11876 x7.47 $input=windows.119 $shape=11876 #windows.119=(36,8,8,192)f32 #x7.47=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_8037 1 1 157 11881 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2763 2 1 x7.47 11878 11879 $input=x7.47 $dims=11878 #x7.47=(1,6,6,8,8,192)f32 #11879=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_178 1 1 11879 11880 memory_format=torch.contiguous_format $input=11879 #11879=(1,6,8,6,8,192)f32 #11880=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_8034 2 1 11754 157 11885 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11754=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1630 2 1 11880 
11881 x8.47 $input=11880 $shape=11881 #11880=(1,6,8,6,8,192)f32 #x8.47=(1,48,48,192)f32 Tensor.view Tensor.view_1631 2 1 x8.47 11885 x9.47 $input=x8.47 $shape=11885 #x8.47=(1,48,48,192)f32 #x9.47=(1,2304,192)f32 pnnx.Expression pnnx_expr_8032 2 1 11754 x9.47 input.267 expr=add(@0,@1) #11754=(1,2304,192)f32 #x9.47=(1,2304,192)f32 #input.267=(1,2304,192)f32 nn.LayerNorm pnnx_unique_405 1 1 input.267 11889 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.267=(1,2304,192)f32 #11889=(1,2304,192)f32 nn.Linear pnnx_unique_406 1 1 11889 11894 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #11889=(1,2304,192)f32 #11894=(1,2304,384)f32 nn.GELU pnnx_unique_407 1 1 11894 11895 #11894=(1,2304,384)f32 #11895=(1,2304,384)f32 nn.Dropout pnnx_unique_408 1 1 11895 11896 #11895=(1,2304,384)f32 #11896=(1,2304,384)f32 nn.Linear pnnx_unique_409 1 1 11896 11897 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #11896=(1,2304,384)f32 #11897=(1,2304,192)f32 nn.Dropout pnnx_unique_410 1 1 11897 11898 #11897=(1,2304,192)f32 #11898=(1,2304,192)f32 pnnx.Expression pnnx_expr_8030 2 1 input.267 11898 11900 expr=add(@0,@1) #input.267=(1,2304,192)f32 #11898=(1,2304,192)f32 #11900=(1,2304,192)f32 pnnx.Attribute pnnx_unique_411 0 1 attn_mask.61 @attn_mask=(36,64,64)f32 #attn_mask.61=(36,64,64)f32 nn.LayerNorm pnnx_unique_412 1 1 11900 11929 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #11900=(1,2304,192)f32 #11929=(1,2304,192)f32 pnnx.Expression pnnx_expr_8007 2 1 11900 157 11930 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #11900=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_8005 0 1 11932 expr=[-4,-4] pnnx.Expression pnnx_expr_8003 0 1 11933 expr=[1,2] Tensor.view Tensor.view_1632 2 1 11929 11930 x.121 $input=11929 $shape=11930 #11929=(1,2304,192)f32 #x.121=(1,48,48,192)f32 torch.roll torch.roll_2478 3 1 x.121 11932 11933 x6.49 $input=x.121 $shifts=11932 $dims=11933 #x.121=(1,48,48,192)f32 #x6.49=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7981 1 1 x6.49 11950 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.49=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7976 0 1 11952 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1633 2 1 x6.49 11950 x7.49 $input=x6.49 $shape=11950 #x6.49=(1,48,48,192)f32 #x7.49=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7973 1 1 x6.49 11955 expr=[-1,8,8,int(size(@0,3))] #x6.49=(1,48,48,192)f32 torch.permute torch.permute_2764 2 1 x7.49 11952 11953 $input=x7.49 $dims=11952 #x7.49=(1,6,8,6,8,192)f32 #11953=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_179 1 1 11953 11954 memory_format=torch.contiguous_format $input=11953 #11953=(1,6,6,8,8,192)f32 #11954=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7971 1 1 11900 11957 expr=[-1,64,int(size(@0,2))] #11900=(1,2304,192)f32 pnnx.Expression pnnx_expr_7961 0 1 11968 expr=-2 pnnx.Expression pnnx_expr_7960 0 1 11969 expr=-1 pnnx.Attribute pnnx_unique_413 0 1 relative_position_bias_table.121 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.121=(225,6)f32 pnnx.Attribute pnnx_unique_414 0 1 relative_position_index.121 @relative_position_index=(64,64)i64 #relative_position_index.121=(64,64)i64 Tensor.view Tensor.view_1634 2 1 11954 11955 x_windows.121 $input=11954 $shape=11955 #11954=(1,6,6,8,8,192)f32 #x_windows.121=(36,8,8,192)f32 Tensor.view Tensor.view_1635 2 1 x_windows.121 11957 x8.49 
$input=x_windows.121 $shape=11957 #x_windows.121=(36,8,8,192)f32 #x8.49=(36,64,192)f32 nn.Linear pnnx_unique_415 1 1 x8.49 11993 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.49=(36,64,192)f32 #11993=(36,64,576)f32 pnnx.Expression pnnx_expr_7941 1 1 x8.49 11996 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.49=(36,64,192)f32 pnnx.Expression pnnx_expr_7936 0 1 11998 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_552 2 1 11993 11996 11997 $input=11993 $shape=11996 #11993=(36,64,576)f32 #11997=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7935 0 1 23801 expr=0 pnnx.Expression pnnx_expr_7934 0 1 23802 expr=0 pnnx.Expression pnnx_expr_7933 0 1 23803 expr=0 pnnx.Expression pnnx_expr_7932 0 1 23804 expr=1 pnnx.Expression pnnx_expr_7931 0 1 23805 expr=0 pnnx.Expression pnnx_expr_7930 0 1 23806 expr=2 torch.permute torch.permute_2765 2 1 11997 11998 qkv1.49 $input=11997 $dims=11998 #11997=(36,64,3,6,32)f32 #qkv1.49=(3,36,6,64,32)f32 Tensor.select Tensor.select_827 3 1 qkv1.49 23801 23802 q.121 $input=qkv1.49 $dim=23801 $index=23802 #qkv1.49=(3,36,6,64,32)f32 #q.121=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7929 1 1 q.121 q1.49 expr=mul(@0,1.767767e-01) #q.121=(36,6,64,32)f32 #q1.49=(36,6,64,32)f32 Tensor.select Tensor.select_828 3 1 qkv1.49 23803 23804 k.121 $input=qkv1.49 $dim=23803 $index=23804 #qkv1.49=(3,36,6,64,32)f32 #k.121=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7927 0 1 12006 expr=[-1] Tensor.view Tensor.view_1636 2 1 relative_position_index.121 12006 12007 $input=relative_position_index.121 $shape=12006 #relative_position_index.121=(64,64)i64 #12007=(4096)i64 pnnx.Expression pnnx_expr_7926 1 1 12007 12008 expr=[@0] #12007=(4096)i64 pnnx.Expression pnnx_expr_7923 0 1 12010 expr=[64,64,-1] Tensor.index Tensor.index_385 2 1 relative_position_bias_table.121 12008 12009 $input=relative_position_bias_table.121 $expr=12008 #relative_position_bias_table.121=(225,6)f32 #12009=(4096,6)f32 pnnx.Expression pnnx_expr_7919 0 1 12012 expr=[2,0,1] Tensor.view Tensor.view_1637 2 1 12009 12010 relative_position_bias.121 $input=12009 $shape=12010 #12009=(4096,6)f32 #relative_position_bias.121=(64,64,6)f32 pnnx.Expression pnnx_expr_7918 0 1 23814 expr=0 torch.permute torch.permute_2766 2 1 relative_position_bias.121 12012 12013 $input=relative_position_bias.121 $dims=12012 #relative_position_bias.121=(64,64,6)f32 #12013=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_180 1 1 12013 relative_position_bias1.49 memory_format=torch.contiguous_format $input=12013 #12013=(6,64,64)f32 #relative_position_bias1.49=(6,64,64)f32 torch.transpose torch.transpose_3099 3 1 k.121 11968 11969 12004 $input=k.121 $dim0=11968 $dim1=11969 #k.121=(36,6,64,32)f32 #12004=(36,6,32,64)f32 torch.matmul torch.matmul_2322 2 1 q1.49 12004 attn.243 $input=q1.49 $other=12004 #q1.49=(36,6,64,32)f32 #12004=(36,6,32,64)f32 #attn.243=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3335 2 1 relative_position_bias1.49 23814 12015 $input=relative_position_bias1.49 $dim=23814 #relative_position_bias1.49=(6,64,64)f32 #12015=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7916 2 1 attn.243 12015 attn2.25 expr=add(@0,@1) #attn.243=(36,6,64,64)f32 #12015=(1,6,64,64)f32 #attn2.25=(36,6,64,64)f32 pnnx.Expression pnnx_expr_7907 2 1 x8.49 attn_mask.61 12022 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.49=(36,64,192)f32 #attn_mask.61=(36,64,64)f32 pnnx.Expression pnnx_expr_7906 0 1 23819 expr=1 pnnx.Expression pnnx_expr_7905 0 1 23820 expr=0 Tensor.view 
Tensor.view_1638 2 1 attn2.25 12022 12023 $input=attn2.25 $shape=12022 #attn2.25=(36,6,64,64)f32 #12023=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3336 2 1 attn_mask.61 23819 12024 $input=attn_mask.61 $dim=23819 #attn_mask.61=(36,64,64)f32 #12024=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3337 2 1 12024 23820 12025 $input=12024 $dim=23820 #12024=(36,1,64,64)f32 #12025=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_7903 2 1 12023 12025 attn3.25 expr=add(@0,@1) #12023=(1,36,6,64,64)f32 #12025=(1,36,1,64,64)f32 #attn3.25=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_7900 1 1 x8.49 12027 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.49=(36,64,192)f32 Tensor.view Tensor.view_1639 2 1 attn3.25 12027 input.269 $input=attn3.25 $shape=12027 #attn3.25=(1,36,6,64,64)f32 #input.269=(36,6,64,64)f32 nn.Softmax pnnx_unique_416 1 1 input.269 12029 dim=-1 #input.269=(36,6,64,64)f32 #12029=(36,6,64,64)f32 nn.Dropout pnnx_unique_417 1 1 12029 12030 #12029=(36,6,64,64)f32 #12030=(36,6,64,64)f32 Tensor.select Tensor.select_829 3 1 qkv1.49 23805 23806 v.121 $input=qkv1.49 $dim=23805 $index=23806 #qkv1.49=(3,36,6,64,32)f32 #v.121=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7899 0 1 23824 expr=1 pnnx.Expression pnnx_expr_7898 0 1 23825 expr=2 torch.matmul torch.matmul_2323 2 1 12030 v.121 12031 $input=12030 $other=v.121 #12030=(36,6,64,64)f32 #v.121=(36,6,64,32)f32 #12031=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7897 1 1 x8.49 12033 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.49=(36,64,192)f32 torch.transpose torch.transpose_3100 3 1 12031 23824 23825 12032 $input=12031 $dim0=23824 $dim1=23825 #12031=(36,6,64,32)f32 #12032=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_553 2 1 12032 12033 input1.51 $input=12032 $shape=12033 #12032=(36,64,6,32)f32 #input1.51=(36,64,192)f32 nn.Linear pnnx_unique_418 1 1 input1.51 12035 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.51=(36,64,192)f32 #12035=(36,64,192)f32 nn.Dropout pnnx_unique_419 1 1 12035 12036 #12035=(36,64,192)f32 #12036=(36,64,192)f32 pnnx.Expression pnnx_expr_7893 1 1 11900 12037 expr=[-1,8,8,int(size(@0,2))] #11900=(1,2304,192)f32 pnnx.Expression pnnx_expr_7880 1 1 157 12043 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7873 0 1 12045 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1640 2 1 12036 12037 windows.121 $input=12036 $shape=12037 #12036=(36,64,192)f32 #windows.121=(36,8,8,192)f32 Tensor.view Tensor.view_1641 2 1 windows.121 12043 x9.49 $input=windows.121 $shape=12043 #windows.121=(36,8,8,192)f32 #x9.49=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7870 1 1 157 12048 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2767 2 1 x9.49 12045 12046 $input=x9.49 $dims=12045 #x9.49=(1,6,6,8,8,192)f32 #12046=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_181 1 1 12046 12047 memory_format=torch.contiguous_format $input=12046 #12046=(1,6,8,6,8,192)f32 #12047=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7867 0 1 12050 expr=[4,4] pnnx.Expression pnnx_expr_7864 0 1 12051 expr=[1,2] Tensor.view Tensor.view_1642 2 1 12047 12048 shifted_x.61 $input=12047 $shape=12048 #12047=(1,6,8,6,8,192)f32 #shifted_x.61=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7861 2 1 11900 157 12055 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #11900=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2479 3 1 shifted_x.61 12050 12051 x10.25 $input=shifted_x.61 $shifts=12050 $dims=12051 
#shifted_x.61=(1,48,48,192)f32 #x10.25=(1,48,48,192)f32 Tensor.view Tensor.view_1643 2 1 x10.25 12055 x11.25 $input=x10.25 $shape=12055 #x10.25=(1,48,48,192)f32 #x11.25=(1,2304,192)f32 pnnx.Expression pnnx_expr_7859 2 1 11900 x11.25 input.271 expr=add(@0,@1) #11900=(1,2304,192)f32 #x11.25=(1,2304,192)f32 #input.271=(1,2304,192)f32 nn.LayerNorm pnnx_unique_420 1 1 input.271 12059 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.271=(1,2304,192)f32 #12059=(1,2304,192)f32 nn.Linear pnnx_unique_421 1 1 12059 12064 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12059=(1,2304,192)f32 #12064=(1,2304,384)f32 nn.GELU pnnx_unique_422 1 1 12064 12065 #12064=(1,2304,384)f32 #12065=(1,2304,384)f32 nn.Dropout pnnx_unique_423 1 1 12065 12066 #12065=(1,2304,384)f32 #12066=(1,2304,384)f32 nn.Linear pnnx_unique_424 1 1 12066 12067 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12066=(1,2304,384)f32 #12067=(1,2304,192)f32 nn.Dropout pnnx_unique_425 1 1 12067 12068 #12067=(1,2304,192)f32 #12068=(1,2304,192)f32 pnnx.Expression pnnx_expr_7857 2 1 input.271 12068 12070 expr=add(@0,@1) #input.271=(1,2304,192)f32 #12068=(1,2304,192)f32 #12070=(1,2304,192)f32 pnnx.Expression pnnx_expr_7855 0 1 12072 expr=1 pnnx.Expression pnnx_expr_7854 0 1 12073 expr=2 pnnx.Expression pnnx_expr_7849 2 1 12070 157 12079 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #12070=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3101 3 1 12070 12072 12073 12078 $input=12070 $dim0=12072 $dim1=12073 #12070=(1,2304,192)f32 #12078=(1,192,2304)f32 Tensor.view Tensor.view_1644 2 1 12078 12079 input.273 $input=12078 $shape=12079 #12078=(1,192,2304)f32 #input.273=(1,192,48,48)f32 nn.Conv2d pnnx_unique_426 1 1 input.273 12081 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.273=(1,192,48,48)f32 #12081=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7848 0 1 12082 expr=-1 pnnx.Expression pnnx_expr_7847 0 1 12083 expr=2 pnnx.Expression pnnx_expr_7846 0 1 12084 expr=1 pnnx.Expression pnnx_expr_7845 0 1 23852 expr=2 torch.flatten torch.flatten_2194 3 1 12081 12083 12082 12085 $input=12081 $start_dim=12083 $end_dim=12082 #12081=(1,192,48,48)f32 #12085=(1,192,2304)f32 torch.transpose torch.transpose_3102 3 1 12085 12084 23852 12086 $input=12085 $dim0=12084 $dim1=23852 #12085=(1,192,2304)f32 #12086=(1,2304,192)f32 pnnx.Expression pnnx_expr_7844 2 1 12086 11105 12087 expr=add(@0,@1) #12086=(1,2304,192)f32 #11105=(1,2304,192)f32 #12087=(1,2304,192)f32 nn.LayerNorm pnnx_unique_427 1 1 12087 12131 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12087=(1,2304,192)f32 #12131=(1,2304,192)f32 pnnx.Expression pnnx_expr_7821 2 1 12087 157 12132 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12087=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1645 2 1 12131 12132 x.123 $input=12131 $shape=12132 #12131=(1,2304,192)f32 #x.123=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7800 1 1 x.123 12149 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.123=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7795 0 1 12151 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1646 2 1 x.123 12149 x5.63 $input=x.123 $shape=12149 #x.123=(1,48,48,192)f32 #x5.63=(1,6,8,6,8,192)f32 pnnx.Expression 
pnnx_expr_7792 1 1 x.123 12154 expr=[-1,8,8,int(size(@0,3))] #x.123=(1,48,48,192)f32 torch.permute torch.permute_2768 2 1 x5.63 12151 12152 $input=x5.63 $dims=12151 #x5.63=(1,6,8,6,8,192)f32 #12152=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_182 1 1 12152 12153 memory_format=torch.contiguous_format $input=12152 #12152=(1,6,6,8,8,192)f32 #12153=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7790 1 1 12087 12156 expr=[-1,64,int(size(@0,2))] #12087=(1,2304,192)f32 pnnx.Expression pnnx_expr_7780 0 1 12167 expr=-2 pnnx.Expression pnnx_expr_7779 0 1 12168 expr=-1 pnnx.Attribute pnnx_unique_428 0 1 relative_position_bias_table.123 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.123=(225,6)f32 pnnx.Attribute pnnx_unique_429 0 1 relative_position_index.123 @relative_position_index=(64,64)i64 #relative_position_index.123=(64,64)i64 Tensor.view Tensor.view_1647 2 1 12153 12154 x_windows.123 $input=12153 $shape=12154 #12153=(1,6,6,8,8,192)f32 #x_windows.123=(36,8,8,192)f32 Tensor.view Tensor.view_1648 2 1 x_windows.123 12156 x6.51 $input=x_windows.123 $shape=12156 #x_windows.123=(36,8,8,192)f32 #x6.51=(36,64,192)f32 nn.Linear pnnx_unique_430 1 1 x6.51 12188 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.51=(36,64,192)f32 #12188=(36,64,576)f32 pnnx.Expression pnnx_expr_7764 1 1 x6.51 12191 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.51=(36,64,192)f32 pnnx.Expression pnnx_expr_7759 0 1 12193 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_554 2 1 12188 12191 12192 $input=12188 $shape=12191 #12188=(36,64,576)f32 #12192=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7758 0 1 23870 expr=0 pnnx.Expression pnnx_expr_7757 0 1 23871 expr=0 pnnx.Expression pnnx_expr_7756 0 1 23872 expr=0 pnnx.Expression pnnx_expr_7755 0 1 23873 expr=1 pnnx.Expression pnnx_expr_7754 0 1 23874 expr=0 pnnx.Expression pnnx_expr_7753 0 1 23875 expr=2 torch.permute torch.permute_2769 2 1 12192 12193 qkv1.51 $input=12192 $dims=12193 #12192=(36,64,3,6,32)f32 #qkv1.51=(3,36,6,64,32)f32 Tensor.select Tensor.select_830 3 1 qkv1.51 23870 23871 q.123 $input=qkv1.51 $dim=23870 $index=23871 #qkv1.51=(3,36,6,64,32)f32 #q.123=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7752 1 1 q.123 q1.51 expr=mul(@0,1.767767e-01) #q.123=(36,6,64,32)f32 #q1.51=(36,6,64,32)f32 Tensor.select Tensor.select_831 3 1 qkv1.51 23872 23873 k.123 $input=qkv1.51 $dim=23872 $index=23873 #qkv1.51=(3,36,6,64,32)f32 #k.123=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7750 0 1 12201 expr=[-1] Tensor.view Tensor.view_1649 2 1 relative_position_index.123 12201 12202 $input=relative_position_index.123 $shape=12201 #relative_position_index.123=(64,64)i64 #12202=(4096)i64 pnnx.Expression pnnx_expr_7749 1 1 12202 12203 expr=[@0] #12202=(4096)i64 pnnx.Expression pnnx_expr_7746 0 1 12205 expr=[64,64,-1] Tensor.index Tensor.index_386 2 1 relative_position_bias_table.123 12203 12204 $input=relative_position_bias_table.123 $expr=12203 #relative_position_bias_table.123=(225,6)f32 #12204=(4096,6)f32 pnnx.Expression pnnx_expr_7742 0 1 12207 expr=[2,0,1] Tensor.view Tensor.view_1650 2 1 12204 12205 relative_position_bias.123 $input=12204 $shape=12205 #12204=(4096,6)f32 #relative_position_bias.123=(64,64,6)f32 pnnx.Expression pnnx_expr_7741 0 1 23883 expr=0 torch.permute torch.permute_2770 2 1 relative_position_bias.123 12207 12208 $input=relative_position_bias.123 $dims=12207 #relative_position_bias.123=(64,64,6)f32 #12208=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_183 1 1 12208 
relative_position_bias1.51 memory_format=torch.contiguous_format $input=12208 #12208=(6,64,64)f32 #relative_position_bias1.51=(6,64,64)f32 torch.transpose torch.transpose_3103 3 1 k.123 12167 12168 12199 $input=k.123 $dim0=12167 $dim1=12168 #k.123=(36,6,64,32)f32 #12199=(36,6,32,64)f32 torch.matmul torch.matmul_2324 2 1 q1.51 12199 attn.247 $input=q1.51 $other=12199 #q1.51=(36,6,64,32)f32 #12199=(36,6,32,64)f32 #attn.247=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3338 2 1 relative_position_bias1.51 23883 12210 $input=relative_position_bias1.51 $dim=23883 #relative_position_bias1.51=(6,64,64)f32 #12210=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7739 2 1 attn.247 12210 input.275 expr=add(@0,@1) #attn.247=(36,6,64,64)f32 #12210=(1,6,64,64)f32 #input.275=(36,6,64,64)f32 nn.Softmax pnnx_unique_431 1 1 input.275 12212 dim=-1 #input.275=(36,6,64,64)f32 #12212=(36,6,64,64)f32 nn.Dropout pnnx_unique_432 1 1 12212 12213 #12212=(36,6,64,64)f32 #12213=(36,6,64,64)f32 Tensor.select Tensor.select_832 3 1 qkv1.51 23874 23875 v.123 $input=qkv1.51 $dim=23874 $index=23875 #qkv1.51=(3,36,6,64,32)f32 #v.123=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7738 0 1 23885 expr=1 pnnx.Expression pnnx_expr_7737 0 1 23886 expr=2 torch.matmul torch.matmul_2325 2 1 12213 v.123 12214 $input=12213 $other=v.123 #12213=(36,6,64,64)f32 #v.123=(36,6,64,32)f32 #12214=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7736 1 1 x6.51 12216 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.51=(36,64,192)f32 torch.transpose torch.transpose_3104 3 1 12214 23885 23886 12215 $input=12214 $dim0=23885 $dim1=23886 #12214=(36,6,64,32)f32 #12215=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_555 2 1 12215 12216 input1.53 $input=12215 $shape=12216 #12215=(36,64,6,32)f32 #input1.53=(36,64,192)f32 nn.Linear pnnx_unique_433 1 1 input1.53 12218 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.53=(36,64,192)f32 #12218=(36,64,192)f32 nn.Dropout pnnx_unique_434 1 1 12218 12219 #12218=(36,64,192)f32 #12219=(36,64,192)f32 pnnx.Expression pnnx_expr_7732 1 1 12087 12220 expr=[-1,8,8,int(size(@0,2))] #12087=(1,2304,192)f32 pnnx.Expression pnnx_expr_7719 1 1 157 12226 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7712 0 1 12228 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1651 2 1 12219 12220 windows.123 $input=12219 $shape=12220 #12219=(36,64,192)f32 #windows.123=(36,8,8,192)f32 Tensor.view Tensor.view_1652 2 1 windows.123 12226 x7.51 $input=windows.123 $shape=12226 #windows.123=(36,8,8,192)f32 #x7.51=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7709 1 1 157 12231 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2771 2 1 x7.51 12228 12229 $input=x7.51 $dims=12228 #x7.51=(1,6,6,8,8,192)f32 #12229=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_184 1 1 12229 12230 memory_format=torch.contiguous_format $input=12229 #12229=(1,6,8,6,8,192)f32 #12230=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7706 2 1 12087 157 12235 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12087=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1653 2 1 12230 12231 x8.51 $input=12230 $shape=12231 #12230=(1,6,8,6,8,192)f32 #x8.51=(1,48,48,192)f32 Tensor.view Tensor.view_1654 2 1 x8.51 12235 x9.51 $input=x8.51 $shape=12235 #x8.51=(1,48,48,192)f32 #x9.51=(1,2304,192)f32 pnnx.Expression pnnx_expr_7704 2 1 12087 x9.51 input.277 expr=add(@0,@1) #12087=(1,2304,192)f32 #x9.51=(1,2304,192)f32 
#input.277=(1,2304,192)f32 nn.LayerNorm pnnx_unique_435 1 1 input.277 12239 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.277=(1,2304,192)f32 #12239=(1,2304,192)f32 nn.Linear pnnx_unique_436 1 1 12239 12244 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12239=(1,2304,192)f32 #12244=(1,2304,384)f32 nn.GELU pnnx_unique_437 1 1 12244 12245 #12244=(1,2304,384)f32 #12245=(1,2304,384)f32 nn.Dropout pnnx_unique_438 1 1 12245 12246 #12245=(1,2304,384)f32 #12246=(1,2304,384)f32 nn.Linear pnnx_unique_439 1 1 12246 12247 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12246=(1,2304,384)f32 #12247=(1,2304,192)f32 nn.Dropout pnnx_unique_440 1 1 12247 12248 #12247=(1,2304,192)f32 #12248=(1,2304,192)f32 pnnx.Expression pnnx_expr_7702 2 1 input.277 12248 12250 expr=add(@0,@1) #input.277=(1,2304,192)f32 #12248=(1,2304,192)f32 #12250=(1,2304,192)f32 pnnx.Attribute pnnx_unique_441 0 1 attn_mask.63 @attn_mask=(36,64,64)f32 #attn_mask.63=(36,64,64)f32 nn.LayerNorm pnnx_unique_442 1 1 12250 12279 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12250=(1,2304,192)f32 #12279=(1,2304,192)f32 pnnx.Expression pnnx_expr_7679 2 1 12250 157 12280 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12250=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7677 0 1 12282 expr=[-4,-4] pnnx.Expression pnnx_expr_7675 0 1 12283 expr=[1,2] Tensor.view Tensor.view_1655 2 1 12279 12280 x.125 $input=12279 $shape=12280 #12279=(1,2304,192)f32 #x.125=(1,48,48,192)f32 torch.roll torch.roll_2480 3 1 x.125 12282 12283 x6.53 $input=x.125 $shifts=12282 $dims=12283 #x.125=(1,48,48,192)f32 #x6.53=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7653 1 1 x6.53 12300 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.53=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7648 0 1 12302 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1656 2 1 x6.53 12300 x7.53 $input=x6.53 $shape=12300 #x6.53=(1,48,48,192)f32 #x7.53=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7645 1 1 x6.53 12305 expr=[-1,8,8,int(size(@0,3))] #x6.53=(1,48,48,192)f32 torch.permute torch.permute_2772 2 1 x7.53 12302 12303 $input=x7.53 $dims=12302 #x7.53=(1,6,8,6,8,192)f32 #12303=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_185 1 1 12303 12304 memory_format=torch.contiguous_format $input=12303 #12303=(1,6,6,8,8,192)f32 #12304=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7643 1 1 12250 12307 expr=[-1,64,int(size(@0,2))] #12250=(1,2304,192)f32 pnnx.Expression pnnx_expr_7633 0 1 12318 expr=-2 pnnx.Expression pnnx_expr_7632 0 1 12319 expr=-1 pnnx.Attribute pnnx_unique_443 0 1 relative_position_bias_table.125 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.125=(225,6)f32 pnnx.Attribute pnnx_unique_444 0 1 relative_position_index.125 @relative_position_index=(64,64)i64 #relative_position_index.125=(64,64)i64 Tensor.view Tensor.view_1657 2 1 12304 12305 x_windows.125 $input=12304 $shape=12305 #12304=(1,6,6,8,8,192)f32 #x_windows.125=(36,8,8,192)f32 Tensor.view Tensor.view_1658 2 1 x_windows.125 12307 x8.53 $input=x_windows.125 $shape=12307 #x_windows.125=(36,8,8,192)f32 #x8.53=(36,64,192)f32 nn.Linear pnnx_unique_445 1 1 x8.53 12343 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.53=(36,64,192)f32 #12343=(36,64,576)f32 pnnx.Expression pnnx_expr_7613 1 1 x8.53 12346 
expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.53=(36,64,192)f32 pnnx.Expression pnnx_expr_7608 0 1 12348 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_556 2 1 12343 12346 12347 $input=12343 $shape=12346 #12343=(36,64,576)f32 #12347=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7607 0 1 23929 expr=0 pnnx.Expression pnnx_expr_7606 0 1 23930 expr=0 pnnx.Expression pnnx_expr_7605 0 1 23931 expr=0 pnnx.Expression pnnx_expr_7604 0 1 23932 expr=1 pnnx.Expression pnnx_expr_7603 0 1 23933 expr=0 pnnx.Expression pnnx_expr_7602 0 1 23934 expr=2 torch.permute torch.permute_2773 2 1 12347 12348 qkv1.53 $input=12347 $dims=12348 #12347=(36,64,3,6,32)f32 #qkv1.53=(3,36,6,64,32)f32 Tensor.select Tensor.select_833 3 1 qkv1.53 23929 23930 q.125 $input=qkv1.53 $dim=23929 $index=23930 #qkv1.53=(3,36,6,64,32)f32 #q.125=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7601 1 1 q.125 q1.53 expr=mul(@0,1.767767e-01) #q.125=(36,6,64,32)f32 #q1.53=(36,6,64,32)f32 Tensor.select Tensor.select_834 3 1 qkv1.53 23931 23932 k.125 $input=qkv1.53 $dim=23931 $index=23932 #qkv1.53=(3,36,6,64,32)f32 #k.125=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7599 0 1 12356 expr=[-1] Tensor.view Tensor.view_1659 2 1 relative_position_index.125 12356 12357 $input=relative_position_index.125 $shape=12356 #relative_position_index.125=(64,64)i64 #12357=(4096)i64 pnnx.Expression pnnx_expr_7598 1 1 12357 12358 expr=[@0] #12357=(4096)i64 pnnx.Expression pnnx_expr_7595 0 1 12360 expr=[64,64,-1] Tensor.index Tensor.index_387 2 1 relative_position_bias_table.125 12358 12359 $input=relative_position_bias_table.125 $expr=12358 #relative_position_bias_table.125=(225,6)f32 #12359=(4096,6)f32 pnnx.Expression pnnx_expr_7591 0 1 12362 expr=[2,0,1] Tensor.view Tensor.view_1660 2 1 12359 12360 relative_position_bias.125 $input=12359 $shape=12360 #12359=(4096,6)f32 #relative_position_bias.125=(64,64,6)f32 pnnx.Expression pnnx_expr_7590 0 1 23942 expr=0 torch.permute torch.permute_2774 2 1 relative_position_bias.125 12362 12363 $input=relative_position_bias.125 $dims=12362 #relative_position_bias.125=(64,64,6)f32 #12363=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_186 1 1 12363 relative_position_bias1.53 memory_format=torch.contiguous_format $input=12363 #12363=(6,64,64)f32 #relative_position_bias1.53=(6,64,64)f32 torch.transpose torch.transpose_3105 3 1 k.125 12318 12319 12354 $input=k.125 $dim0=12318 $dim1=12319 #k.125=(36,6,64,32)f32 #12354=(36,6,32,64)f32 torch.matmul torch.matmul_2326 2 1 q1.53 12354 attn.251 $input=q1.53 $other=12354 #q1.53=(36,6,64,32)f32 #12354=(36,6,32,64)f32 #attn.251=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3339 2 1 relative_position_bias1.53 23942 12365 $input=relative_position_bias1.53 $dim=23942 #relative_position_bias1.53=(6,64,64)f32 #12365=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7588 2 1 attn.251 12365 attn2.27 expr=add(@0,@1) #attn.251=(36,6,64,64)f32 #12365=(1,6,64,64)f32 #attn2.27=(36,6,64,64)f32 pnnx.Expression pnnx_expr_7579 2 1 x8.53 attn_mask.63 12372 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.53=(36,64,192)f32 #attn_mask.63=(36,64,64)f32 pnnx.Expression pnnx_expr_7578 0 1 23947 expr=1 pnnx.Expression pnnx_expr_7577 0 1 23948 expr=0 Tensor.view Tensor.view_1661 2 1 attn2.27 12372 12373 $input=attn2.27 $shape=12372 #attn2.27=(36,6,64,64)f32 #12373=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3340 2 1 attn_mask.63 23947 12374 $input=attn_mask.63 $dim=23947 #attn_mask.63=(36,64,64)f32 #12374=(36,1,64,64)f32 torch.unsqueeze 
torch.unsqueeze_3341 2 1 12374 23948 12375 $input=12374 $dim=23948 #12374=(36,1,64,64)f32 #12375=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_7575 2 1 12373 12375 attn3.27 expr=add(@0,@1) #12373=(1,36,6,64,64)f32 #12375=(1,36,1,64,64)f32 #attn3.27=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_7572 1 1 x8.53 12377 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.53=(36,64,192)f32 Tensor.view Tensor.view_1662 2 1 attn3.27 12377 input.279 $input=attn3.27 $shape=12377 #attn3.27=(1,36,6,64,64)f32 #input.279=(36,6,64,64)f32 nn.Softmax pnnx_unique_446 1 1 input.279 12379 dim=-1 #input.279=(36,6,64,64)f32 #12379=(36,6,64,64)f32 nn.Dropout pnnx_unique_447 1 1 12379 12380 #12379=(36,6,64,64)f32 #12380=(36,6,64,64)f32 Tensor.select Tensor.select_835 3 1 qkv1.53 23933 23934 v.125 $input=qkv1.53 $dim=23933 $index=23934 #qkv1.53=(3,36,6,64,32)f32 #v.125=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7571 0 1 23952 expr=1 pnnx.Expression pnnx_expr_7570 0 1 23953 expr=2 torch.matmul torch.matmul_2327 2 1 12380 v.125 12381 $input=12380 $other=v.125 #12380=(36,6,64,64)f32 #v.125=(36,6,64,32)f32 #12381=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7569 1 1 x8.53 12383 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.53=(36,64,192)f32 torch.transpose torch.transpose_3106 3 1 12381 23952 23953 12382 $input=12381 $dim0=23952 $dim1=23953 #12381=(36,6,64,32)f32 #12382=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_557 2 1 12382 12383 input1.55 $input=12382 $shape=12383 #12382=(36,64,6,32)f32 #input1.55=(36,64,192)f32 nn.Linear pnnx_unique_448 1 1 input1.55 12385 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.55=(36,64,192)f32 #12385=(36,64,192)f32 nn.Dropout pnnx_unique_449 1 1 12385 12386 #12385=(36,64,192)f32 #12386=(36,64,192)f32 pnnx.Expression pnnx_expr_7565 1 1 12250 12387 expr=[-1,8,8,int(size(@0,2))] #12250=(1,2304,192)f32 pnnx.Expression pnnx_expr_7552 1 1 157 12393 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7545 0 1 12395 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1663 2 1 12386 12387 windows.125 $input=12386 $shape=12387 #12386=(36,64,192)f32 #windows.125=(36,8,8,192)f32 Tensor.view Tensor.view_1664 2 1 windows.125 12393 x9.53 $input=windows.125 $shape=12393 #windows.125=(36,8,8,192)f32 #x9.53=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7542 1 1 157 12398 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2775 2 1 x9.53 12395 12396 $input=x9.53 $dims=12395 #x9.53=(1,6,6,8,8,192)f32 #12396=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_187 1 1 12396 12397 memory_format=torch.contiguous_format $input=12396 #12396=(1,6,8,6,8,192)f32 #12397=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7539 0 1 12400 expr=[4,4] pnnx.Expression pnnx_expr_7536 0 1 12401 expr=[1,2] Tensor.view Tensor.view_1665 2 1 12397 12398 shifted_x.63 $input=12397 $shape=12398 #12397=(1,6,8,6,8,192)f32 #shifted_x.63=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7533 2 1 12250 157 12405 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12250=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2481 3 1 shifted_x.63 12400 12401 x10.27 $input=shifted_x.63 $shifts=12400 $dims=12401 #shifted_x.63=(1,48,48,192)f32 #x10.27=(1,48,48,192)f32 Tensor.view Tensor.view_1666 2 1 x10.27 12405 x11.27 $input=x10.27 $shape=12405 #x10.27=(1,48,48,192)f32 #x11.27=(1,2304,192)f32 pnnx.Expression pnnx_expr_7531 2 1 12250 x11.27 input.281 expr=add(@0,@1) #12250=(1,2304,192)f32 
#x11.27=(1,2304,192)f32 #input.281=(1,2304,192)f32 nn.LayerNorm pnnx_unique_450 1 1 input.281 12409 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.281=(1,2304,192)f32 #12409=(1,2304,192)f32 nn.Linear pnnx_unique_451 1 1 12409 12414 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12409=(1,2304,192)f32 #12414=(1,2304,384)f32 nn.GELU pnnx_unique_452 1 1 12414 12415 #12414=(1,2304,384)f32 #12415=(1,2304,384)f32 nn.Dropout pnnx_unique_453 1 1 12415 12416 #12415=(1,2304,384)f32 #12416=(1,2304,384)f32 nn.Linear pnnx_unique_454 1 1 12416 12417 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12416=(1,2304,384)f32 #12417=(1,2304,192)f32 nn.Dropout pnnx_unique_455 1 1 12417 12418 #12417=(1,2304,192)f32 #12418=(1,2304,192)f32 pnnx.Expression pnnx_expr_7529 2 1 input.281 12418 12420 expr=add(@0,@1) #input.281=(1,2304,192)f32 #12418=(1,2304,192)f32 #12420=(1,2304,192)f32 nn.LayerNorm pnnx_unique_456 1 1 12420 12447 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12420=(1,2304,192)f32 #12447=(1,2304,192)f32 pnnx.Expression pnnx_expr_7507 2 1 12420 157 12448 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12420=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1667 2 1 12447 12448 x.127 $input=12447 $shape=12448 #12447=(1,2304,192)f32 #x.127=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7486 1 1 x.127 12465 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.127=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7481 0 1 12467 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1668 2 1 x.127 12465 x5.65 $input=x.127 $shape=12465 #x.127=(1,48,48,192)f32 #x5.65=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7478 1 1 x.127 12470 expr=[-1,8,8,int(size(@0,3))] #x.127=(1,48,48,192)f32 torch.permute torch.permute_2776 2 1 x5.65 12467 12468 $input=x5.65 $dims=12467 #x5.65=(1,6,8,6,8,192)f32 #12468=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_188 1 1 12468 12469 memory_format=torch.contiguous_format $input=12468 #12468=(1,6,6,8,8,192)f32 #12469=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7476 1 1 12420 12472 expr=[-1,64,int(size(@0,2))] #12420=(1,2304,192)f32 pnnx.Expression pnnx_expr_7466 0 1 12483 expr=-2 pnnx.Expression pnnx_expr_7465 0 1 12484 expr=-1 pnnx.Attribute pnnx_unique_457 0 1 relative_position_bias_table.127 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.127=(225,6)f32 pnnx.Attribute pnnx_unique_458 0 1 relative_position_index.127 @relative_position_index=(64,64)i64 #relative_position_index.127=(64,64)i64 Tensor.view Tensor.view_1669 2 1 12469 12470 x_windows.127 $input=12469 $shape=12470 #12469=(1,6,6,8,8,192)f32 #x_windows.127=(36,8,8,192)f32 Tensor.view Tensor.view_1670 2 1 x_windows.127 12472 x6.55 $input=x_windows.127 $shape=12472 #x_windows.127=(36,8,8,192)f32 #x6.55=(36,64,192)f32 nn.Linear pnnx_unique_459 1 1 x6.55 12504 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.55=(36,64,192)f32 #12504=(36,64,576)f32 pnnx.Expression pnnx_expr_7450 1 1 x6.55 12507 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.55=(36,64,192)f32 pnnx.Expression pnnx_expr_7445 0 1 12509 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_558 2 1 12504 12507 12508 $input=12504 $shape=12507 #12504=(36,64,576)f32 #12508=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7444 0 1 23997 expr=0 pnnx.Expression pnnx_expr_7443 0 1 
23998 expr=0 pnnx.Expression pnnx_expr_7442 0 1 23999 expr=0 pnnx.Expression pnnx_expr_7441 0 1 24000 expr=1 pnnx.Expression pnnx_expr_7440 0 1 24001 expr=0 pnnx.Expression pnnx_expr_7439 0 1 24002 expr=2 torch.permute torch.permute_2777 2 1 12508 12509 qkv1.55 $input=12508 $dims=12509 #12508=(36,64,3,6,32)f32 #qkv1.55=(3,36,6,64,32)f32 Tensor.select Tensor.select_836 3 1 qkv1.55 23997 23998 q.127 $input=qkv1.55 $dim=23997 $index=23998 #qkv1.55=(3,36,6,64,32)f32 #q.127=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7438 1 1 q.127 q1.55 expr=mul(@0,1.767767e-01) #q.127=(36,6,64,32)f32 #q1.55=(36,6,64,32)f32 Tensor.select Tensor.select_837 3 1 qkv1.55 23999 24000 k.127 $input=qkv1.55 $dim=23999 $index=24000 #qkv1.55=(3,36,6,64,32)f32 #k.127=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7436 0 1 12517 expr=[-1] Tensor.view Tensor.view_1671 2 1 relative_position_index.127 12517 12518 $input=relative_position_index.127 $shape=12517 #relative_position_index.127=(64,64)i64 #12518=(4096)i64 pnnx.Expression pnnx_expr_7435 1 1 12518 12519 expr=[@0] #12518=(4096)i64 pnnx.Expression pnnx_expr_7432 0 1 12521 expr=[64,64,-1] Tensor.index Tensor.index_388 2 1 relative_position_bias_table.127 12519 12520 $input=relative_position_bias_table.127 $expr=12519 #relative_position_bias_table.127=(225,6)f32 #12520=(4096,6)f32 pnnx.Expression pnnx_expr_7428 0 1 12523 expr=[2,0,1] Tensor.view Tensor.view_1672 2 1 12520 12521 relative_position_bias.127 $input=12520 $shape=12521 #12520=(4096,6)f32 #relative_position_bias.127=(64,64,6)f32 pnnx.Expression pnnx_expr_7427 0 1 24010 expr=0 torch.permute torch.permute_2778 2 1 relative_position_bias.127 12523 12524 $input=relative_position_bias.127 $dims=12523 #relative_position_bias.127=(64,64,6)f32 #12524=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_189 1 1 12524 relative_position_bias1.55 memory_format=torch.contiguous_format $input=12524 #12524=(6,64,64)f32 #relative_position_bias1.55=(6,64,64)f32 torch.transpose torch.transpose_3107 3 1 k.127 12483 12484 12515 $input=k.127 $dim0=12483 $dim1=12484 #k.127=(36,6,64,32)f32 #12515=(36,6,32,64)f32 torch.matmul torch.matmul_2328 2 1 q1.55 12515 attn.255 $input=q1.55 $other=12515 #q1.55=(36,6,64,32)f32 #12515=(36,6,32,64)f32 #attn.255=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3342 2 1 relative_position_bias1.55 24010 12526 $input=relative_position_bias1.55 $dim=24010 #relative_position_bias1.55=(6,64,64)f32 #12526=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7425 2 1 attn.255 12526 input.283 expr=add(@0,@1) #attn.255=(36,6,64,64)f32 #12526=(1,6,64,64)f32 #input.283=(36,6,64,64)f32 nn.Softmax pnnx_unique_460 1 1 input.283 12528 dim=-1 #input.283=(36,6,64,64)f32 #12528=(36,6,64,64)f32 nn.Dropout pnnx_unique_461 1 1 12528 12529 #12528=(36,6,64,64)f32 #12529=(36,6,64,64)f32 Tensor.select Tensor.select_838 3 1 qkv1.55 24001 24002 v.127 $input=qkv1.55 $dim=24001 $index=24002 #qkv1.55=(3,36,6,64,32)f32 #v.127=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7424 0 1 24012 expr=1 pnnx.Expression pnnx_expr_7423 0 1 24013 expr=2 torch.matmul torch.matmul_2329 2 1 12529 v.127 12530 $input=12529 $other=v.127 #12529=(36,6,64,64)f32 #v.127=(36,6,64,32)f32 #12530=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7422 1 1 x6.55 12532 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.55=(36,64,192)f32 torch.transpose torch.transpose_3108 3 1 12530 24012 24013 12531 $input=12530 $dim0=24012 $dim1=24013 #12530=(36,6,64,32)f32 #12531=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_559 2 1 12531 12532 input1.57 $input=12531 $shape=12532 
#12531=(36,64,6,32)f32 #input1.57=(36,64,192)f32 nn.Linear pnnx_unique_462 1 1 input1.57 12534 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.57=(36,64,192)f32 #12534=(36,64,192)f32 nn.Dropout pnnx_unique_463 1 1 12534 12535 #12534=(36,64,192)f32 #12535=(36,64,192)f32 pnnx.Expression pnnx_expr_7418 1 1 12420 12536 expr=[-1,8,8,int(size(@0,2))] #12420=(1,2304,192)f32 pnnx.Expression pnnx_expr_7405 1 1 157 12542 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7398 0 1 12544 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1673 2 1 12535 12536 windows.127 $input=12535 $shape=12536 #12535=(36,64,192)f32 #windows.127=(36,8,8,192)f32 Tensor.view Tensor.view_1674 2 1 windows.127 12542 x7.55 $input=windows.127 $shape=12542 #windows.127=(36,8,8,192)f32 #x7.55=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7395 1 1 157 12547 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2779 2 1 x7.55 12544 12545 $input=x7.55 $dims=12544 #x7.55=(1,6,6,8,8,192)f32 #12545=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_190 1 1 12545 12546 memory_format=torch.contiguous_format $input=12545 #12545=(1,6,8,6,8,192)f32 #12546=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7392 2 1 12420 157 12551 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12420=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1675 2 1 12546 12547 x8.55 $input=12546 $shape=12547 #12546=(1,6,8,6,8,192)f32 #x8.55=(1,48,48,192)f32 Tensor.view Tensor.view_1676 2 1 x8.55 12551 x9.55 $input=x8.55 $shape=12551 #x8.55=(1,48,48,192)f32 #x9.55=(1,2304,192)f32 pnnx.Expression pnnx_expr_7390 2 1 12420 x9.55 input.285 expr=add(@0,@1) #12420=(1,2304,192)f32 #x9.55=(1,2304,192)f32 #input.285=(1,2304,192)f32 nn.LayerNorm pnnx_unique_464 1 1 input.285 12555 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.285=(1,2304,192)f32 #12555=(1,2304,192)f32 nn.Linear pnnx_unique_465 1 1 12555 12560 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12555=(1,2304,192)f32 #12560=(1,2304,384)f32 nn.GELU pnnx_unique_466 1 1 12560 12561 #12560=(1,2304,384)f32 #12561=(1,2304,384)f32 nn.Dropout pnnx_unique_467 1 1 12561 12562 #12561=(1,2304,384)f32 #12562=(1,2304,384)f32 nn.Linear pnnx_unique_468 1 1 12562 12563 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12562=(1,2304,384)f32 #12563=(1,2304,192)f32 nn.Dropout pnnx_unique_469 1 1 12563 12564 #12563=(1,2304,192)f32 #12564=(1,2304,192)f32 pnnx.Expression pnnx_expr_7388 2 1 input.285 12564 12566 expr=add(@0,@1) #input.285=(1,2304,192)f32 #12564=(1,2304,192)f32 #12566=(1,2304,192)f32 pnnx.Attribute pnnx_unique_470 0 1 attn_mask.65 @attn_mask=(36,64,64)f32 #attn_mask.65=(36,64,64)f32 nn.LayerNorm pnnx_unique_471 1 1 12566 12595 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12566=(1,2304,192)f32 #12595=(1,2304,192)f32 pnnx.Expression pnnx_expr_7365 2 1 12566 157 12596 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12566=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7363 0 1 12598 expr=[-4,-4] pnnx.Expression pnnx_expr_7361 0 1 12599 expr=[1,2] Tensor.view Tensor.view_1677 2 1 12595 12596 x.129 $input=12595 $shape=12596 #12595=(1,2304,192)f32 #x.129=(1,48,48,192)f32 torch.roll torch.roll_2482 3 1 x.129 12598 12599 x6.57 $input=x.129 $shifts=12598 
$dims=12599 #x.129=(1,48,48,192)f32 #x6.57=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7339 1 1 x6.57 12616 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.57=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7334 0 1 12618 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1678 2 1 x6.57 12616 x7.57 $input=x6.57 $shape=12616 #x6.57=(1,48,48,192)f32 #x7.57=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7331 1 1 x6.57 12621 expr=[-1,8,8,int(size(@0,3))] #x6.57=(1,48,48,192)f32 torch.permute torch.permute_2780 2 1 x7.57 12618 12619 $input=x7.57 $dims=12618 #x7.57=(1,6,8,6,8,192)f32 #12619=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_191 1 1 12619 12620 memory_format=torch.contiguous_format $input=12619 #12619=(1,6,6,8,8,192)f32 #12620=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7329 1 1 12566 12623 expr=[-1,64,int(size(@0,2))] #12566=(1,2304,192)f32 pnnx.Expression pnnx_expr_7319 0 1 12634 expr=-2 pnnx.Expression pnnx_expr_7318 0 1 12635 expr=-1 pnnx.Attribute pnnx_unique_472 0 1 relative_position_bias_table.129 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.129=(225,6)f32 pnnx.Attribute pnnx_unique_473 0 1 relative_position_index.129 @relative_position_index=(64,64)i64 #relative_position_index.129=(64,64)i64 Tensor.view Tensor.view_1679 2 1 12620 12621 x_windows.129 $input=12620 $shape=12621 #12620=(1,6,6,8,8,192)f32 #x_windows.129=(36,8,8,192)f32 Tensor.view Tensor.view_1680 2 1 x_windows.129 12623 x8.57 $input=x_windows.129 $shape=12623 #x_windows.129=(36,8,8,192)f32 #x8.57=(36,64,192)f32 nn.Linear pnnx_unique_474 1 1 x8.57 12659 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.57=(36,64,192)f32 #12659=(36,64,576)f32 pnnx.Expression pnnx_expr_7299 1 1 x8.57 12662 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.57=(36,64,192)f32 pnnx.Expression pnnx_expr_7294 0 1 12664 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_560 2 1 12659 12662 12663 $input=12659 $shape=12662 #12659=(36,64,576)f32 #12663=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7293 0 1 24056 expr=0 pnnx.Expression pnnx_expr_7292 0 1 24057 expr=0 pnnx.Expression pnnx_expr_7291 0 1 24058 expr=0 pnnx.Expression pnnx_expr_7290 0 1 24059 expr=1 pnnx.Expression pnnx_expr_7289 0 1 24060 expr=0 pnnx.Expression pnnx_expr_7288 0 1 24061 expr=2 torch.permute torch.permute_2781 2 1 12663 12664 qkv1.57 $input=12663 $dims=12664 #12663=(36,64,3,6,32)f32 #qkv1.57=(3,36,6,64,32)f32 Tensor.select Tensor.select_839 3 1 qkv1.57 24056 24057 q.129 $input=qkv1.57 $dim=24056 $index=24057 #qkv1.57=(3,36,6,64,32)f32 #q.129=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7287 1 1 q.129 q1.57 expr=mul(@0,1.767767e-01) #q.129=(36,6,64,32)f32 #q1.57=(36,6,64,32)f32 Tensor.select Tensor.select_840 3 1 qkv1.57 24058 24059 k.129 $input=qkv1.57 $dim=24058 $index=24059 #qkv1.57=(3,36,6,64,32)f32 #k.129=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7285 0 1 12672 expr=[-1] Tensor.view Tensor.view_1681 2 1 relative_position_index.129 12672 12673 $input=relative_position_index.129 $shape=12672 #relative_position_index.129=(64,64)i64 #12673=(4096)i64 pnnx.Expression pnnx_expr_7284 1 1 12673 12674 expr=[@0] #12673=(4096)i64 pnnx.Expression pnnx_expr_7281 0 1 12676 expr=[64,64,-1] Tensor.index Tensor.index_389 2 1 relative_position_bias_table.129 12674 12675 $input=relative_position_bias_table.129 $expr=12674 #relative_position_bias_table.129=(225,6)f32 #12675=(4096,6)f32 pnnx.Expression pnnx_expr_7277 0 1 12678 expr=[2,0,1] Tensor.view Tensor.view_1682 2 1 12675 
12676 relative_position_bias.129 $input=12675 $shape=12676 #12675=(4096,6)f32 #relative_position_bias.129=(64,64,6)f32 pnnx.Expression pnnx_expr_7276 0 1 24069 expr=0 torch.permute torch.permute_2782 2 1 relative_position_bias.129 12678 12679 $input=relative_position_bias.129 $dims=12678 #relative_position_bias.129=(64,64,6)f32 #12679=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_192 1 1 12679 relative_position_bias1.57 memory_format=torch.contiguous_format $input=12679 #12679=(6,64,64)f32 #relative_position_bias1.57=(6,64,64)f32 torch.transpose torch.transpose_3109 3 1 k.129 12634 12635 12670 $input=k.129 $dim0=12634 $dim1=12635 #k.129=(36,6,64,32)f32 #12670=(36,6,32,64)f32 torch.matmul torch.matmul_2330 2 1 q1.57 12670 attn.259 $input=q1.57 $other=12670 #q1.57=(36,6,64,32)f32 #12670=(36,6,32,64)f32 #attn.259=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3343 2 1 relative_position_bias1.57 24069 12681 $input=relative_position_bias1.57 $dim=24069 #relative_position_bias1.57=(6,64,64)f32 #12681=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7274 2 1 attn.259 12681 attn2.29 expr=add(@0,@1) #attn.259=(36,6,64,64)f32 #12681=(1,6,64,64)f32 #attn2.29=(36,6,64,64)f32 pnnx.Expression pnnx_expr_7265 2 1 x8.57 attn_mask.65 12688 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.57=(36,64,192)f32 #attn_mask.65=(36,64,64)f32 pnnx.Expression pnnx_expr_7264 0 1 24074 expr=1 pnnx.Expression pnnx_expr_7263 0 1 24075 expr=0 Tensor.view Tensor.view_1683 2 1 attn2.29 12688 12689 $input=attn2.29 $shape=12688 #attn2.29=(36,6,64,64)f32 #12689=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3344 2 1 attn_mask.65 24074 12690 $input=attn_mask.65 $dim=24074 #attn_mask.65=(36,64,64)f32 #12690=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3345 2 1 12690 24075 12691 $input=12690 $dim=24075 #12690=(36,1,64,64)f32 #12691=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_7261 2 1 12689 12691 attn3.29 expr=add(@0,@1) #12689=(1,36,6,64,64)f32 #12691=(1,36,1,64,64)f32 #attn3.29=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_7258 1 1 x8.57 12693 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.57=(36,64,192)f32 Tensor.view Tensor.view_1684 2 1 attn3.29 12693 input.287 $input=attn3.29 $shape=12693 #attn3.29=(1,36,6,64,64)f32 #input.287=(36,6,64,64)f32 nn.Softmax pnnx_unique_475 1 1 input.287 12695 dim=-1 #input.287=(36,6,64,64)f32 #12695=(36,6,64,64)f32 nn.Dropout pnnx_unique_476 1 1 12695 12696 #12695=(36,6,64,64)f32 #12696=(36,6,64,64)f32 Tensor.select Tensor.select_841 3 1 qkv1.57 24060 24061 v.129 $input=qkv1.57 $dim=24060 $index=24061 #qkv1.57=(3,36,6,64,32)f32 #v.129=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7257 0 1 24079 expr=1 pnnx.Expression pnnx_expr_7256 0 1 24080 expr=2 torch.matmul torch.matmul_2331 2 1 12696 v.129 12697 $input=12696 $other=v.129 #12696=(36,6,64,64)f32 #v.129=(36,6,64,32)f32 #12697=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7255 1 1 x8.57 12699 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.57=(36,64,192)f32 torch.transpose torch.transpose_3110 3 1 12697 24079 24080 12698 $input=12697 $dim0=24079 $dim1=24080 #12697=(36,6,64,32)f32 #12698=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_561 2 1 12698 12699 input1.59 $input=12698 $shape=12699 #12698=(36,64,6,32)f32 #input1.59=(36,64,192)f32 nn.Linear pnnx_unique_477 1 1 input1.59 12701 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.59=(36,64,192)f32 #12701=(36,64,192)f32 nn.Dropout pnnx_unique_478 1 1 12701 12702 #12701=(36,64,192)f32 #12702=(36,64,192)f32 
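
The qkv/select/matmul/softmax runs traced above are the standard windowed multi-head attention pattern: 6 heads of dimension 32 (hence the mul(@0,1.767767e-01) on q, i.e. 32^-0.5), a (225,6) relative-position-bias table gathered through a flattened (64,64) index, and, in shifted blocks, a per-window attn_mask broadcast over heads via the (1,36,6,64,64)+(1,36,1,64,64) add. A minimal PyTorch sketch of that pattern follows; module and parameter names are illustrative (the dump records only traced ops, and dropout probabilities are not recoverable from it):

import torch
import torch.nn as nn

class WindowAttention(nn.Module):
    def __init__(self, dim=192, window_size=8, num_heads=6):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads                    # 192 / 6 = 32
        self.scale = head_dim ** -0.5                  # 1.767767e-01 in the dump
        self.qkv = nn.Linear(dim, dim * 3)             # 192 -> 576
        self.proj = nn.Linear(dim, dim)                # 192 -> 192
        self.attn_drop = nn.Dropout(0.0)               # p unknown from the trace
        self.proj_drop = nn.Dropout(0.0)
        # (2*8-1)^2 = 225 relative offsets, one bias value per head
        self.relative_position_bias_table = nn.Parameter(torch.zeros(225, num_heads))
        # real code precomputes pairwise offsets; zeros keep the sketch self-contained
        self.register_buffer("relative_position_index",
                             torch.zeros(window_size ** 2, window_size ** 2, dtype=torch.long))

    def forward(self, x, mask=None):                   # x: (nW*B, 64, 192)
        B_, N, C = x.shape
        qkv = (self.qkv(x)
               .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
               .permute(2, 0, 3, 1, 4))                # (3, nW*B, 6, 64, 32)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q * self.scale) @ k.transpose(-2, -1)  # (nW*B, 6, 64, 64)
        bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)].view(N, N, -1)     # (64, 64, 6)
        attn = attn + bias.permute(2, 0, 1).contiguous().unsqueeze(0)  # (1, 6, 64, 64)
        if mask is not None:                           # mask: (nW, 64, 64), shifted blocks only
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) \
                 + mask.unsqueeze(1).unsqueeze(0)      # (1, 36, 1, 64, 64) broadcast
            attn = attn.view(-1, self.num_heads, N, N)
        attn = self.attn_drop(attn.softmax(dim=-1))
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        return self.proj_drop(self.proj(x))

Each residual block in the dump then adds the attention output back to its input and runs the LayerNorm -> Linear(192,384) -> GELU -> Linear(384,192) MLP with a second residual add, which is the usual transformer block layout.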
pnnx.Expression pnnx_expr_7251 1 1 12566 12703 expr=[-1,8,8,int(size(@0,2))] #12566=(1,2304,192)f32 pnnx.Expression pnnx_expr_7238 1 1 157 12709 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7231 0 1 12711 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1685 2 1 12702 12703 windows.129 $input=12702 $shape=12703 #12702=(36,64,192)f32 #windows.129=(36,8,8,192)f32 Tensor.view Tensor.view_1686 2 1 windows.129 12709 x9.57 $input=windows.129 $shape=12709 #windows.129=(36,8,8,192)f32 #x9.57=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7228 1 1 157 12714 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2783 2 1 x9.57 12711 12712 $input=x9.57 $dims=12711 #x9.57=(1,6,6,8,8,192)f32 #12712=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_193 1 1 12712 12713 memory_format=torch.contiguous_format $input=12712 #12712=(1,6,8,6,8,192)f32 #12713=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7225 0 1 12716 expr=[4,4] pnnx.Expression pnnx_expr_7222 0 1 12717 expr=[1,2] Tensor.view Tensor.view_1687 2 1 12713 12714 shifted_x.65 $input=12713 $shape=12714 #12713=(1,6,8,6,8,192)f32 #shifted_x.65=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7219 2 1 12566 157 12721 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12566=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2483 3 1 shifted_x.65 12716 12717 x10.29 $input=shifted_x.65 $shifts=12716 $dims=12717 #shifted_x.65=(1,48,48,192)f32 #x10.29=(1,48,48,192)f32 Tensor.view Tensor.view_1688 2 1 x10.29 12721 x11.29 $input=x10.29 $shape=12721 #x10.29=(1,48,48,192)f32 #x11.29=(1,2304,192)f32 pnnx.Expression pnnx_expr_7217 2 1 12566 x11.29 input.289 expr=add(@0,@1) #12566=(1,2304,192)f32 #x11.29=(1,2304,192)f32 #input.289=(1,2304,192)f32 nn.LayerNorm pnnx_unique_479 1 1 input.289 12725 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.289=(1,2304,192)f32 #12725=(1,2304,192)f32 nn.Linear pnnx_unique_480 1 1 12725 12730 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12725=(1,2304,192)f32 #12730=(1,2304,384)f32 nn.GELU pnnx_unique_481 1 1 12730 12731 #12730=(1,2304,384)f32 #12731=(1,2304,384)f32 nn.Dropout pnnx_unique_482 1 1 12731 12732 #12731=(1,2304,384)f32 #12732=(1,2304,384)f32 nn.Linear pnnx_unique_483 1 1 12732 12733 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12732=(1,2304,384)f32 #12733=(1,2304,192)f32 nn.Dropout pnnx_unique_484 1 1 12733 12734 #12733=(1,2304,192)f32 #12734=(1,2304,192)f32 pnnx.Expression pnnx_expr_7215 2 1 input.289 12734 12736 expr=add(@0,@1) #input.289=(1,2304,192)f32 #12734=(1,2304,192)f32 #12736=(1,2304,192)f32 nn.LayerNorm pnnx_unique_485 1 1 12736 12763 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12736=(1,2304,192)f32 #12763=(1,2304,192)f32 pnnx.Expression pnnx_expr_7193 2 1 12736 157 12764 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12736=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1689 2 1 12763 12764 x.131 $input=12763 $shape=12764 #12763=(1,2304,192)f32 #x.131=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7172 1 1 x.131 12781 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.131=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7167 0 1 12783 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1690 2 1 x.131 12781 x5.67 $input=x.131 $shape=12781 
#x.131=(1,48,48,192)f32 #x5.67=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7164 1 1 x.131 12786 expr=[-1,8,8,int(size(@0,3))] #x.131=(1,48,48,192)f32 torch.permute torch.permute_2784 2 1 x5.67 12783 12784 $input=x5.67 $dims=12783 #x5.67=(1,6,8,6,8,192)f32 #12784=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_194 1 1 12784 12785 memory_format=torch.contiguous_format $input=12784 #12784=(1,6,6,8,8,192)f32 #12785=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7162 1 1 12736 12788 expr=[-1,64,int(size(@0,2))] #12736=(1,2304,192)f32 pnnx.Expression pnnx_expr_7152 0 1 12799 expr=-2 pnnx.Expression pnnx_expr_7151 0 1 12800 expr=-1 pnnx.Attribute pnnx_unique_486 0 1 relative_position_bias_table.131 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.131=(225,6)f32 pnnx.Attribute pnnx_unique_487 0 1 relative_position_index.131 @relative_position_index=(64,64)i64 #relative_position_index.131=(64,64)i64 Tensor.view Tensor.view_1691 2 1 12785 12786 x_windows.131 $input=12785 $shape=12786 #12785=(1,6,6,8,8,192)f32 #x_windows.131=(36,8,8,192)f32 Tensor.view Tensor.view_1692 2 1 x_windows.131 12788 x6.59 $input=x_windows.131 $shape=12788 #x_windows.131=(36,8,8,192)f32 #x6.59=(36,64,192)f32 nn.Linear pnnx_unique_488 1 1 x6.59 12820 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.59=(36,64,192)f32 #12820=(36,64,576)f32 pnnx.Expression pnnx_expr_7136 1 1 x6.59 12823 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.59=(36,64,192)f32 pnnx.Expression pnnx_expr_7131 0 1 12825 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_562 2 1 12820 12823 12824 $input=12820 $shape=12823 #12820=(36,64,576)f32 #12824=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_7130 0 1 24124 expr=0 pnnx.Expression pnnx_expr_7129 0 1 24125 expr=0 pnnx.Expression pnnx_expr_7128 0 1 24126 expr=0 pnnx.Expression pnnx_expr_7127 0 1 24127 expr=1 pnnx.Expression pnnx_expr_7126 0 1 24128 expr=0 pnnx.Expression pnnx_expr_7125 0 1 24129 expr=2 torch.permute torch.permute_2785 2 1 12824 12825 qkv1.59 $input=12824 $dims=12825 #12824=(36,64,3,6,32)f32 #qkv1.59=(3,36,6,64,32)f32 Tensor.select Tensor.select_842 3 1 qkv1.59 24124 24125 q.131 $input=qkv1.59 $dim=24124 $index=24125 #qkv1.59=(3,36,6,64,32)f32 #q.131=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7124 1 1 q.131 q1.59 expr=mul(@0,1.767767e-01) #q.131=(36,6,64,32)f32 #q1.59=(36,6,64,32)f32 Tensor.select Tensor.select_843 3 1 qkv1.59 24126 24127 k.131 $input=qkv1.59 $dim=24126 $index=24127 #qkv1.59=(3,36,6,64,32)f32 #k.131=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7122 0 1 12833 expr=[-1] Tensor.view Tensor.view_1693 2 1 relative_position_index.131 12833 12834 $input=relative_position_index.131 $shape=12833 #relative_position_index.131=(64,64)i64 #12834=(4096)i64 pnnx.Expression pnnx_expr_7121 1 1 12834 12835 expr=[@0] #12834=(4096)i64 pnnx.Expression pnnx_expr_7118 0 1 12837 expr=[64,64,-1] Tensor.index Tensor.index_390 2 1 relative_position_bias_table.131 12835 12836 $input=relative_position_bias_table.131 $expr=12835 #relative_position_bias_table.131=(225,6)f32 #12836=(4096,6)f32 pnnx.Expression pnnx_expr_7114 0 1 12839 expr=[2,0,1] Tensor.view Tensor.view_1694 2 1 12836 12837 relative_position_bias.131 $input=12836 $shape=12837 #12836=(4096,6)f32 #relative_position_bias.131=(64,64,6)f32 pnnx.Expression pnnx_expr_7113 0 1 24137 expr=0 torch.permute torch.permute_2786 2 1 relative_position_bias.131 12839 12840 $input=relative_position_bias.131 $dims=12839 #relative_position_bias.131=(64,64,6)f32 #12840=(6,64,64)f32 
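
The repeated view/permute/contiguous sequences bracketing each attention call, together with the torch.roll pairs (shifts [-4,-4] before partitioning, [4,4] after reversing), are the cyclic-shift and window partition/reverse plumbing. A minimal sketch of that plumbing, assuming the 48x48 token grid and window_size=8 recorded in the shape comments (function names are illustrative, not taken from the graph):

import torch

def window_partition(x, ws=8):
    B, H, W, C = x.shape                                   # (1, 48, 48, 192)
    x = x.view(B, H // ws, ws, W // ws, ws, C)             # (1, 6, 8, 6, 8, 192)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()           # (1, 6, 6, 8, 8, 192)
    return x.view(-1, ws, ws, C)                           # (36, 8, 8, 192)

def window_reverse(windows, ws, H, W):
    B = windows.shape[0] // (H * W // ws // ws)            # 36 windows -> batch 1
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)      # (1, 6, 6, 8, 8, 192)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()           # (1, 6, 8, 6, 8, 192)
    return x.view(B, H, W, -1)                             # (1, 48, 48, 192)

x = torch.randn(1, 48, 48, 192)
shifted = torch.roll(x, shifts=(-4, -4), dims=(1, 2))      # the torch.roll_24xx ops
windows = window_partition(shifted).view(-1, 64, 192)      # x_windows -> (36, 64, 192)
# ... windowed attention runs here ...
out = window_reverse(windows.view(-1, 8, 8, 192), 8, 48, 48)
out = torch.roll(out, shifts=(4, 4), dims=(1, 2))          # undo the cyclic shift

Blocks without the roll pair are the unshifted variant; the dump alternates the two, and every residual group ends with the transpose/view -> nn.Conv2d(192,192,3x3) -> flatten/transpose bridge back to the (1,2304,192) token layout plus a group-level residual add.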
Tensor.contiguous Tensor.contiguous_195 1 1 12840 relative_position_bias1.59 memory_format=torch.contiguous_format $input=12840 #12840=(6,64,64)f32 #relative_position_bias1.59=(6,64,64)f32 torch.transpose torch.transpose_3111 3 1 k.131 12799 12800 12831 $input=k.131 $dim0=12799 $dim1=12800 #k.131=(36,6,64,32)f32 #12831=(36,6,32,64)f32 torch.matmul torch.matmul_2332 2 1 q1.59 12831 attn.263 $input=q1.59 $other=12831 #q1.59=(36,6,64,32)f32 #12831=(36,6,32,64)f32 #attn.263=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3346 2 1 relative_position_bias1.59 24137 12842 $input=relative_position_bias1.59 $dim=24137 #relative_position_bias1.59=(6,64,64)f32 #12842=(1,6,64,64)f32 pnnx.Expression pnnx_expr_7111 2 1 attn.263 12842 input.291 expr=add(@0,@1) #attn.263=(36,6,64,64)f32 #12842=(1,6,64,64)f32 #input.291=(36,6,64,64)f32 nn.Softmax pnnx_unique_489 1 1 input.291 12844 dim=-1 #input.291=(36,6,64,64)f32 #12844=(36,6,64,64)f32 nn.Dropout pnnx_unique_490 1 1 12844 12845 #12844=(36,6,64,64)f32 #12845=(36,6,64,64)f32 Tensor.select Tensor.select_844 3 1 qkv1.59 24128 24129 v.131 $input=qkv1.59 $dim=24128 $index=24129 #qkv1.59=(3,36,6,64,32)f32 #v.131=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7110 0 1 24139 expr=1 pnnx.Expression pnnx_expr_7109 0 1 24140 expr=2 torch.matmul torch.matmul_2333 2 1 12845 v.131 12846 $input=12845 $other=v.131 #12845=(36,6,64,64)f32 #v.131=(36,6,64,32)f32 #12846=(36,6,64,32)f32 pnnx.Expression pnnx_expr_7108 1 1 x6.59 12848 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.59=(36,64,192)f32 torch.transpose torch.transpose_3112 3 1 12846 24139 24140 12847 $input=12846 $dim0=24139 $dim1=24140 #12846=(36,6,64,32)f32 #12847=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_563 2 1 12847 12848 input1.61 $input=12847 $shape=12848 #12847=(36,64,6,32)f32 #input1.61=(36,64,192)f32 nn.Linear pnnx_unique_491 1 1 input1.61 12850 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.61=(36,64,192)f32 #12850=(36,64,192)f32 nn.Dropout pnnx_unique_492 1 1 12850 12851 #12850=(36,64,192)f32 #12851=(36,64,192)f32 pnnx.Expression pnnx_expr_7104 1 1 12736 12852 expr=[-1,8,8,int(size(@0,2))] #12736=(1,2304,192)f32 pnnx.Expression pnnx_expr_7091 1 1 157 12858 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7084 0 1 12860 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1695 2 1 12851 12852 windows.131 $input=12851 $shape=12852 #12851=(36,64,192)f32 #windows.131=(36,8,8,192)f32 Tensor.view Tensor.view_1696 2 1 windows.131 12858 x7.59 $input=windows.131 $shape=12858 #windows.131=(36,8,8,192)f32 #x7.59=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7081 1 1 157 12863 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2787 2 1 x7.59 12860 12861 $input=x7.59 $dims=12860 #x7.59=(1,6,6,8,8,192)f32 #12861=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_196 1 1 12861 12862 memory_format=torch.contiguous_format $input=12861 #12861=(1,6,8,6,8,192)f32 #12862=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7078 2 1 12736 157 12867 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12736=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1697 2 1 12862 12863 x8.59 $input=12862 $shape=12863 #12862=(1,6,8,6,8,192)f32 #x8.59=(1,48,48,192)f32 Tensor.view Tensor.view_1698 2 1 x8.59 12867 x9.59 $input=x8.59 $shape=12867 #x8.59=(1,48,48,192)f32 #x9.59=(1,2304,192)f32 pnnx.Expression pnnx_expr_7076 2 1 12736 x9.59 input.293 expr=add(@0,@1) 
#12736=(1,2304,192)f32 #x9.59=(1,2304,192)f32 #input.293=(1,2304,192)f32 nn.LayerNorm pnnx_unique_493 1 1 input.293 12871 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.293=(1,2304,192)f32 #12871=(1,2304,192)f32 nn.Linear pnnx_unique_494 1 1 12871 12876 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #12871=(1,2304,192)f32 #12876=(1,2304,384)f32 nn.GELU pnnx_unique_495 1 1 12876 12877 #12876=(1,2304,384)f32 #12877=(1,2304,384)f32 nn.Dropout pnnx_unique_496 1 1 12877 12878 #12877=(1,2304,384)f32 #12878=(1,2304,384)f32 nn.Linear pnnx_unique_497 1 1 12878 12879 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #12878=(1,2304,384)f32 #12879=(1,2304,192)f32 nn.Dropout pnnx_unique_498 1 1 12879 12880 #12879=(1,2304,192)f32 #12880=(1,2304,192)f32 pnnx.Expression pnnx_expr_7074 2 1 input.293 12880 12882 expr=add(@0,@1) #input.293=(1,2304,192)f32 #12880=(1,2304,192)f32 #12882=(1,2304,192)f32 pnnx.Attribute pnnx_unique_499 0 1 attn_mask.67 @attn_mask=(36,64,64)f32 #attn_mask.67=(36,64,64)f32 nn.LayerNorm pnnx_unique_500 1 1 12882 12911 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #12882=(1,2304,192)f32 #12911=(1,2304,192)f32 pnnx.Expression pnnx_expr_7051 2 1 12882 157 12912 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #12882=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_7049 0 1 12914 expr=[-4,-4] pnnx.Expression pnnx_expr_7047 0 1 12915 expr=[1,2] Tensor.view Tensor.view_1699 2 1 12911 12912 x.133 $input=12911 $shape=12912 #12911=(1,2304,192)f32 #x.133=(1,48,48,192)f32 torch.roll torch.roll_2484 3 1 x.133 12914 12915 x6.61 $input=x.133 $shifts=12914 $dims=12915 #x.133=(1,48,48,192)f32 #x6.61=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7025 1 1 x6.61 12932 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.61=(1,48,48,192)f32 pnnx.Expression pnnx_expr_7020 0 1 12934 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1700 2 1 x6.61 12932 x7.61 $input=x6.61 $shape=12932 #x6.61=(1,48,48,192)f32 #x7.61=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_7017 1 1 x6.61 12937 expr=[-1,8,8,int(size(@0,3))] #x6.61=(1,48,48,192)f32 torch.permute torch.permute_2788 2 1 x7.61 12934 12935 $input=x7.61 $dims=12934 #x7.61=(1,6,8,6,8,192)f32 #12935=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_197 1 1 12935 12936 memory_format=torch.contiguous_format $input=12935 #12935=(1,6,6,8,8,192)f32 #12936=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_7015 1 1 12882 12939 expr=[-1,64,int(size(@0,2))] #12882=(1,2304,192)f32 pnnx.Expression pnnx_expr_7005 0 1 12950 expr=-2 pnnx.Expression pnnx_expr_7004 0 1 12951 expr=-1 pnnx.Attribute pnnx_unique_501 0 1 relative_position_bias_table.133 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.133=(225,6)f32 pnnx.Attribute pnnx_unique_502 0 1 relative_position_index.133 @relative_position_index=(64,64)i64 #relative_position_index.133=(64,64)i64 Tensor.view Tensor.view_1701 2 1 12936 12937 x_windows.133 $input=12936 $shape=12937 #12936=(1,6,6,8,8,192)f32 #x_windows.133=(36,8,8,192)f32 Tensor.view Tensor.view_1702 2 1 x_windows.133 12939 x8.61 $input=x_windows.133 $shape=12939 #x_windows.133=(36,8,8,192)f32 #x8.61=(36,64,192)f32 nn.Linear pnnx_unique_503 1 1 x8.61 12975 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.61=(36,64,192)f32 #12975=(36,64,576)f32 pnnx.Expression pnnx_expr_6985 1 
1 x8.61 12978 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.61=(36,64,192)f32 pnnx.Expression pnnx_expr_6980 0 1 12980 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_564 2 1 12975 12978 12979 $input=12975 $shape=12978 #12975=(36,64,576)f32 #12979=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6979 0 1 24183 expr=0 pnnx.Expression pnnx_expr_6978 0 1 24184 expr=0 pnnx.Expression pnnx_expr_6977 0 1 24185 expr=0 pnnx.Expression pnnx_expr_6976 0 1 24186 expr=1 pnnx.Expression pnnx_expr_6975 0 1 24187 expr=0 pnnx.Expression pnnx_expr_6974 0 1 24188 expr=2 torch.permute torch.permute_2789 2 1 12979 12980 qkv1.61 $input=12979 $dims=12980 #12979=(36,64,3,6,32)f32 #qkv1.61=(3,36,6,64,32)f32 Tensor.select Tensor.select_845 3 1 qkv1.61 24183 24184 q.133 $input=qkv1.61 $dim=24183 $index=24184 #qkv1.61=(3,36,6,64,32)f32 #q.133=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6973 1 1 q.133 q1.61 expr=mul(@0,1.767767e-01) #q.133=(36,6,64,32)f32 #q1.61=(36,6,64,32)f32 Tensor.select Tensor.select_846 3 1 qkv1.61 24185 24186 k.133 $input=qkv1.61 $dim=24185 $index=24186 #qkv1.61=(3,36,6,64,32)f32 #k.133=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6971 0 1 12988 expr=[-1] Tensor.view Tensor.view_1703 2 1 relative_position_index.133 12988 12989 $input=relative_position_index.133 $shape=12988 #relative_position_index.133=(64,64)i64 #12989=(4096)i64 pnnx.Expression pnnx_expr_6970 1 1 12989 12990 expr=[@0] #12989=(4096)i64 pnnx.Expression pnnx_expr_6967 0 1 12992 expr=[64,64,-1] Tensor.index Tensor.index_391 2 1 relative_position_bias_table.133 12990 12991 $input=relative_position_bias_table.133 $expr=12990 #relative_position_bias_table.133=(225,6)f32 #12991=(4096,6)f32 pnnx.Expression pnnx_expr_6963 0 1 12994 expr=[2,0,1] Tensor.view Tensor.view_1704 2 1 12991 12992 relative_position_bias.133 $input=12991 $shape=12992 #12991=(4096,6)f32 #relative_position_bias.133=(64,64,6)f32 pnnx.Expression pnnx_expr_6962 0 1 24196 expr=0 torch.permute torch.permute_2790 2 1 relative_position_bias.133 12994 12995 $input=relative_position_bias.133 $dims=12994 #relative_position_bias.133=(64,64,6)f32 #12995=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_198 1 1 12995 relative_position_bias1.61 memory_format=torch.contiguous_format $input=12995 #12995=(6,64,64)f32 #relative_position_bias1.61=(6,64,64)f32 torch.transpose torch.transpose_3113 3 1 k.133 12950 12951 12986 $input=k.133 $dim0=12950 $dim1=12951 #k.133=(36,6,64,32)f32 #12986=(36,6,32,64)f32 torch.matmul torch.matmul_2334 2 1 q1.61 12986 attn.267 $input=q1.61 $other=12986 #q1.61=(36,6,64,32)f32 #12986=(36,6,32,64)f32 #attn.267=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3347 2 1 relative_position_bias1.61 24196 12997 $input=relative_position_bias1.61 $dim=24196 #relative_position_bias1.61=(6,64,64)f32 #12997=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6960 2 1 attn.267 12997 attn2.31 expr=add(@0,@1) #attn.267=(36,6,64,64)f32 #12997=(1,6,64,64)f32 #attn2.31=(36,6,64,64)f32 pnnx.Expression pnnx_expr_6951 2 1 x8.61 attn_mask.67 13004 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.61=(36,64,192)f32 #attn_mask.67=(36,64,64)f32 pnnx.Expression pnnx_expr_6950 0 1 24201 expr=1 pnnx.Expression pnnx_expr_6949 0 1 24202 expr=0 Tensor.view Tensor.view_1705 2 1 attn2.31 13004 13005 $input=attn2.31 $shape=13004 #attn2.31=(36,6,64,64)f32 #13005=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3348 2 1 attn_mask.67 24201 13006 $input=attn_mask.67 $dim=24201 #attn_mask.67=(36,64,64)f32 #13006=(36,1,64,64)f32 torch.unsqueeze 
torch.unsqueeze_3349 2 1 13006 24202 13007 $input=13006 $dim=24202 #13006=(36,1,64,64)f32 #13007=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_6947 2 1 13005 13007 attn3.31 expr=add(@0,@1) #13005=(1,36,6,64,64)f32 #13007=(1,36,1,64,64)f32 #attn3.31=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_6944 1 1 x8.61 13009 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.61=(36,64,192)f32 Tensor.view Tensor.view_1706 2 1 attn3.31 13009 input.295 $input=attn3.31 $shape=13009 #attn3.31=(1,36,6,64,64)f32 #input.295=(36,6,64,64)f32 nn.Softmax pnnx_unique_504 1 1 input.295 13011 dim=-1 #input.295=(36,6,64,64)f32 #13011=(36,6,64,64)f32 nn.Dropout pnnx_unique_505 1 1 13011 13012 #13011=(36,6,64,64)f32 #13012=(36,6,64,64)f32 Tensor.select Tensor.select_847 3 1 qkv1.61 24187 24188 v.133 $input=qkv1.61 $dim=24187 $index=24188 #qkv1.61=(3,36,6,64,32)f32 #v.133=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6943 0 1 24206 expr=1 pnnx.Expression pnnx_expr_6942 0 1 24207 expr=2 torch.matmul torch.matmul_2335 2 1 13012 v.133 13013 $input=13012 $other=v.133 #13012=(36,6,64,64)f32 #v.133=(36,6,64,32)f32 #13013=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6941 1 1 x8.61 13015 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.61=(36,64,192)f32 torch.transpose torch.transpose_3114 3 1 13013 24206 24207 13014 $input=13013 $dim0=24206 $dim1=24207 #13013=(36,6,64,32)f32 #13014=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_565 2 1 13014 13015 input1.63 $input=13014 $shape=13015 #13014=(36,64,6,32)f32 #input1.63=(36,64,192)f32 nn.Linear pnnx_unique_506 1 1 input1.63 13017 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.63=(36,64,192)f32 #13017=(36,64,192)f32 nn.Dropout pnnx_unique_507 1 1 13017 13018 #13017=(36,64,192)f32 #13018=(36,64,192)f32 pnnx.Expression pnnx_expr_6937 1 1 12882 13019 expr=[-1,8,8,int(size(@0,2))] #12882=(1,2304,192)f32 pnnx.Expression pnnx_expr_6924 1 1 157 13025 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6917 0 1 13027 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1707 2 1 13018 13019 windows.133 $input=13018 $shape=13019 #13018=(36,64,192)f32 #windows.133=(36,8,8,192)f32 Tensor.view Tensor.view_1708 2 1 windows.133 13025 x9.61 $input=windows.133 $shape=13025 #windows.133=(36,8,8,192)f32 #x9.61=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6914 1 1 157 13030 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2791 2 1 x9.61 13027 13028 $input=x9.61 $dims=13027 #x9.61=(1,6,6,8,8,192)f32 #13028=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_199 1 1 13028 13029 memory_format=torch.contiguous_format $input=13028 #13028=(1,6,8,6,8,192)f32 #13029=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6911 0 1 13032 expr=[4,4] pnnx.Expression pnnx_expr_6908 0 1 13033 expr=[1,2] Tensor.view Tensor.view_1709 2 1 13029 13030 shifted_x.67 $input=13029 $shape=13030 #13029=(1,6,8,6,8,192)f32 #shifted_x.67=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6905 2 1 12882 157 13037 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #12882=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2485 3 1 shifted_x.67 13032 13033 x10.31 $input=shifted_x.67 $shifts=13032 $dims=13033 #shifted_x.67=(1,48,48,192)f32 #x10.31=(1,48,48,192)f32 Tensor.view Tensor.view_1710 2 1 x10.31 13037 x11.31 $input=x10.31 $shape=13037 #x10.31=(1,48,48,192)f32 #x11.31=(1,2304,192)f32 pnnx.Expression pnnx_expr_6903 2 1 12882 x11.31 input.297 expr=add(@0,@1) #12882=(1,2304,192)f32 
nn.LayerNorm pnnx_unique_508 1 1 input.297 13041 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.297=(1,2304,192)f32 #13041=(1,2304,192)f32 nn.Linear pnnx_unique_509 1 1 13041 13046 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #13041=(1,2304,192)f32 #13046=(1,2304,384)f32 nn.GELU pnnx_unique_510 1 1 13046 13047 #13046=(1,2304,384)f32 #13047=(1,2304,384)f32 nn.Dropout pnnx_unique_511 1 1 13047 13048 #13047=(1,2304,384)f32 #13048=(1,2304,384)f32 nn.Linear pnnx_unique_512 1 1 13048 13049 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13048=(1,2304,384)f32 #13049=(1,2304,192)f32 nn.Dropout pnnx_unique_513 1 1 13049 13050 #13049=(1,2304,192)f32 #13050=(1,2304,192)f32 pnnx.Expression pnnx_expr_6901 2 1 input.297 13050 13052 expr=add(@0,@1) #input.297=(1,2304,192)f32 #13050=(1,2304,192)f32 #13052=(1,2304,192)f32 pnnx.Expression pnnx_expr_6899 0 1 13054 expr=1 pnnx.Expression pnnx_expr_6898 0 1 13055 expr=2 pnnx.Expression pnnx_expr_6893 2 1 13052 157 13061 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #13052=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3115 3 1 13052 13054 13055 13060 $input=13052 $dim0=13054 $dim1=13055 #13052=(1,2304,192)f32 #13060=(1,192,2304)f32 Tensor.view Tensor.view_1711 2 1 13060 13061 input.299 $input=13060 $shape=13061 #13060=(1,192,2304)f32 #input.299=(1,192,48,48)f32 nn.Conv2d pnnx_unique_514 1 1 input.299 13063 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.299=(1,192,48,48)f32 #13063=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6892 0 1 13064 expr=-1 pnnx.Expression pnnx_expr_6891 0 1 13065 expr=2 pnnx.Expression pnnx_expr_6890 0 1 13066 expr=1 pnnx.Expression pnnx_expr_6889 0 1 24234 expr=2 torch.flatten torch.flatten_2195 3 1 13063 13065 13064 13067 $input=13063 $start_dim=13065 $end_dim=13064 #13063=(1,192,48,48)f32 #13067=(1,192,2304)f32 torch.transpose torch.transpose_3116 3 1 13067 13066 24234 13068 $input=13067 $dim0=13066 $dim1=24234 #13067=(1,192,2304)f32 #13068=(1,2304,192)f32 pnnx.Expression pnnx_expr_6888 2 1 13068 12087 13069 expr=add(@0,@1) #13068=(1,2304,192)f32 #12087=(1,2304,192)f32 #13069=(1,2304,192)f32 nn.LayerNorm pnnx_unique_515 1 1 13069 13113 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13069=(1,2304,192)f32 #13113=(1,2304,192)f32 pnnx.Expression pnnx_expr_6865 2 1 13069 157 13114 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13069=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1712 2 1 13113 13114 x.135 $input=13113 $shape=13114 #13113=(1,2304,192)f32 #x.135=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6844 1 1 x.135 13131 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.135=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6839 0 1 13133 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1713 2 1 x.135 13131 x5.69 $input=x.135 $shape=13131 #x.135=(1,48,48,192)f32 #x5.69=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6836 1 1 x.135 13136 expr=[-1,8,8,int(size(@0,3))] #x.135=(1,48,48,192)f32 torch.permute torch.permute_2792 2 1 x5.69 13133 13134 $input=x5.69 $dims=13133 #x5.69=(1,6,8,6,8,192)f32 #13134=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_200 1 1 13134 13135
memory_format=torch.contiguous_format $input=13134 #13134=(1,6,6,8,8,192)f32 #13135=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6834 1 1 13069 13138 expr=[-1,64,int(size(@0,2))] #13069=(1,2304,192)f32 pnnx.Expression pnnx_expr_6824 0 1 13149 expr=-2 pnnx.Expression pnnx_expr_6823 0 1 13150 expr=-1 pnnx.Attribute pnnx_unique_516 0 1 relative_position_bias_table.135 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.135=(225,6)f32 pnnx.Attribute pnnx_unique_517 0 1 relative_position_index.135 @relative_position_index=(64,64)i64 #relative_position_index.135=(64,64)i64 Tensor.view Tensor.view_1714 2 1 13135 13136 x_windows.135 $input=13135 $shape=13136 #13135=(1,6,6,8,8,192)f32 #x_windows.135=(36,8,8,192)f32 Tensor.view Tensor.view_1715 2 1 x_windows.135 13138 x6.2 $input=x_windows.135 $shape=13138 #x_windows.135=(36,8,8,192)f32 #x6.2=(36,64,192)f32 nn.Linear pnnx_unique_518 1 1 x6.2 13170 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.2=(36,64,192)f32 #13170=(36,64,576)f32 pnnx.Expression pnnx_expr_6808 1 1 x6.2 13173 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.2=(36,64,192)f32 pnnx.Expression pnnx_expr_6803 0 1 13175 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_566 2 1 13170 13173 13174 $input=13170 $shape=13173 #13170=(36,64,576)f32 #13174=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6802 0 1 24252 expr=0 pnnx.Expression pnnx_expr_6801 0 1 24253 expr=0 pnnx.Expression pnnx_expr_6800 0 1 24254 expr=0 pnnx.Expression pnnx_expr_6799 0 1 24255 expr=1 pnnx.Expression pnnx_expr_6798 0 1 24256 expr=0 pnnx.Expression pnnx_expr_6797 0 1 24257 expr=2 torch.permute torch.permute_2793 2 1 13174 13175 qkv1.2 $input=13174 $dims=13175 #13174=(36,64,3,6,32)f32 #qkv1.2=(3,36,6,64,32)f32 Tensor.select Tensor.select_848 3 1 qkv1.2 24252 24253 q.135 $input=qkv1.2 $dim=24252 $index=24253 #qkv1.2=(3,36,6,64,32)f32 #q.135=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6796 1 1 q.135 q1.2 expr=mul(@0,1.767767e-01) #q.135=(36,6,64,32)f32 #q1.2=(36,6,64,32)f32 Tensor.select Tensor.select_849 3 1 qkv1.2 24254 24255 k.135 $input=qkv1.2 $dim=24254 $index=24255 #qkv1.2=(3,36,6,64,32)f32 #k.135=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6794 0 1 13183 expr=[-1] Tensor.view Tensor.view_1716 2 1 relative_position_index.135 13183 13184 $input=relative_position_index.135 $shape=13183 #relative_position_index.135=(64,64)i64 #13184=(4096)i64 pnnx.Expression pnnx_expr_6793 1 1 13184 13185 expr=[@0] #13184=(4096)i64 pnnx.Expression pnnx_expr_6790 0 1 13187 expr=[64,64,-1] Tensor.index Tensor.index_392 2 1 relative_position_bias_table.135 13185 13186 $input=relative_position_bias_table.135 $expr=13185 #relative_position_bias_table.135=(225,6)f32 #13186=(4096,6)f32 pnnx.Expression pnnx_expr_6786 0 1 13189 expr=[2,0,1] Tensor.view Tensor.view_1717 2 1 13186 13187 relative_position_bias.135 $input=13186 $shape=13187 #13186=(4096,6)f32 #relative_position_bias.135=(64,64,6)f32 pnnx.Expression pnnx_expr_6785 0 1 24265 expr=0 torch.permute torch.permute_2794 2 1 relative_position_bias.135 13189 13190 $input=relative_position_bias.135 $dims=13189 #relative_position_bias.135=(64,64,6)f32 #13190=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_201 1 1 13190 relative_position_bias1.2 memory_format=torch.contiguous_format $input=13190 #13190=(6,64,64)f32 #relative_position_bias1.2=(6,64,64)f32 torch.transpose torch.transpose_3117 3 1 k.135 13149 13150 13181 $input=k.135 $dim0=13149 $dim1=13150 #k.135=(36,6,64,32)f32 #13181=(36,6,32,64)f32 torch.matmul 
torch.matmul_2336 2 1 q1.2 13181 attn.271 $input=q1.2 $other=13181 #q1.2=(36,6,64,32)f32 #13181=(36,6,32,64)f32 #attn.271=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3350 2 1 relative_position_bias1.2 24265 13192 $input=relative_position_bias1.2 $dim=24265 #relative_position_bias1.2=(6,64,64)f32 #13192=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6783 2 1 attn.271 13192 input.307 expr=add(@0,@1) #attn.271=(36,6,64,64)f32 #13192=(1,6,64,64)f32 #input.307=(36,6,64,64)f32 nn.Softmax pnnx_unique_519 1 1 input.307 13194 dim=-1 #input.307=(36,6,64,64)f32 #13194=(36,6,64,64)f32 nn.Dropout pnnx_unique_520 1 1 13194 13195 #13194=(36,6,64,64)f32 #13195=(36,6,64,64)f32 Tensor.select Tensor.select_850 3 1 qkv1.2 24256 24257 v.135 $input=qkv1.2 $dim=24256 $index=24257 #qkv1.2=(3,36,6,64,32)f32 #v.135=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6782 0 1 24267 expr=1 pnnx.Expression pnnx_expr_6781 0 1 24268 expr=2 torch.matmul torch.matmul_2337 2 1 13195 v.135 13196 $input=13195 $other=v.135 #13195=(36,6,64,64)f32 #v.135=(36,6,64,32)f32 #13196=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6780 1 1 x6.2 13198 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.2=(36,64,192)f32 torch.transpose torch.transpose_3118 3 1 13196 24267 24268 13197 $input=13196 $dim0=24267 $dim1=24268 #13196=(36,6,64,32)f32 #13197=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_567 2 1 13197 13198 input1.2 $input=13197 $shape=13198 #13197=(36,64,6,32)f32 #input1.2=(36,64,192)f32 nn.Linear pnnx_unique_521 1 1 input1.2 13200 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.2=(36,64,192)f32 #13200=(36,64,192)f32 nn.Dropout pnnx_unique_522 1 1 13200 13201 #13200=(36,64,192)f32 #13201=(36,64,192)f32 pnnx.Expression pnnx_expr_6776 1 1 13069 13202 expr=[-1,8,8,int(size(@0,2))] #13069=(1,2304,192)f32 pnnx.Expression pnnx_expr_6763 1 1 157 13208 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6756 0 1 13210 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1718 2 1 13201 13202 windows.135 $input=13201 $shape=13202 #13201=(36,64,192)f32 #windows.135=(36,8,8,192)f32 Tensor.view Tensor.view_1719 2 1 windows.135 13208 x7.2 $input=windows.135 $shape=13208 #windows.135=(36,8,8,192)f32 #x7.2=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6753 1 1 157 13213 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2795 2 1 x7.2 13210 13211 $input=x7.2 $dims=13210 #x7.2=(1,6,6,8,8,192)f32 #13211=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_202 1 1 13211 13212 memory_format=torch.contiguous_format $input=13211 #13211=(1,6,8,6,8,192)f32 #13212=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6750 2 1 13069 157 13217 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13069=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1720 2 1 13212 13213 x8.2 $input=13212 $shape=13213 #13212=(1,6,8,6,8,192)f32 #x8.2=(1,48,48,192)f32 Tensor.view Tensor.view_1721 2 1 x8.2 13217 x9.2 $input=x8.2 $shape=13217 #x8.2=(1,48,48,192)f32 #x9.2=(1,2304,192)f32 pnnx.Expression pnnx_expr_6748 2 1 13069 x9.2 input.309 expr=add(@0,@1) #13069=(1,2304,192)f32 #x9.2=(1,2304,192)f32 #input.309=(1,2304,192)f32 nn.LayerNorm pnnx_unique_523 1 1 input.309 13221 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.309=(1,2304,192)f32 #13221=(1,2304,192)f32 nn.Linear pnnx_unique_524 1 1 13221 13226 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 
#13221=(1,2304,192)f32 #13226=(1,2304,384)f32 nn.GELU pnnx_unique_525 1 1 13226 13227 #13226=(1,2304,384)f32 #13227=(1,2304,384)f32 nn.Dropout pnnx_unique_526 1 1 13227 13228 #13227=(1,2304,384)f32 #13228=(1,2304,384)f32 nn.Linear pnnx_unique_527 1 1 13228 13229 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13228=(1,2304,384)f32 #13229=(1,2304,192)f32 nn.Dropout pnnx_unique_528 1 1 13229 13230 #13229=(1,2304,192)f32 #13230=(1,2304,192)f32 pnnx.Expression pnnx_expr_6746 2 1 input.309 13230 13232 expr=add(@0,@1) #input.309=(1,2304,192)f32 #13230=(1,2304,192)f32 #13232=(1,2304,192)f32 pnnx.Attribute pnnx_unique_529 0 1 attn_mask.69 @attn_mask=(36,64,64)f32 #attn_mask.69=(36,64,64)f32 nn.LayerNorm pnnx_unique_530 1 1 13232 13261 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13232=(1,2304,192)f32 #13261=(1,2304,192)f32 pnnx.Expression pnnx_expr_6723 2 1 13232 157 13262 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13232=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6721 0 1 13264 expr=[-4,-4] pnnx.Expression pnnx_expr_6719 0 1 13265 expr=[1,2] Tensor.view Tensor.view_1722 2 1 13261 13262 x.137 $input=13261 $shape=13262 #13261=(1,2304,192)f32 #x.137=(1,48,48,192)f32 torch.roll torch.roll_2486 3 1 x.137 13264 13265 x6.4 $input=x.137 $shifts=13264 $dims=13265 #x.137=(1,48,48,192)f32 #x6.4=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6697 1 1 x6.4 13282 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.4=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6692 0 1 13284 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1723 2 1 x6.4 13282 x7.4 $input=x6.4 $shape=13282 #x6.4=(1,48,48,192)f32 #x7.4=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6689 1 1 x6.4 13287 expr=[-1,8,8,int(size(@0,3))] #x6.4=(1,48,48,192)f32 torch.permute torch.permute_2796 2 1 x7.4 13284 13285 $input=x7.4 $dims=13284 #x7.4=(1,6,8,6,8,192)f32 #13285=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_203 1 1 13285 13286 memory_format=torch.contiguous_format $input=13285 #13285=(1,6,6,8,8,192)f32 #13286=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6687 1 1 13232 13289 expr=[-1,64,int(size(@0,2))] #13232=(1,2304,192)f32 pnnx.Expression pnnx_expr_6677 0 1 13300 expr=-2 pnnx.Expression pnnx_expr_6676 0 1 13301 expr=-1 pnnx.Attribute pnnx_unique_531 0 1 relative_position_bias_table.137 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.137=(225,6)f32 pnnx.Attribute pnnx_unique_532 0 1 relative_position_index.137 @relative_position_index=(64,64)i64 #relative_position_index.137=(64,64)i64 Tensor.view Tensor.view_1724 2 1 13286 13287 x_windows.137 $input=13286 $shape=13287 #13286=(1,6,6,8,8,192)f32 #x_windows.137=(36,8,8,192)f32 Tensor.view Tensor.view_1725 2 1 x_windows.137 13289 x8.4 $input=x_windows.137 $shape=13289 #x_windows.137=(36,8,8,192)f32 #x8.4=(36,64,192)f32 nn.Linear pnnx_unique_533 1 1 x8.4 13325 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.4=(36,64,192)f32 #13325=(36,64,576)f32 pnnx.Expression pnnx_expr_6657 1 1 x8.4 13328 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.4=(36,64,192)f32 pnnx.Expression pnnx_expr_6652 0 1 13330 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_568 2 1 13325 13328 13329 $input=13325 $shape=13328 #13325=(36,64,576)f32 #13329=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6651 0 1 24311 expr=0 pnnx.Expression pnnx_expr_6650 0 1 24312 expr=0 pnnx.Expression 
pnnx_expr_6649 0 1 24313 expr=0 pnnx.Expression pnnx_expr_6648 0 1 24314 expr=1 pnnx.Expression pnnx_expr_6647 0 1 24315 expr=0 pnnx.Expression pnnx_expr_6646 0 1 24316 expr=2 torch.permute torch.permute_2797 2 1 13329 13330 qkv1.4 $input=13329 $dims=13330 #13329=(36,64,3,6,32)f32 #qkv1.4=(3,36,6,64,32)f32 Tensor.select Tensor.select_851 3 1 qkv1.4 24311 24312 q.137 $input=qkv1.4 $dim=24311 $index=24312 #qkv1.4=(3,36,6,64,32)f32 #q.137=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6645 1 1 q.137 q1.4 expr=mul(@0,1.767767e-01) #q.137=(36,6,64,32)f32 #q1.4=(36,6,64,32)f32 Tensor.select Tensor.select_852 3 1 qkv1.4 24313 24314 k.137 $input=qkv1.4 $dim=24313 $index=24314 #qkv1.4=(3,36,6,64,32)f32 #k.137=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6643 0 1 13338 expr=[-1] Tensor.view Tensor.view_1726 2 1 relative_position_index.137 13338 13339 $input=relative_position_index.137 $shape=13338 #relative_position_index.137=(64,64)i64 #13339=(4096)i64 pnnx.Expression pnnx_expr_6642 1 1 13339 13340 expr=[@0] #13339=(4096)i64 pnnx.Expression pnnx_expr_6639 0 1 13342 expr=[64,64,-1] Tensor.index Tensor.index_393 2 1 relative_position_bias_table.137 13340 13341 $input=relative_position_bias_table.137 $expr=13340 #relative_position_bias_table.137=(225,6)f32 #13341=(4096,6)f32 pnnx.Expression pnnx_expr_6635 0 1 13344 expr=[2,0,1] Tensor.view Tensor.view_1727 2 1 13341 13342 relative_position_bias.137 $input=13341 $shape=13342 #13341=(4096,6)f32 #relative_position_bias.137=(64,64,6)f32 pnnx.Expression pnnx_expr_6634 0 1 24324 expr=0 torch.permute torch.permute_2798 2 1 relative_position_bias.137 13344 13345 $input=relative_position_bias.137 $dims=13344 #relative_position_bias.137=(64,64,6)f32 #13345=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_204 1 1 13345 relative_position_bias1.4 memory_format=torch.contiguous_format $input=13345 #13345=(6,64,64)f32 #relative_position_bias1.4=(6,64,64)f32 torch.transpose torch.transpose_3119 3 1 k.137 13300 13301 13336 $input=k.137 $dim0=13300 $dim1=13301 #k.137=(36,6,64,32)f32 #13336=(36,6,32,64)f32 torch.matmul torch.matmul_2338 2 1 q1.4 13336 attn.275 $input=q1.4 $other=13336 #q1.4=(36,6,64,32)f32 #13336=(36,6,32,64)f32 #attn.275=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3351 2 1 relative_position_bias1.4 24324 13347 $input=relative_position_bias1.4 $dim=24324 #relative_position_bias1.4=(6,64,64)f32 #13347=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6632 2 1 attn.275 13347 attn2.2 expr=add(@0,@1) #attn.275=(36,6,64,64)f32 #13347=(1,6,64,64)f32 #attn2.2=(36,6,64,64)f32 pnnx.Expression pnnx_expr_6623 2 1 x8.4 attn_mask.69 13354 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.4=(36,64,192)f32 #attn_mask.69=(36,64,64)f32 pnnx.Expression pnnx_expr_6622 0 1 24329 expr=1 pnnx.Expression pnnx_expr_6621 0 1 24330 expr=0 Tensor.view Tensor.view_1728 2 1 attn2.2 13354 13355 $input=attn2.2 $shape=13354 #attn2.2=(36,6,64,64)f32 #13355=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3352 2 1 attn_mask.69 24329 13356 $input=attn_mask.69 $dim=24329 #attn_mask.69=(36,64,64)f32 #13356=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3353 2 1 13356 24330 13357 $input=13356 $dim=24330 #13356=(36,1,64,64)f32 #13357=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_6619 2 1 13355 13357 attn3.2 expr=add(@0,@1) #13355=(1,36,6,64,64)f32 #13357=(1,36,1,64,64)f32 #attn3.2=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_6616 1 1 x8.4 13359 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.4=(36,64,192)f32 Tensor.view Tensor.view_1729 2 1 attn3.2 13359 
input.311 $input=attn3.2 $shape=13359 #attn3.2=(1,36,6,64,64)f32 #input.311=(36,6,64,64)f32 nn.Softmax pnnx_unique_534 1 1 input.311 13361 dim=-1 #input.311=(36,6,64,64)f32 #13361=(36,6,64,64)f32 nn.Dropout pnnx_unique_535 1 1 13361 13362 #13361=(36,6,64,64)f32 #13362=(36,6,64,64)f32 Tensor.select Tensor.select_853 3 1 qkv1.4 24315 24316 v.137 $input=qkv1.4 $dim=24315 $index=24316 #qkv1.4=(3,36,6,64,32)f32 #v.137=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6615 0 1 24334 expr=1 pnnx.Expression pnnx_expr_6614 0 1 24335 expr=2 torch.matmul torch.matmul_2339 2 1 13362 v.137 13363 $input=13362 $other=v.137 #13362=(36,6,64,64)f32 #v.137=(36,6,64,32)f32 #13363=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6613 1 1 x8.4 13365 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.4=(36,64,192)f32 torch.transpose torch.transpose_3120 3 1 13363 24334 24335 13364 $input=13363 $dim0=24334 $dim1=24335 #13363=(36,6,64,32)f32 #13364=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_569 2 1 13364 13365 input1.4 $input=13364 $shape=13365 #13364=(36,64,6,32)f32 #input1.4=(36,64,192)f32 nn.Linear pnnx_unique_536 1 1 input1.4 13367 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.4=(36,64,192)f32 #13367=(36,64,192)f32 nn.Dropout pnnx_unique_537 1 1 13367 13368 #13367=(36,64,192)f32 #13368=(36,64,192)f32 pnnx.Expression pnnx_expr_6609 1 1 13232 13369 expr=[-1,8,8,int(size(@0,2))] #13232=(1,2304,192)f32 pnnx.Expression pnnx_expr_6596 1 1 157 13375 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6589 0 1 13377 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1730 2 1 13368 13369 windows.137 $input=13368 $shape=13369 #13368=(36,64,192)f32 #windows.137=(36,8,8,192)f32 Tensor.view Tensor.view_1731 2 1 windows.137 13375 x9.4 $input=windows.137 $shape=13375 #windows.137=(36,8,8,192)f32 #x9.4=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6586 1 1 157 13380 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2799 2 1 x9.4 13377 13378 $input=x9.4 $dims=13377 #x9.4=(1,6,6,8,8,192)f32 #13378=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_205 1 1 13378 13379 memory_format=torch.contiguous_format $input=13378 #13378=(1,6,8,6,8,192)f32 #13379=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6583 0 1 13382 expr=[4,4] pnnx.Expression pnnx_expr_6580 0 1 13383 expr=[1,2] Tensor.view Tensor.view_1732 2 1 13379 13380 shifted_x.69 $input=13379 $shape=13380 #13379=(1,6,8,6,8,192)f32 #shifted_x.69=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6577 2 1 13232 157 13387 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13232=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2487 3 1 shifted_x.69 13382 13383 x10.2 $input=shifted_x.69 $shifts=13382 $dims=13383 #shifted_x.69=(1,48,48,192)f32 #x10.2=(1,48,48,192)f32 Tensor.view Tensor.view_1733 2 1 x10.2 13387 x11.2 $input=x10.2 $shape=13387 #x10.2=(1,48,48,192)f32 #x11.2=(1,2304,192)f32 pnnx.Expression pnnx_expr_6575 2 1 13232 x11.2 input.313 expr=add(@0,@1) #13232=(1,2304,192)f32 #x11.2=(1,2304,192)f32 #input.313=(1,2304,192)f32 nn.LayerNorm pnnx_unique_538 1 1 input.313 13391 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.313=(1,2304,192)f32 #13391=(1,2304,192)f32 nn.Linear pnnx_unique_539 1 1 13391 13396 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #13391=(1,2304,192)f32 #13396=(1,2304,384)f32 nn.GELU pnnx_unique_540 1 1 13396 13397 
#13396=(1,2304,384)f32 #13397=(1,2304,384)f32 nn.Dropout pnnx_unique_541 1 1 13397 13398 #13397=(1,2304,384)f32 #13398=(1,2304,384)f32 nn.Linear pnnx_unique_542 1 1 13398 13399 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13398=(1,2304,384)f32 #13399=(1,2304,192)f32 nn.Dropout pnnx_unique_543 1 1 13399 13400 #13399=(1,2304,192)f32 #13400=(1,2304,192)f32 pnnx.Expression pnnx_expr_6573 2 1 input.313 13400 13402 expr=add(@0,@1) #input.313=(1,2304,192)f32 #13400=(1,2304,192)f32 #13402=(1,2304,192)f32 nn.LayerNorm pnnx_unique_544 1 1 13402 13429 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13402=(1,2304,192)f32 #13429=(1,2304,192)f32 pnnx.Expression pnnx_expr_6551 2 1 13402 157 13430 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13402=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1734 2 1 13429 13430 x.139 $input=13429 $shape=13430 #13429=(1,2304,192)f32 #x.139=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6530 1 1 x.139 13447 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.139=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6525 0 1 13449 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1735 2 1 x.139 13447 x5.71 $input=x.139 $shape=13447 #x.139=(1,48,48,192)f32 #x5.71=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6522 1 1 x.139 13452 expr=[-1,8,8,int(size(@0,3))] #x.139=(1,48,48,192)f32 torch.permute torch.permute_2800 2 1 x5.71 13449 13450 $input=x5.71 $dims=13449 #x5.71=(1,6,8,6,8,192)f32 #13450=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_206 1 1 13450 13451 memory_format=torch.contiguous_format $input=13450 #13450=(1,6,6,8,8,192)f32 #13451=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6520 1 1 13402 13454 expr=[-1,64,int(size(@0,2))] #13402=(1,2304,192)f32 pnnx.Expression pnnx_expr_6510 0 1 13465 expr=-2 pnnx.Expression pnnx_expr_6509 0 1 13466 expr=-1 pnnx.Attribute pnnx_unique_545 0 1 relative_position_bias_table.139 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.139=(225,6)f32 pnnx.Attribute pnnx_unique_546 0 1 relative_position_index.139 @relative_position_index=(64,64)i64 #relative_position_index.139=(64,64)i64 Tensor.view Tensor.view_1736 2 1 13451 13452 x_windows.139 $input=13451 $shape=13452 #13451=(1,6,6,8,8,192)f32 #x_windows.139=(36,8,8,192)f32 Tensor.view Tensor.view_1737 2 1 x_windows.139 13454 x6.6 $input=x_windows.139 $shape=13454 #x_windows.139=(36,8,8,192)f32 #x6.6=(36,64,192)f32 nn.Linear pnnx_unique_547 1 1 x6.6 13486 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.6=(36,64,192)f32 #13486=(36,64,576)f32 pnnx.Expression pnnx_expr_6494 1 1 x6.6 13489 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.6=(36,64,192)f32 pnnx.Expression pnnx_expr_6489 0 1 13491 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_570 2 1 13486 13489 13490 $input=13486 $shape=13489 #13486=(36,64,576)f32 #13490=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6488 0 1 24379 expr=0 pnnx.Expression pnnx_expr_6487 0 1 24380 expr=0 pnnx.Expression pnnx_expr_6486 0 1 24381 expr=0 pnnx.Expression pnnx_expr_6485 0 1 24382 expr=1 pnnx.Expression pnnx_expr_6484 0 1 24383 expr=0 pnnx.Expression pnnx_expr_6483 0 1 24384 expr=2 torch.permute torch.permute_2801 2 1 13490 13491 qkv1.6 $input=13490 $dims=13491 #13490=(36,64,3,6,32)f32 #qkv1.6=(3,36,6,64,32)f32 Tensor.select Tensor.select_854 3 1 qkv1.6 24379 24380 q.139 $input=qkv1.6 $dim=24379 $index=24380 
#qkv1.6=(3,36,6,64,32)f32 #q.139=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6482 1 1 q.139 q1.6 expr=mul(@0,1.767767e-01) #q.139=(36,6,64,32)f32 #q1.6=(36,6,64,32)f32 Tensor.select Tensor.select_855 3 1 qkv1.6 24381 24382 k.139 $input=qkv1.6 $dim=24381 $index=24382 #qkv1.6=(3,36,6,64,32)f32 #k.139=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6480 0 1 13499 expr=[-1] Tensor.view Tensor.view_1738 2 1 relative_position_index.139 13499 13500 $input=relative_position_index.139 $shape=13499 #relative_position_index.139=(64,64)i64 #13500=(4096)i64 pnnx.Expression pnnx_expr_6479 1 1 13500 13501 expr=[@0] #13500=(4096)i64 pnnx.Expression pnnx_expr_6476 0 1 13503 expr=[64,64,-1] Tensor.index Tensor.index_394 2 1 relative_position_bias_table.139 13501 13502 $input=relative_position_bias_table.139 $expr=13501 #relative_position_bias_table.139=(225,6)f32 #13502=(4096,6)f32 pnnx.Expression pnnx_expr_6472 0 1 13505 expr=[2,0,1] Tensor.view Tensor.view_1739 2 1 13502 13503 relative_position_bias.139 $input=13502 $shape=13503 #13502=(4096,6)f32 #relative_position_bias.139=(64,64,6)f32 pnnx.Expression pnnx_expr_6471 0 1 24392 expr=0 torch.permute torch.permute_2802 2 1 relative_position_bias.139 13505 13506 $input=relative_position_bias.139 $dims=13505 #relative_position_bias.139=(64,64,6)f32 #13506=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_207 1 1 13506 relative_position_bias1.6 memory_format=torch.contiguous_format $input=13506 #13506=(6,64,64)f32 #relative_position_bias1.6=(6,64,64)f32 torch.transpose torch.transpose_3121 3 1 k.139 13465 13466 13497 $input=k.139 $dim0=13465 $dim1=13466 #k.139=(36,6,64,32)f32 #13497=(36,6,32,64)f32 torch.matmul torch.matmul_2340 2 1 q1.6 13497 attn.279 $input=q1.6 $other=13497 #q1.6=(36,6,64,32)f32 #13497=(36,6,32,64)f32 #attn.279=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3354 2 1 relative_position_bias1.6 24392 13508 $input=relative_position_bias1.6 $dim=24392 #relative_position_bias1.6=(6,64,64)f32 #13508=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6469 2 1 attn.279 13508 input.315 expr=add(@0,@1) #attn.279=(36,6,64,64)f32 #13508=(1,6,64,64)f32 #input.315=(36,6,64,64)f32 nn.Softmax pnnx_unique_548 1 1 input.315 13510 dim=-1 #input.315=(36,6,64,64)f32 #13510=(36,6,64,64)f32 nn.Dropout pnnx_unique_549 1 1 13510 13511 #13510=(36,6,64,64)f32 #13511=(36,6,64,64)f32 Tensor.select Tensor.select_856 3 1 qkv1.6 24383 24384 v.139 $input=qkv1.6 $dim=24383 $index=24384 #qkv1.6=(3,36,6,64,32)f32 #v.139=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6468 0 1 24394 expr=1 pnnx.Expression pnnx_expr_6467 0 1 24395 expr=2 torch.matmul torch.matmul_2341 2 1 13511 v.139 13512 $input=13511 $other=v.139 #13511=(36,6,64,64)f32 #v.139=(36,6,64,32)f32 #13512=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6466 1 1 x6.6 13514 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.6=(36,64,192)f32 torch.transpose torch.transpose_3122 3 1 13512 24394 24395 13513 $input=13512 $dim0=24394 $dim1=24395 #13512=(36,6,64,32)f32 #13513=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_571 2 1 13513 13514 input1.6 $input=13513 $shape=13514 #13513=(36,64,6,32)f32 #input1.6=(36,64,192)f32 nn.Linear pnnx_unique_550 1 1 input1.6 13516 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.6=(36,64,192)f32 #13516=(36,64,192)f32 nn.Dropout pnnx_unique_551 1 1 13516 13517 #13516=(36,64,192)f32 #13517=(36,64,192)f32 pnnx.Expression pnnx_expr_6462 1 1 13402 13518 expr=[-1,8,8,int(size(@0,2))] #13402=(1,2304,192)f32 pnnx.Expression pnnx_expr_6449 1 1 157 13524 
expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6442 0 1 13526 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1740 2 1 13517 13518 windows.139 $input=13517 $shape=13518 #13517=(36,64,192)f32 #windows.139=(36,8,8,192)f32 Tensor.view Tensor.view_1741 2 1 windows.139 13524 x7.6 $input=windows.139 $shape=13524 #windows.139=(36,8,8,192)f32 #x7.6=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6439 1 1 157 13529 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2803 2 1 x7.6 13526 13527 $input=x7.6 $dims=13526 #x7.6=(1,6,6,8,8,192)f32 #13527=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_208 1 1 13527 13528 memory_format=torch.contiguous_format $input=13527 #13527=(1,6,8,6,8,192)f32 #13528=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6436 2 1 13402 157 13533 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13402=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1742 2 1 13528 13529 x8.6 $input=13528 $shape=13529 #13528=(1,6,8,6,8,192)f32 #x8.6=(1,48,48,192)f32 Tensor.view Tensor.view_1743 2 1 x8.6 13533 x9.6 $input=x8.6 $shape=13533 #x8.6=(1,48,48,192)f32 #x9.6=(1,2304,192)f32 pnnx.Expression pnnx_expr_6434 2 1 13402 x9.6 input.317 expr=add(@0,@1) #13402=(1,2304,192)f32 #x9.6=(1,2304,192)f32 #input.317=(1,2304,192)f32 nn.LayerNorm pnnx_unique_552 1 1 input.317 13537 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.317=(1,2304,192)f32 #13537=(1,2304,192)f32 nn.Linear pnnx_unique_553 1 1 13537 13542 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #13537=(1,2304,192)f32 #13542=(1,2304,384)f32 nn.GELU pnnx_unique_554 1 1 13542 13543 #13542=(1,2304,384)f32 #13543=(1,2304,384)f32 nn.Dropout pnnx_unique_555 1 1 13543 13544 #13543=(1,2304,384)f32 #13544=(1,2304,384)f32 nn.Linear pnnx_unique_556 1 1 13544 13545 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13544=(1,2304,384)f32 #13545=(1,2304,192)f32 nn.Dropout pnnx_unique_557 1 1 13545 13546 #13545=(1,2304,192)f32 #13546=(1,2304,192)f32 pnnx.Expression pnnx_expr_6432 2 1 input.317 13546 13548 expr=add(@0,@1) #input.317=(1,2304,192)f32 #13546=(1,2304,192)f32 #13548=(1,2304,192)f32 pnnx.Attribute pnnx_unique_558 0 1 attn_mask.71 @attn_mask=(36,64,64)f32 #attn_mask.71=(36,64,64)f32 nn.LayerNorm pnnx_unique_559 1 1 13548 13577 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13548=(1,2304,192)f32 #13577=(1,2304,192)f32 pnnx.Expression pnnx_expr_6409 2 1 13548 157 13578 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13548=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6407 0 1 13580 expr=[-4,-4] pnnx.Expression pnnx_expr_6405 0 1 13581 expr=[1,2] Tensor.view Tensor.view_1744 2 1 13577 13578 x.141 $input=13577 $shape=13578 #13577=(1,2304,192)f32 #x.141=(1,48,48,192)f32 torch.roll torch.roll_2488 3 1 x.141 13580 13581 x6.8 $input=x.141 $shifts=13580 $dims=13581 #x.141=(1,48,48,192)f32 #x6.8=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6383 1 1 x6.8 13598 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.8=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6378 0 1 13600 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1745 2 1 x6.8 13598 x7.8 $input=x6.8 $shape=13598 #x6.8=(1,48,48,192)f32 #x7.8=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6375 1 1 x6.8 13603 expr=[-1,8,8,int(size(@0,3))] 
#x6.8=(1,48,48,192)f32 torch.permute torch.permute_2804 2 1 x7.8 13600 13601 $input=x7.8 $dims=13600 #x7.8=(1,6,8,6,8,192)f32 #13601=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_209 1 1 13601 13602 memory_format=torch.contiguous_format $input=13601 #13601=(1,6,6,8,8,192)f32 #13602=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6373 1 1 13548 13605 expr=[-1,64,int(size(@0,2))] #13548=(1,2304,192)f32 pnnx.Expression pnnx_expr_6363 0 1 13616 expr=-2 pnnx.Expression pnnx_expr_6362 0 1 13617 expr=-1 pnnx.Attribute pnnx_unique_560 0 1 relative_position_bias_table.141 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.141=(225,6)f32 pnnx.Attribute pnnx_unique_561 0 1 relative_position_index.141 @relative_position_index=(64,64)i64 #relative_position_index.141=(64,64)i64 Tensor.view Tensor.view_1746 2 1 13602 13603 x_windows.141 $input=13602 $shape=13603 #13602=(1,6,6,8,8,192)f32 #x_windows.141=(36,8,8,192)f32 Tensor.view Tensor.view_1747 2 1 x_windows.141 13605 x8.8 $input=x_windows.141 $shape=13605 #x_windows.141=(36,8,8,192)f32 #x8.8=(36,64,192)f32 nn.Linear pnnx_unique_562 1 1 x8.8 13641 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.8=(36,64,192)f32 #13641=(36,64,576)f32 pnnx.Expression pnnx_expr_6343 1 1 x8.8 13644 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.8=(36,64,192)f32 pnnx.Expression pnnx_expr_6338 0 1 13646 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_572 2 1 13641 13644 13645 $input=13641 $shape=13644 #13641=(36,64,576)f32 #13645=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6337 0 1 24438 expr=0 pnnx.Expression pnnx_expr_6336 0 1 24439 expr=0 pnnx.Expression pnnx_expr_6335 0 1 24440 expr=0 pnnx.Expression pnnx_expr_6334 0 1 24441 expr=1 pnnx.Expression pnnx_expr_6333 0 1 24442 expr=0 pnnx.Expression pnnx_expr_6332 0 1 24443 expr=2 torch.permute torch.permute_2805 2 1 13645 13646 qkv1.8 $input=13645 $dims=13646 #13645=(36,64,3,6,32)f32 #qkv1.8=(3,36,6,64,32)f32 Tensor.select Tensor.select_857 3 1 qkv1.8 24438 24439 q.141 $input=qkv1.8 $dim=24438 $index=24439 #qkv1.8=(3,36,6,64,32)f32 #q.141=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6331 1 1 q.141 q1.8 expr=mul(@0,1.767767e-01) #q.141=(36,6,64,32)f32 #q1.8=(36,6,64,32)f32 Tensor.select Tensor.select_858 3 1 qkv1.8 24440 24441 k.141 $input=qkv1.8 $dim=24440 $index=24441 #qkv1.8=(3,36,6,64,32)f32 #k.141=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6329 0 1 13654 expr=[-1] Tensor.view Tensor.view_1748 2 1 relative_position_index.141 13654 13655 $input=relative_position_index.141 $shape=13654 #relative_position_index.141=(64,64)i64 #13655=(4096)i64 pnnx.Expression pnnx_expr_6328 1 1 13655 13656 expr=[@0] #13655=(4096)i64 pnnx.Expression pnnx_expr_6325 0 1 13658 expr=[64,64,-1] Tensor.index Tensor.index_395 2 1 relative_position_bias_table.141 13656 13657 $input=relative_position_bias_table.141 $expr=13656 #relative_position_bias_table.141=(225,6)f32 #13657=(4096,6)f32 pnnx.Expression pnnx_expr_6321 0 1 13660 expr=[2,0,1] Tensor.view Tensor.view_1749 2 1 13657 13658 relative_position_bias.141 $input=13657 $shape=13658 #13657=(4096,6)f32 #relative_position_bias.141=(64,64,6)f32 pnnx.Expression pnnx_expr_6320 0 1 24451 expr=0 torch.permute torch.permute_2806 2 1 relative_position_bias.141 13660 13661 $input=relative_position_bias.141 $dims=13660 #relative_position_bias.141=(64,64,6)f32 #13661=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_210 1 1 13661 relative_position_bias1.8 memory_format=torch.contiguous_format $input=13661 #13661=(6,64,64)f32 #relative_position_bias1.8=(6,64,64)f32
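The ops just traced are the interior of the window attention. nn.Linear(192,576) packs q, k and v for each window; the (36,64,576) output is reshaped to (36,64,3,6,32) and permuted to (3,36,6,64,32) so Tensor.select can peel off q, k and v per head. q is scaled by 1.767767e-01, which is 32^-0.5 for the 32-dim heads, and the (225,6) relative_position_bias_table (225 = (2*8-1)^2 offsets for an 8x8 window) is gathered with the flattened (64,64) relative_position_index, then reshaped and permuted to broadcast over the 36 windows. A minimal sketch of the same computation under those assumptions; the names are ours, and the real table and index values live in the exported attributes:

import torch
import torch.nn as nn

class WindowScores(nn.Module):
    # hypothetical condensation of the qkv + relative-position-bias ops above
    def __init__(self, dim=192, heads=6, ws=8):
        super().__init__()
        self.heads = heads
        self.scale = (dim // heads) ** -0.5  # 32**-0.5 = 1.767767e-01
        self.qkv = nn.Linear(dim, dim * 3, bias=True)
        self.bias_table = nn.Parameter(torch.zeros((2 * ws - 1) ** 2, heads))  # (225, 6)
        self.register_buffer("bias_index",
                             torch.zeros(ws * ws, ws * ws, dtype=torch.long))  # (64, 64)

    def forward(self, x):  # x: (36, 64, 192), one row per window
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.heads, C // self.heads)
        q, k, v = qkv.permute(2, 0, 3, 1, 4)               # (3, 36, 6, 64, 32)
        attn = (q * self.scale) @ k.transpose(-2, -1)      # (36, 6, 64, 64)
        bias = self.bias_table[self.bias_index.view(-1)]   # (4096, 6) gather
        bias = bias.view(N, N, -1).permute(2, 0, 1).contiguous()  # (6, 64, 64)
        return attn + bias.unsqueeze(0), v                 # bias broadcast over windows

scores, v = WindowScores()(torch.randn(36, 64, 192))
print(scores.shape)  # torch.Size([36, 6, 64, 64])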
torch.transpose torch.transpose_3123 3 1 k.141 13616 13617 13652 $input=k.141 $dim0=13616 $dim1=13617 #k.141=(36,6,64,32)f32 #13652=(36,6,32,64)f32 torch.matmul torch.matmul_2342 2 1 q1.8 13652 attn.283 $input=q1.8 $other=13652 #q1.8=(36,6,64,32)f32 #13652=(36,6,32,64)f32 #attn.283=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3355 2 1 relative_position_bias1.8 24451 13663 $input=relative_position_bias1.8 $dim=24451 #relative_position_bias1.8=(6,64,64)f32 #13663=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6318 2 1 attn.283 13663 attn2.4 expr=add(@0,@1) #attn.283=(36,6,64,64)f32 #13663=(1,6,64,64)f32 #attn2.4=(36,6,64,64)f32 pnnx.Expression pnnx_expr_6309 2 1 x8.8 attn_mask.71 13670 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.8=(36,64,192)f32 #attn_mask.71=(36,64,64)f32 pnnx.Expression pnnx_expr_6308 0 1 24456 expr=1 pnnx.Expression pnnx_expr_6307 0 1 24457 expr=0 Tensor.view Tensor.view_1750 2 1 attn2.4 13670 13671 $input=attn2.4 $shape=13670 #attn2.4=(36,6,64,64)f32 #13671=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3356 2 1 attn_mask.71 24456 13672 $input=attn_mask.71 $dim=24456 #attn_mask.71=(36,64,64)f32 #13672=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3357 2 1 13672 24457 13673 $input=13672 $dim=24457 #13672=(36,1,64,64)f32 #13673=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_6305 2 1 13671 13673 attn3.4 expr=add(@0,@1) #13671=(1,36,6,64,64)f32 #13673=(1,36,1,64,64)f32 #attn3.4=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_6302 1 1 x8.8 13675 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.8=(36,64,192)f32 Tensor.view Tensor.view_1751 2 1 attn3.4 13675 input.319 $input=attn3.4 $shape=13675 #attn3.4=(1,36,6,64,64)f32 #input.319=(36,6,64,64)f32 nn.Softmax pnnx_unique_563 1 1 input.319 13677 dim=-1 #input.319=(36,6,64,64)f32 #13677=(36,6,64,64)f32 nn.Dropout pnnx_unique_564 1 1 13677 13678 #13677=(36,6,64,64)f32 #13678=(36,6,64,64)f32 Tensor.select Tensor.select_859 3 1 qkv1.8 24442 24443 v.141 $input=qkv1.8 $dim=24442 $index=24443 #qkv1.8=(3,36,6,64,32)f32 #v.141=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6301 0 1 24461 expr=1 pnnx.Expression pnnx_expr_6300 0 1 24462 expr=2 torch.matmul torch.matmul_2343 2 1 13678 v.141 13679 $input=13678 $other=v.141 #13678=(36,6,64,64)f32 #v.141=(36,6,64,32)f32 #13679=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6299 1 1 x8.8 13681 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.8=(36,64,192)f32 torch.transpose torch.transpose_3124 3 1 13679 24461 24462 13680 $input=13679 $dim0=24461 $dim1=24462 #13679=(36,6,64,32)f32 #13680=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_573 2 1 13680 13681 input1.8 $input=13680 $shape=13681 #13680=(36,64,6,32)f32 #input1.8=(36,64,192)f32 nn.Linear pnnx_unique_565 1 1 input1.8 13683 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.8=(36,64,192)f32 #13683=(36,64,192)f32 nn.Dropout pnnx_unique_566 1 1 13683 13684 #13683=(36,64,192)f32 #13684=(36,64,192)f32 pnnx.Expression pnnx_expr_6295 1 1 13548 13685 expr=[-1,8,8,int(size(@0,2))] #13548=(1,2304,192)f32 pnnx.Expression pnnx_expr_6282 1 1 157 13691 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6275 0 1 13693 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1752 2 1 13684 13685 windows.141 $input=13684 $shape=13685 #13684=(36,64,192)f32 #windows.141=(36,8,8,192)f32 Tensor.view Tensor.view_1753 2 1 windows.141 13691 x9.8 $input=windows.141 $shape=13691
#windows.141=(36,8,8,192)f32 #x9.8=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6272 1 1 157 13696 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2807 2 1 x9.8 13693 13694 $input=x9.8 $dims=13693 #x9.8=(1,6,6,8,8,192)f32 #13694=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_211 1 1 13694 13695 memory_format=torch.contiguous_format $input=13694 #13694=(1,6,8,6,8,192)f32 #13695=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6269 0 1 13698 expr=[4,4] pnnx.Expression pnnx_expr_6266 0 1 13699 expr=[1,2] Tensor.view Tensor.view_1754 2 1 13695 13696 shifted_x.71 $input=13695 $shape=13696 #13695=(1,6,8,6,8,192)f32 #shifted_x.71=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6263 2 1 13548 157 13703 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13548=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2489 3 1 shifted_x.71 13698 13699 x10.4 $input=shifted_x.71 $shifts=13698 $dims=13699 #shifted_x.71=(1,48,48,192)f32 #x10.4=(1,48,48,192)f32 Tensor.view Tensor.view_1755 2 1 x10.4 13703 x11.4 $input=x10.4 $shape=13703 #x10.4=(1,48,48,192)f32 #x11.4=(1,2304,192)f32 pnnx.Expression pnnx_expr_6261 2 1 13548 x11.4 input.321 expr=add(@0,@1) #13548=(1,2304,192)f32 #x11.4=(1,2304,192)f32 #input.321=(1,2304,192)f32 nn.LayerNorm pnnx_unique_567 1 1 input.321 13707 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.321=(1,2304,192)f32 #13707=(1,2304,192)f32 nn.Linear pnnx_unique_568 1 1 13707 13712 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #13707=(1,2304,192)f32 #13712=(1,2304,384)f32 nn.GELU pnnx_unique_569 1 1 13712 13713 #13712=(1,2304,384)f32 #13713=(1,2304,384)f32 nn.Dropout pnnx_unique_570 1 1 13713 13714 #13713=(1,2304,384)f32 #13714=(1,2304,384)f32 nn.Linear pnnx_unique_571 1 1 13714 13715 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13714=(1,2304,384)f32 #13715=(1,2304,192)f32 nn.Dropout pnnx_unique_572 1 1 13715 13716 #13715=(1,2304,192)f32 #13716=(1,2304,192)f32 pnnx.Expression pnnx_expr_6259 2 1 input.321 13716 13718 expr=add(@0,@1) #input.321=(1,2304,192)f32 #13716=(1,2304,192)f32 #13718=(1,2304,192)f32 nn.LayerNorm pnnx_unique_573 1 1 13718 13745 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13718=(1,2304,192)f32 #13745=(1,2304,192)f32 pnnx.Expression pnnx_expr_6237 2 1 13718 157 13746 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13718=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1756 2 1 13745 13746 x.143 $input=13745 $shape=13746 #13745=(1,2304,192)f32 #x.143=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6216 1 1 x.143 13763 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.143=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6211 0 1 13765 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1757 2 1 x.143 13763 x5.73 $input=x.143 $shape=13763 #x.143=(1,48,48,192)f32 #x5.73=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6208 1 1 x.143 13768 expr=[-1,8,8,int(size(@0,3))] #x.143=(1,48,48,192)f32 torch.permute torch.permute_2808 2 1 x5.73 13765 13766 $input=x5.73 $dims=13765 #x5.73=(1,6,8,6,8,192)f32 #13766=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_212 1 1 13766 13767 memory_format=torch.contiguous_format $input=13766 #13766=(1,6,6,8,8,192)f32 #13767=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6206 1 1 13718 13770 expr=[-1,64,int(size(@0,2))] #13718=(1,2304,192)f32 
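The view, permute([0,1,3,2,4,5]), contiguous, view chain that opens each of these blocks (here Tensor.view_1757, torch.permute_2808 and the Tensor.view_1758/1759 pair just below) is window partitioning: the (1,48,48,192) map becomes a 6x6 grid of 8x8 windows flattened to (36,64,192), so that attention runs independently per window. A sketch of the equivalent function, assuming the 8-pixel window the shapes imply; the function name is ours:

import torch

def window_partition(x, ws=8):
    # (1, 48, 48, 192) -> (36, 64, 192); mirrors the view/permute chain above
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)    # (1, 6, 8, 6, 8, 192)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()  # (1, 6, 6, 8, 8, 192)
    return x.view(-1, ws * ws, C)                 # one row of 64 tokens per window

print(window_partition(torch.randn(1, 48, 48, 192)).shape)  # torch.Size([36, 64, 192])

The window_reverse sketch further up is its exact inverse.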
pnnx.Expression pnnx_expr_6196 0 1 13781 expr=-2 pnnx.Expression pnnx_expr_6195 0 1 13782 expr=-1 pnnx.Attribute pnnx_unique_574 0 1 relative_position_bias_table.143 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.143=(225,6)f32 pnnx.Attribute pnnx_unique_575 0 1 relative_position_index.143 @relative_position_index=(64,64)i64 #relative_position_index.143=(64,64)i64 Tensor.view Tensor.view_1758 2 1 13767 13768 x_windows.143 $input=13767 $shape=13768 #13767=(1,6,6,8,8,192)f32 #x_windows.143=(36,8,8,192)f32 Tensor.view Tensor.view_1759 2 1 x_windows.143 13770 x6.10 $input=x_windows.143 $shape=13770 #x_windows.143=(36,8,8,192)f32 #x6.10=(36,64,192)f32 nn.Linear pnnx_unique_576 1 1 x6.10 13802 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x6.10=(36,64,192)f32 #13802=(36,64,576)f32 pnnx.Expression pnnx_expr_6180 1 1 x6.10 13805 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x6.10=(36,64,192)f32 pnnx.Expression pnnx_expr_6175 0 1 13807 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_574 2 1 13802 13805 13806 $input=13802 $shape=13805 #13802=(36,64,576)f32 #13806=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6174 0 1 24506 expr=0 pnnx.Expression pnnx_expr_6173 0 1 24507 expr=0 pnnx.Expression pnnx_expr_6172 0 1 24508 expr=0 pnnx.Expression pnnx_expr_6171 0 1 24509 expr=1 pnnx.Expression pnnx_expr_6170 0 1 24510 expr=0 pnnx.Expression pnnx_expr_6169 0 1 24511 expr=2 torch.permute torch.permute_2809 2 1 13806 13807 qkv1.10 $input=13806 $dims=13807 #13806=(36,64,3,6,32)f32 #qkv1.10=(3,36,6,64,32)f32 Tensor.select Tensor.select_860 3 1 qkv1.10 24506 24507 q.143 $input=qkv1.10 $dim=24506 $index=24507 #qkv1.10=(3,36,6,64,32)f32 #q.143=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6168 1 1 q.143 q1.10 expr=mul(@0,1.767767e-01) #q.143=(36,6,64,32)f32 #q1.10=(36,6,64,32)f32 Tensor.select Tensor.select_861 3 1 qkv1.10 24508 24509 k.143 $input=qkv1.10 $dim=24508 $index=24509 #qkv1.10=(3,36,6,64,32)f32 #k.143=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6166 0 1 13815 expr=[-1] Tensor.view Tensor.view_1760 2 1 relative_position_index.143 13815 13816 $input=relative_position_index.143 $shape=13815 #relative_position_index.143=(64,64)i64 #13816=(4096)i64 pnnx.Expression pnnx_expr_6165 1 1 13816 13817 expr=[@0] #13816=(4096)i64 pnnx.Expression pnnx_expr_6162 0 1 13819 expr=[64,64,-1] Tensor.index Tensor.index_396 2 1 relative_position_bias_table.143 13817 13818 $input=relative_position_bias_table.143 $expr=13817 #relative_position_bias_table.143=(225,6)f32 #13818=(4096,6)f32 pnnx.Expression pnnx_expr_6158 0 1 13821 expr=[2,0,1] Tensor.view Tensor.view_1761 2 1 13818 13819 relative_position_bias.143 $input=13818 $shape=13819 #13818=(4096,6)f32 #relative_position_bias.143=(64,64,6)f32 pnnx.Expression pnnx_expr_6157 0 1 24519 expr=0 torch.permute torch.permute_2810 2 1 relative_position_bias.143 13821 13822 $input=relative_position_bias.143 $dims=13821 #relative_position_bias.143=(64,64,6)f32 #13822=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_213 1 1 13822 relative_position_bias1.10 memory_format=torch.contiguous_format $input=13822 #13822=(6,64,64)f32 #relative_position_bias1.10=(6,64,64)f32 torch.transpose torch.transpose_3125 3 1 k.143 13781 13782 13813 $input=k.143 $dim0=13781 $dim1=13782 #k.143=(36,6,64,32)f32 #13813=(36,6,32,64)f32 torch.matmul torch.matmul_2344 2 1 q1.10 13813 attn.287 $input=q1.10 $other=13813 #q1.10=(36,6,64,32)f32 #13813=(36,6,32,64)f32 #attn.287=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3358 2 1 
relative_position_bias1.10 24519 13824 $input=relative_position_bias1.10 $dim=24519 #relative_position_bias1.10=(6,64,64)f32 #13824=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6155 2 1 attn.287 13824 input.323 expr=add(@0,@1) #attn.287=(36,6,64,64)f32 #13824=(1,6,64,64)f32 #input.323=(36,6,64,64)f32 nn.Softmax pnnx_unique_577 1 1 input.323 13826 dim=-1 #input.323=(36,6,64,64)f32 #13826=(36,6,64,64)f32 nn.Dropout pnnx_unique_578 1 1 13826 13827 #13826=(36,6,64,64)f32 #13827=(36,6,64,64)f32 Tensor.select Tensor.select_862 3 1 qkv1.10 24510 24511 v.143 $input=qkv1.10 $dim=24510 $index=24511 #qkv1.10=(3,36,6,64,32)f32 #v.143=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6154 0 1 24521 expr=1 pnnx.Expression pnnx_expr_6153 0 1 24522 expr=2 torch.matmul torch.matmul_2345 2 1 13827 v.143 13828 $input=13827 $other=v.143 #13827=(36,6,64,64)f32 #v.143=(36,6,64,32)f32 #13828=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6152 1 1 x6.10 13830 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x6.10=(36,64,192)f32 torch.transpose torch.transpose_3126 3 1 13828 24521 24522 13829 $input=13828 $dim0=24521 $dim1=24522 #13828=(36,6,64,32)f32 #13829=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_575 2 1 13829 13830 input1.10 $input=13829 $shape=13830 #13829=(36,64,6,32)f32 #input1.10=(36,64,192)f32 nn.Linear pnnx_unique_579 1 1 input1.10 13832 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.10=(36,64,192)f32 #13832=(36,64,192)f32 nn.Dropout pnnx_unique_580 1 1 13832 13833 #13832=(36,64,192)f32 #13833=(36,64,192)f32 pnnx.Expression pnnx_expr_6148 1 1 13718 13834 expr=[-1,8,8,int(size(@0,2))] #13718=(1,2304,192)f32 pnnx.Expression pnnx_expr_6135 1 1 157 13840 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6128 0 1 13842 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1762 2 1 13833 13834 windows.143 $input=13833 $shape=13834 #13833=(36,64,192)f32 #windows.143=(36,8,8,192)f32 Tensor.view Tensor.view_1763 2 1 windows.143 13840 x7.10 $input=windows.143 $shape=13840 #windows.143=(36,8,8,192)f32 #x7.10=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6125 1 1 157 13845 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2811 2 1 x7.10 13842 13843 $input=x7.10 $dims=13842 #x7.10=(1,6,6,8,8,192)f32 #13843=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_214 1 1 13843 13844 memory_format=torch.contiguous_format $input=13843 #13843=(1,6,8,6,8,192)f32 #13844=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6122 2 1 13718 157 13849 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13718=(1,2304,192)f32 #157=(1,192,48,48)f32 Tensor.view Tensor.view_1764 2 1 13844 13845 x8.10 $input=13844 $shape=13845 #13844=(1,6,8,6,8,192)f32 #x8.10=(1,48,48,192)f32 Tensor.view Tensor.view_1765 2 1 x8.10 13849 x9.10 $input=x8.10 $shape=13849 #x8.10=(1,48,48,192)f32 #x9.10=(1,2304,192)f32 pnnx.Expression pnnx_expr_6120 2 1 13718 x9.10 input.325 expr=add(@0,@1) #13718=(1,2304,192)f32 #x9.10=(1,2304,192)f32 #input.325=(1,2304,192)f32 nn.LayerNorm pnnx_unique_581 1 1 input.325 13853 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.325=(1,2304,192)f32 #13853=(1,2304,192)f32 nn.Linear pnnx_unique_582 1 1 13853 13858 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #13853=(1,2304,192)f32 #13858=(1,2304,384)f32 nn.GELU pnnx_unique_583 1 1 13858 13859 #13858=(1,2304,384)f32 #13859=(1,2304,384)f32 nn.Dropout 
pnnx_unique_584 1 1 13859 13860 #13859=(1,2304,384)f32 #13860=(1,2304,384)f32 nn.Linear pnnx_unique_585 1 1 13860 13861 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #13860=(1,2304,384)f32 #13861=(1,2304,192)f32 nn.Dropout pnnx_unique_586 1 1 13861 13862 #13861=(1,2304,192)f32 #13862=(1,2304,192)f32 pnnx.Expression pnnx_expr_6118 2 1 input.325 13862 13864 expr=add(@0,@1) #input.325=(1,2304,192)f32 #13862=(1,2304,192)f32 #13864=(1,2304,192)f32 pnnx.Attribute pnnx_unique_587 0 1 attn_mask.73 @attn_mask=(36,64,64)f32 #attn_mask.73=(36,64,64)f32 nn.LayerNorm pnnx_unique_588 1 1 13864 13893 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #13864=(1,2304,192)f32 #13893=(1,2304,192)f32 pnnx.Expression pnnx_expr_6095 2 1 13864 157 13894 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #13864=(1,2304,192)f32 #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_6093 0 1 13896 expr=[-4,-4] pnnx.Expression pnnx_expr_6091 0 1 13897 expr=[1,2] Tensor.view Tensor.view_1766 2 1 13893 13894 x.145 $input=13893 $shape=13894 #13893=(1,2304,192)f32 #x.145=(1,48,48,192)f32 torch.roll torch.roll_2490 3 1 x.145 13896 13897 x6.1 $input=x.145 $shifts=13896 $dims=13897 #x.145=(1,48,48,192)f32 #x6.1=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6069 1 1 x6.1 13914 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x6.1=(1,48,48,192)f32 pnnx.Expression pnnx_expr_6064 0 1 13916 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1767 2 1 x6.1 13914 x7.1 $input=x6.1 $shape=13914 #x6.1=(1,48,48,192)f32 #x7.1=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_6061 1 1 x6.1 13919 expr=[-1,8,8,int(size(@0,3))] #x6.1=(1,48,48,192)f32 torch.permute torch.permute_2812 2 1 x7.1 13916 13917 $input=x7.1 $dims=13916 #x7.1=(1,6,8,6,8,192)f32 #13917=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_215 1 1 13917 13918 memory_format=torch.contiguous_format $input=13917 #13917=(1,6,6,8,8,192)f32 #13918=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_6059 1 1 13864 13921 expr=[-1,64,int(size(@0,2))] #13864=(1,2304,192)f32 pnnx.Expression pnnx_expr_6049 0 1 13932 expr=-2 pnnx.Expression pnnx_expr_6048 0 1 13933 expr=-1 pnnx.Attribute pnnx_unique_589 0 1 relative_position_bias_table.145 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.145=(225,6)f32 pnnx.Attribute pnnx_unique_590 0 1 relative_position_index.145 @relative_position_index=(64,64)i64 #relative_position_index.145=(64,64)i64 Tensor.view Tensor.view_1768 2 1 13918 13919 x_windows.145 $input=13918 $shape=13919 #13918=(1,6,6,8,8,192)f32 #x_windows.145=(36,8,8,192)f32 Tensor.view Tensor.view_1769 2 1 x_windows.145 13921 x8.1 $input=x_windows.145 $shape=13921 #x_windows.145=(36,8,8,192)f32 #x8.1=(36,64,192)f32 nn.Linear pnnx_unique_591 1 1 x8.1 13957 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x8.1=(36,64,192)f32 #13957=(36,64,576)f32 pnnx.Expression pnnx_expr_6029 1 1 x8.1 13960 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x8.1=(36,64,192)f32 pnnx.Expression pnnx_expr_6024 0 1 13962 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_576 2 1 13957 13960 13961 $input=13957 $shape=13960 #13957=(36,64,576)f32 #13961=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_6023 0 1 24565 expr=0 pnnx.Expression pnnx_expr_6022 0 1 24566 expr=0 pnnx.Expression pnnx_expr_6021 0 1 24567 expr=0 pnnx.Expression pnnx_expr_6020 0 1 24568 expr=1 pnnx.Expression pnnx_expr_6019 0 1 24569 expr=0 pnnx.Expression 
pnnx_expr_6018 0 1 24570 expr=2 torch.permute torch.permute_2813 2 1 13961 13962 qkv1.1 $input=13961 $dims=13962 #13961=(36,64,3,6,32)f32 #qkv1.1=(3,36,6,64,32)f32 Tensor.select Tensor.select_863 3 1 qkv1.1 24565 24566 q.145 $input=qkv1.1 $dim=24565 $index=24566 #qkv1.1=(3,36,6,64,32)f32 #q.145=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6017 1 1 q.145 q1.1 expr=mul(@0,1.767767e-01) #q.145=(36,6,64,32)f32 #q1.1=(36,6,64,32)f32 Tensor.select Tensor.select_864 3 1 qkv1.1 24567 24568 k.145 $input=qkv1.1 $dim=24567 $index=24568 #qkv1.1=(3,36,6,64,32)f32 #k.145=(36,6,64,32)f32 pnnx.Expression pnnx_expr_6015 0 1 13970 expr=[-1] Tensor.view Tensor.view_1770 2 1 relative_position_index.145 13970 13971 $input=relative_position_index.145 $shape=13970 #relative_position_index.145=(64,64)i64 #13971=(4096)i64 pnnx.Expression pnnx_expr_6014 1 1 13971 13972 expr=[@0] #13971=(4096)i64 pnnx.Expression pnnx_expr_6011 0 1 13974 expr=[64,64,-1] Tensor.index Tensor.index_397 2 1 relative_position_bias_table.145 13972 13973 $input=relative_position_bias_table.145 $expr=13972 #relative_position_bias_table.145=(225,6)f32 #13973=(4096,6)f32 pnnx.Expression pnnx_expr_6007 0 1 13976 expr=[2,0,1] Tensor.view Tensor.view_1771 2 1 13973 13974 relative_position_bias.145 $input=13973 $shape=13974 #13973=(4096,6)f32 #relative_position_bias.145=(64,64,6)f32 pnnx.Expression pnnx_expr_6006 0 1 24578 expr=0 torch.permute torch.permute_2814 2 1 relative_position_bias.145 13976 13977 $input=relative_position_bias.145 $dims=13976 #relative_position_bias.145=(64,64,6)f32 #13977=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_216 1 1 13977 relative_position_bias1.1 memory_format=torch.contiguous_format $input=13977 #13977=(6,64,64)f32 #relative_position_bias1.1=(6,64,64)f32 torch.transpose torch.transpose_3127 3 1 k.145 13932 13933 13968 $input=k.145 $dim0=13932 $dim1=13933 #k.145=(36,6,64,32)f32 #13968=(36,6,32,64)f32 torch.matmul torch.matmul_2346 2 1 q1.1 13968 attn.291 $input=q1.1 $other=13968 #q1.1=(36,6,64,32)f32 #13968=(36,6,32,64)f32 #attn.291=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3359 2 1 relative_position_bias1.1 24578 13979 $input=relative_position_bias1.1 $dim=24578 #relative_position_bias1.1=(6,64,64)f32 #13979=(1,6,64,64)f32 pnnx.Expression pnnx_expr_6004 2 1 attn.291 13979 attn2.1 expr=add(@0,@1) #attn.291=(36,6,64,64)f32 #13979=(1,6,64,64)f32 #attn2.1=(36,6,64,64)f32 pnnx.Expression pnnx_expr_5995 2 1 x8.1 attn_mask.73 13986 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x8.1=(36,64,192)f32 #attn_mask.73=(36,64,64)f32 pnnx.Expression pnnx_expr_5994 0 1 24583 expr=1 pnnx.Expression pnnx_expr_5993 0 1 24584 expr=0 Tensor.view Tensor.view_1772 2 1 attn2.1 13986 13987 $input=attn2.1 $shape=13986 #attn2.1=(36,6,64,64)f32 #13987=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3360 2 1 attn_mask.73 24583 13988 $input=attn_mask.73 $dim=24583 #attn_mask.73=(36,64,64)f32 #13988=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3361 2 1 13988 24584 13989 $input=13988 $dim=24584 #13988=(36,1,64,64)f32 #13989=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_5991 2 1 13987 13989 attn3.1 expr=add(@0,@1) #13987=(1,36,6,64,64)f32 #13989=(1,36,1,64,64)f32 #attn3.1=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_5988 1 1 x8.1 13991 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x8.1=(36,64,192)f32 Tensor.view Tensor.view_1773 2 1 attn3.1 13991 input.327 $input=attn3.1 $shape=13991 #attn3.1=(1,36,6,64,64)f32 #input.327=(36,6,64,64)f32 nn.Softmax pnnx_unique_592 1 1 input.327 13993 dim=-1 #input.327=(36,6,64,64)f32 #13993=(36,6,64,64)f32
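Because this block was preceded by torch.roll with shifts [-4,-4] (torch.roll_2490), its attention is masked: the scores are viewed as (1,36,6,64,64), the per-window attn_mask attribute of shape (36,64,64) is unsqueezed to (1,36,1,64,64) and added, broadcasting over the 6 heads, and the sum is flattened back to (36,6,64,64) before the nn.Softmax just traced. A sketch of that masking step, with shapes from the dump and names of our choosing:

import torch
import torch.nn.functional as F

def masked_softmax(attn, mask, heads=6):
    # attn: (nW, heads, 64, 64); mask: (nW, 64, 64), precomputed per window
    nW, _, N, _ = attn.shape
    attn = attn.view(-1, nW, heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
    return F.softmax(attn.view(-1, heads, N, N), dim=-1)

out = masked_softmax(torch.randn(36, 6, 64, 64), torch.zeros(36, 64, 64))
print(out.shape)  # torch.Size([36, 6, 64, 64])

The mask itself is a constant attribute in the export (attn_mask.73 here), so it only needs to encode which token pairs came from the same spatial region before the shift.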
nn.Dropout pnnx_unique_593 1 1 13993 13994 #13993=(36,6,64,64)f32 #13994=(36,6,64,64)f32 Tensor.select Tensor.select_865 3 1 qkv1.1 24569 24570 v.145 $input=qkv1.1 $dim=24569 $index=24570 #qkv1.1=(3,36,6,64,32)f32 #v.145=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5987 0 1 24588 expr=1 pnnx.Expression pnnx_expr_5986 0 1 24589 expr=2 torch.matmul torch.matmul_2347 2 1 13994 v.145 13995 $input=13994 $other=v.145 #13994=(36,6,64,64)f32 #v.145=(36,6,64,32)f32 #13995=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5985 1 1 x8.1 13997 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x8.1=(36,64,192)f32 torch.transpose torch.transpose_3128 3 1 13995 24588 24589 13996 $input=13995 $dim0=24588 $dim1=24589 #13995=(36,6,64,32)f32 #13996=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_577 2 1 13996 13997 input1.1 $input=13996 $shape=13997 #13996=(36,64,6,32)f32 #input1.1=(36,64,192)f32 nn.Linear pnnx_unique_594 1 1 input1.1 13999 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input1.1=(36,64,192)f32 #13999=(36,64,192)f32 nn.Dropout pnnx_unique_595 1 1 13999 14000 #13999=(36,64,192)f32 #14000=(36,64,192)f32 pnnx.Expression pnnx_expr_5981 1 1 13864 14001 expr=[-1,8,8,int(size(@0,2))] #13864=(1,2304,192)f32 pnnx.Expression pnnx_expr_5968 1 1 157 14007 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #157=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5961 0 1 14009 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1774 2 1 14000 14001 windows.145 $input=14000 $shape=14001 #14000=(36,64,192)f32 #windows.145=(36,8,8,192)f32 Tensor.view Tensor.view_1775 2 1 windows.145 14007 x9.1 $input=windows.145 $shape=14007 #windows.145=(36,8,8,192)f32 #x9.1=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5958 1 1 157 14012 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #157=(1,192,48,48)f32 torch.permute torch.permute_2815 2 1 x9.1 14009 14010 $input=x9.1 $dims=14009 #x9.1=(1,6,6,8,8,192)f32 #14010=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_217 1 1 14010 14011 memory_format=torch.contiguous_format $input=14010 #14010=(1,6,8,6,8,192)f32 #14011=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5955 0 1 14014 expr=[4,4] pnnx.Expression pnnx_expr_5952 0 1 14015 expr=[1,2] Tensor.view Tensor.view_1776 2 1 14011 14012 shifted_x.73 $input=14011 $shape=14012 #14011=(1,6,8,6,8,192)f32 #shifted_x.73=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5949 2 1 13864 157 14019 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #13864=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.roll torch.roll_2491 3 1 shifted_x.73 14014 14015 x10.1 $input=shifted_x.73 $shifts=14014 $dims=14015 #shifted_x.73=(1,48,48,192)f32 #x10.1=(1,48,48,192)f32 Tensor.view Tensor.view_1777 2 1 x10.1 14019 x11.1 $input=x10.1 $shape=14019 #x10.1=(1,48,48,192)f32 #x11.1=(1,2304,192)f32 pnnx.Expression pnnx_expr_5947 2 1 13864 x11.1 input.329 expr=add(@0,@1) #13864=(1,2304,192)f32 #x11.1=(1,2304,192)f32 #input.329=(1,2304,192)f32 nn.LayerNorm pnnx_unique_596 1 1 input.329 14023 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.329=(1,2304,192)f32 #14023=(1,2304,192)f32 nn.Linear pnnx_unique_597 1 1 14023 14028 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14023=(1,2304,192)f32 #14028=(1,2304,384)f32 nn.GELU pnnx_unique_598 1 1 14028 14029 #14028=(1,2304,384)f32 #14029=(1,2304,384)f32 nn.Dropout pnnx_unique_599 1 1 14029 14030 #14029=(1,2304,384)f32 #14030=(1,2304,384)f32 nn.Linear
pnnx_unique_600 1 1 14030 14031 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14030=(1,2304,384)f32 #14031=(1,2304,192)f32 nn.Dropout pnnx_unique_601 1 1 14031 14032 #14031=(1,2304,192)f32 #14032=(1,2304,192)f32 pnnx.Expression pnnx_expr_5945 2 1 input.329 14032 14034 expr=add(@0,@1) #input.329=(1,2304,192)f32 #14032=(1,2304,192)f32 #14034=(1,2304,192)f32 pnnx.Expression pnnx_expr_5943 0 1 14036 expr=1 pnnx.Expression pnnx_expr_5942 0 1 14037 expr=2 pnnx.Expression pnnx_expr_5937 2 1 14034 157 14043 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #14034=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3129 3 1 14034 14036 14037 14042 $input=14034 $dim0=14036 $dim1=14037 #14034=(1,2304,192)f32 #14042=(1,192,2304)f32 Tensor.view Tensor.view_1778 2 1 14042 14043 input.301 $input=14042 $shape=14043 #14042=(1,192,2304)f32 #input.301=(1,192,48,48)f32 nn.Conv2d pnnx_unique_602 1 1 input.301 14045 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.301=(1,192,48,48)f32 #14045=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5936 0 1 14046 expr=-1 pnnx.Expression pnnx_expr_5935 0 1 14047 expr=2 pnnx.Expression pnnx_expr_5934 0 1 14048 expr=1 pnnx.Expression pnnx_expr_5933 0 1 24616 expr=2 torch.flatten torch.flatten_2196 3 1 14045 14047 14046 14049 $input=14045 $start_dim=14047 $end_dim=14046 #14045=(1,192,48,48)f32 #14049=(1,192,2304)f32 torch.transpose torch.transpose_3130 3 1 14049 14048 24616 14050 $input=14049 $dim0=14048 $dim1=24616 #14049=(1,192,2304)f32 #14050=(1,2304,192)f32 pnnx.Expression pnnx_expr_5932 2 1 14050 13069 14051 expr=add(@0,@1) #14050=(1,2304,192)f32 #13069=(1,2304,192)f32 #14051=(1,2304,192)f32 nn.LayerNorm pnnx_unique_603 1 1 14051 1517 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14051=(1,2304,192)f32 #1517=(1,2304,192)f32 pnnx.Expression pnnx_expr_5930 0 1 14053 expr=1 pnnx.Expression pnnx_expr_5929 0 1 14054 expr=2 pnnx.Expression pnnx_expr_5924 2 1 1517 157 14060 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #1517=(1,2304,192)f32 #157=(1,192,48,48)f32 torch.transpose torch.transpose_3131 3 1 1517 14053 14054 14059 $input=1517 $dim0=14053 $dim1=14054 #1517=(1,2304,192)f32 #14059=(1,192,2304)f32 Tensor.view Tensor.view_1779 2 1 14059 14060 input.303 $input=14059 $shape=14060 #14059=(1,192,2304)f32 #input.303=(1,192,48,48)f32 nn.Conv2d pnnx_unique_604 1 1 input.303 1523 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.303=(1,192,48,48)f32 #1523=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5922 2 1 1523 157 x_b.1 expr=add(@0,@1) #1523=(1,192,48,48)f32 #157=(1,192,48,48)f32 #x_b.1=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5919 2 1 x_b.1 x_a.1 input.305 expr=sub(@0,@1) #x_b.1=(1,192,48,48)f32 #x_a.1=(1,192,48,48)f32 #input.305=(1,192,48,48)f32 nn.ReflectionPad2d manipulator.convblks.0.pad1 1 1 input.305 14072 padding=(3,3,3,3) #input.305=(1,192,48,48)f32 #14072=(1,192,54,54)f32 nn.Conv2d manipulator.convblks.0.conv1 1 1 14072 14073 bias=False dilation=(1,1) groups=1 in_channels=192 kernel_size=(7,7) out_channels=192 padding=(0,0) padding_mode=zeros stride=(1,1) @weight=(192,192,7,7)f32 #14072=(1,192,54,54)f32 #14073=(1,192,48,48)f32 nn.ReLU manipulator.convblks.0.relu 1 1 14073 14074 
#14073=(1,192,48,48)f32 #14074=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5918 1 1 14074 input0.77 expr=mul(@0,1.900000e+01) #14074=(1,192,48,48)f32 #input0.77=(1,192,48,48)f32 nn.ReflectionPad2d manipulator.convblks_after.0.pad1 1 1 input0.77 14079 padding=(1,1,1,1) #input0.77=(1,192,48,48)f32 #14079=(1,192,50,50)f32 nn.Conv2d manipulator.convblks_after.0.conv1 1 1 14079 14080 bias=False dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(0,0) padding_mode=zeros stride=(1,1) @weight=(192,192,3,3)f32 #14079=(1,192,50,50)f32 #14080=(1,192,48,48)f32 nn.ReflectionPad2d manipulator.resblks.0.pad1 1 1 14080 14088 padding=(1,1,1,1) #14080=(1,192,48,48)f32 #14088=(1,192,50,50)f32 nn.Conv2d manipulator.resblks.0.conv1 1 1 14088 14089 bias=False dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(0,0) padding_mode=zeros stride=(1,1) @weight=(192,192,3,3)f32 #14088=(1,192,50,50)f32 #14089=(1,192,48,48)f32 nn.ReLU manipulator.resblks.0.relu 1 1 14089 14090 #14089=(1,192,48,48)f32 #14090=(1,192,48,48)f32 nn.ReflectionPad2d manipulator.resblks.0.pad2 1 1 14090 14091 padding=(1,1,1,1) #14090=(1,192,48,48)f32 #14091=(1,192,50,50)f32 nn.Conv2d manipulator.resblks.0.conv2 1 1 14091 14092 bias=False dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(0,0) padding_mode=zeros stride=(1,1) @weight=(192,192,3,3)f32 #14091=(1,192,50,50)f32 #14092=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5914 3 1 x_b.1 14092 14080 14094 expr=add(@0,add(@1,@2)) #x_b.1=(1,192,48,48)f32 #14092=(1,192,48,48)f32 #14080=(1,192,48,48)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5749 0 1 14095 expr=-1 pnnx.Expression pnnx_expr_5748 0 1 14096 expr=2 pnnx.Expression pnnx_expr_5747 0 1 14097 expr=1 pnnx.Expression pnnx_expr_5746 0 1 24621 expr=2 torch.flatten torch.flatten_2197 3 1 14094 14096 14095 14099 $input=14094 $start_dim=14096 $end_dim=14095 #14094=(1,192,48,48)f32 #14099=(1,192,2304)f32 torch.transpose torch.transpose_3132 3 1 14099 14097 24621 input.331 $input=14099 $dim0=14097 $dim1=24621 #14099=(1,192,2304)f32 #input.331=(1,2304,192)f32 nn.LayerNorm patch_embed_mmsa.norm 1 1 input.331 14101 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.331=(1,2304,192)f32 #14101=(1,2304,192)f32 nn.Dropout pos_drop_mmsa 1 1 14101 2016 #14101=(1,2304,192)f32 #2016=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.0.norm1 1 1 2016 14145 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #2016=(1,2304,192)f32 #14145=(1,2304,192)f32 pnnx.Expression pnnx_expr_5723 2 1 2016 14094 14146 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #2016=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1780 2 1 14145 14146 x.147 $input=14145 $shape=14146 #14145=(1,2304,192)f32 #x.147=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5702 1 1 x.147 14163 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.147=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5697 0 1 14165 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1781 2 1 x.147 14163 x0.75 $input=x.147 $shape=14163 #x.147=(1,48,48,192)f32 #x0.75=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5694 1 1 x.147 14168 expr=[-1,8,8,int(size(@0,3))] #x.147=(1,48,48,192)f32 torch.permute torch.permute_2816 2 1 x0.75 14165 14166 $input=x0.75 $dims=14165 #x0.75=(1,6,8,6,8,192)f32 #14166=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_218 1 
1 14166 14167 memory_format=torch.contiguous_format $input=14166 #14166=(1,6,6,8,8,192)f32 #14167=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5692 1 1 2016 14170 expr=[-1,64,int(size(@0,2))] #2016=(1,2304,192)f32 pnnx.Expression pnnx_expr_5682 0 1 14181 expr=-2 pnnx.Expression pnnx_expr_5681 0 1 14182 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.0.attn 0 1 relative_position_bias_table.147 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.147=(225,6)f32 pnnx.Attribute pnnx_unique_605 0 1 relative_position_index.147 @relative_position_index=(64,64)i64 #relative_position_index.147=(64,64)i64 Tensor.view Tensor.view_1782 2 1 14167 14168 x_windows.147 $input=14167 $shape=14168 #14167=(1,6,6,8,8,192)f32 #x_windows.147=(36,8,8,192)f32 Tensor.view Tensor.view_1783 2 1 x_windows.147 14170 x1.75 $input=x_windows.147 $shape=14170 #x_windows.147=(36,8,8,192)f32 #x1.75=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.0.attn.qkv 1 1 x1.75 14202 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.75=(36,64,192)f32 #14202=(36,64,576)f32 pnnx.Expression pnnx_expr_5666 1 1 x1.75 14205 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.75=(36,64,192)f32 pnnx.Expression pnnx_expr_5661 0 1 14207 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_578 2 1 14202 14205 14206 $input=14202 $shape=14205 #14202=(36,64,576)f32 #14206=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_5660 0 1 24639 expr=0 pnnx.Expression pnnx_expr_5659 0 1 24640 expr=0 pnnx.Expression pnnx_expr_5658 0 1 24641 expr=0 pnnx.Expression pnnx_expr_5657 0 1 24642 expr=1 pnnx.Expression pnnx_expr_5656 0 1 24643 expr=0 pnnx.Expression pnnx_expr_5655 0 1 24644 expr=2 torch.permute torch.permute_2817 2 1 14206 14207 qkv0.75 $input=14206 $dims=14207 #14206=(36,64,3,6,32)f32 #qkv0.75=(3,36,6,64,32)f32 Tensor.select Tensor.select_866 3 1 qkv0.75 24639 24640 q.147 $input=qkv0.75 $dim=24639 $index=24640 #qkv0.75=(3,36,6,64,32)f32 #q.147=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5654 1 1 q.147 q0.75 expr=mul(@0,1.767767e-01) #q.147=(36,6,64,32)f32 #q0.75=(36,6,64,32)f32 Tensor.select Tensor.select_867 3 1 qkv0.75 24641 24642 k.147 $input=qkv0.75 $dim=24641 $index=24642 #qkv0.75=(3,36,6,64,32)f32 #k.147=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5652 0 1 14215 expr=[-1] Tensor.view Tensor.view_1784 2 1 relative_position_index.147 14215 14216 $input=relative_position_index.147 $shape=14215 #relative_position_index.147=(64,64)i64 #14216=(4096)i64 pnnx.Expression pnnx_expr_5651 1 1 14216 14217 expr=[@0] #14216=(4096)i64 pnnx.Expression pnnx_expr_5648 0 1 14219 expr=[64,64,-1] Tensor.index Tensor.index_398 2 1 relative_position_bias_table.147 14217 14218 $input=relative_position_bias_table.147 $expr=14217 #relative_position_bias_table.147=(225,6)f32 #14218=(4096,6)f32 pnnx.Expression pnnx_expr_5644 0 1 14221 expr=[2,0,1] Tensor.view Tensor.view_1785 2 1 14218 14219 relative_position_bias.147 $input=14218 $shape=14219 #14218=(4096,6)f32 #relative_position_bias.147=(64,64,6)f32 pnnx.Expression pnnx_expr_5643 0 1 24652 expr=0 torch.permute torch.permute_2818 2 1 relative_position_bias.147 14221 14222 $input=relative_position_bias.147 $dims=14221 #relative_position_bias.147=(64,64,6)f32 #14222=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_219 1 1 14222 relative_position_bias0.75 memory_format=torch.contiguous_format $input=14222 #14222=(6,64,64)f32 #relative_position_bias0.75=(6,64,64)f32 torch.transpose torch.transpose_3133 3 1 k.147 14181 14182 14213 $input=k.147 
$dim0=14181 $dim1=14182 #k.147=(36,6,64,32)f32 #14213=(36,6,32,64)f32 torch.matmul torch.matmul_2348 2 1 q0.75 14213 attn.295 $input=q0.75 $other=14213 #q0.75=(36,6,64,32)f32 #14213=(36,6,32,64)f32 #attn.295=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3362 2 1 relative_position_bias0.75 24652 14224 $input=relative_position_bias0.75 $dim=24652 #relative_position_bias0.75=(6,64,64)f32 #14224=(1,6,64,64)f32 pnnx.Expression pnnx_expr_5641 2 1 attn.295 14224 input.333 expr=add(@0,@1) #attn.295=(36,6,64,64)f32 #14224=(1,6,64,64)f32 #input.333=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.0.attn.softmax 1 1 input.333 14226 dim=-1 #input.333=(36,6,64,64)f32 #14226=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.0.attn.attn_drop 1 1 14226 14227 #14226=(36,6,64,64)f32 #14227=(36,6,64,64)f32 Tensor.select Tensor.select_868 3 1 qkv0.75 24643 24644 v.147 $input=qkv0.75 $dim=24643 $index=24644 #qkv0.75=(3,36,6,64,32)f32 #v.147=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5640 0 1 24654 expr=1 pnnx.Expression pnnx_expr_5639 0 1 24655 expr=2 torch.matmul torch.matmul_2349 2 1 14227 v.147 14228 $input=14227 $other=v.147 #14227=(36,6,64,64)f32 #v.147=(36,6,64,32)f32 #14228=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5638 1 1 x1.75 14230 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.75=(36,64,192)f32 torch.transpose torch.transpose_3134 3 1 14228 24654 24655 14229 $input=14228 $dim0=24654 $dim1=24655 #14228=(36,6,64,32)f32 #14229=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_579 2 1 14229 14230 input0.79 $input=14229 $shape=14230 #14229=(36,64,6,32)f32 #input0.79=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.0.attn.proj 1 1 input0.79 14232 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.79=(36,64,192)f32 #14232=(36,64,192)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.0.attn.proj_drop 1 1 14232 14233 #14232=(36,64,192)f32 #14233=(36,64,192)f32 pnnx.Expression pnnx_expr_5634 1 1 2016 14234 expr=[-1,8,8,int(size(@0,2))] #2016=(1,2304,192)f32 pnnx.Expression pnnx_expr_5621 1 1 14094 14240 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5614 0 1 14242 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1786 2 1 14233 14234 windows.147 $input=14233 $shape=14234 #14233=(36,64,192)f32 #windows.147=(36,8,8,192)f32 Tensor.view Tensor.view_1787 2 1 windows.147 14240 x2.75 $input=windows.147 $shape=14240 #windows.147=(36,8,8,192)f32 #x2.75=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5611 1 1 14094 14245 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2819 2 1 x2.75 14242 14243 $input=x2.75 $dims=14242 #x2.75=(1,6,6,8,8,192)f32 #14243=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_220 1 1 14243 14244 memory_format=torch.contiguous_format $input=14243 #14243=(1,6,8,6,8,192)f32 #14244=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5608 2 1 2016 14094 14249 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #2016=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1788 2 1 14244 14245 x3.75 $input=14244 $shape=14245 #14244=(1,6,8,6,8,192)f32 #x3.75=(1,48,48,192)f32 Tensor.view Tensor.view_1789 2 1 x3.75 14249 x4.75 $input=x3.75 $shape=14249 #x3.75=(1,48,48,192)f32 #x4.75=(1,2304,192)f32 pnnx.Expression pnnx_expr_5606 2 1 2016 x4.75 input.335 expr=add(@0,@1) #2016=(1,2304,192)f32 #x4.75=(1,2304,192)f32 #input.335=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.0.norm2 
1 1 input.335 14253 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.335=(1,2304,192)f32 #14253=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.0.mlp.fc1 1 1 14253 14258 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14253=(1,2304,192)f32 #14258=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.0.mlp.act 1 1 14258 14259 #14258=(1,2304,384)f32 #14259=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.0.mlp.drop 1 1 14259 14260 #14259=(1,2304,384)f32 #14260=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.0.mlp.fc2 1 1 14260 14261 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14260=(1,2304,384)f32 #14261=(1,2304,192)f32 nn.Dropout pnnx_unique_606 1 1 14261 14262 #14261=(1,2304,192)f32 #14262=(1,2304,192)f32 pnnx.Expression pnnx_expr_5604 2 1 input.335 14262 14264 expr=add(@0,@1) #input.335=(1,2304,192)f32 #14262=(1,2304,192)f32 #14264=(1,2304,192)f32 pnnx.Attribute layers_mmsa.0.residual_group.blocks.1 0 1 attn_mask.75 @attn_mask=(36,64,64)f32 #attn_mask.75=(36,64,64)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.1.norm1 1 1 14264 14293 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14264=(1,2304,192)f32 #14293=(1,2304,192)f32 pnnx.Expression pnnx_expr_5581 2 1 14264 14094 14294 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #14264=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5579 0 1 14296 expr=[-4,-4] pnnx.Expression pnnx_expr_5577 0 1 14297 expr=[1,2] Tensor.view Tensor.view_1790 2 1 14293 14294 x.149 $input=14293 $shape=14294 #14293=(1,2304,192)f32 #x.149=(1,48,48,192)f32 torch.roll torch.roll_2492 3 1 x.149 14296 14297 x0.77 $input=x.149 $shifts=14296 $dims=14297 #x.149=(1,48,48,192)f32 #x0.77=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5555 1 1 x0.77 14314 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.77=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5550 0 1 14316 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1791 2 1 x0.77 14314 x1.77 $input=x0.77 $shape=14314 #x0.77=(1,48,48,192)f32 #x1.77=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5547 1 1 x0.77 14319 expr=[-1,8,8,int(size(@0,3))] #x0.77=(1,48,48,192)f32 torch.permute torch.permute_2820 2 1 x1.77 14316 14317 $input=x1.77 $dims=14316 #x1.77=(1,6,8,6,8,192)f32 #14317=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_221 1 1 14317 14318 memory_format=torch.contiguous_format $input=14317 #14317=(1,6,6,8,8,192)f32 #14318=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5545 1 1 14264 14321 expr=[-1,64,int(size(@0,2))] #14264=(1,2304,192)f32 pnnx.Expression pnnx_expr_5535 0 1 14332 expr=-2 pnnx.Expression pnnx_expr_5534 0 1 14333 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.1.attn 0 1 relative_position_bias_table.149 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.149=(225,6)f32 pnnx.Attribute pnnx_unique_607 0 1 relative_position_index.149 @relative_position_index=(64,64)i64 #relative_position_index.149=(64,64)i64 Tensor.view Tensor.view_1792 2 1 14318 14319 x_windows.149 $input=14318 $shape=14319 #14318=(1,6,6,8,8,192)f32 #x_windows.149=(36,8,8,192)f32 Tensor.view Tensor.view_1793 2 1 x_windows.149 14321 x2.77 $input=x_windows.149 $shape=14321 #x_windows.149=(36,8,8,192)f32 #x2.77=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.1.attn.qkv 1 1 x2.77 14357 bias=True in_features=192 
out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.77=(36,64,192)f32 #14357=(36,64,576)f32 pnnx.Expression pnnx_expr_5515 1 1 x2.77 14360 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.77=(36,64,192)f32 pnnx.Expression pnnx_expr_5510 0 1 14362 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_580 2 1 14357 14360 14361 $input=14357 $shape=14360 #14357=(36,64,576)f32 #14361=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_5509 0 1 24698 expr=0 pnnx.Expression pnnx_expr_5508 0 1 24699 expr=0 pnnx.Expression pnnx_expr_5507 0 1 24700 expr=0 pnnx.Expression pnnx_expr_5506 0 1 24701 expr=1 pnnx.Expression pnnx_expr_5505 0 1 24702 expr=0 pnnx.Expression pnnx_expr_5504 0 1 24703 expr=2 torch.permute torch.permute_2821 2 1 14361 14362 qkv0.77 $input=14361 $dims=14362 #14361=(36,64,3,6,32)f32 #qkv0.77=(3,36,6,64,32)f32 Tensor.select Tensor.select_869 3 1 qkv0.77 24698 24699 q.149 $input=qkv0.77 $dim=24698 $index=24699 #qkv0.77=(3,36,6,64,32)f32 #q.149=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5503 1 1 q.149 q0.77 expr=mul(@0,1.767767e-01) #q.149=(36,6,64,32)f32 #q0.77=(36,6,64,32)f32 Tensor.select Tensor.select_870 3 1 qkv0.77 24700 24701 k.149 $input=qkv0.77 $dim=24700 $index=24701 #qkv0.77=(3,36,6,64,32)f32 #k.149=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5501 0 1 14370 expr=[-1] Tensor.view Tensor.view_1794 2 1 relative_position_index.149 14370 14371 $input=relative_position_index.149 $shape=14370 #relative_position_index.149=(64,64)i64 #14371=(4096)i64 pnnx.Expression pnnx_expr_5500 1 1 14371 14372 expr=[@0] #14371=(4096)i64 pnnx.Expression pnnx_expr_5497 0 1 14374 expr=[64,64,-1] Tensor.index Tensor.index_399 2 1 relative_position_bias_table.149 14372 14373 $input=relative_position_bias_table.149 $expr=14372 #relative_position_bias_table.149=(225,6)f32 #14373=(4096,6)f32 pnnx.Expression pnnx_expr_5493 0 1 14376 expr=[2,0,1] Tensor.view Tensor.view_1795 2 1 14373 14374 relative_position_bias.149 $input=14373 $shape=14374 #14373=(4096,6)f32 #relative_position_bias.149=(64,64,6)f32 pnnx.Expression pnnx_expr_5492 0 1 24711 expr=0 torch.permute torch.permute_2822 2 1 relative_position_bias.149 14376 14377 $input=relative_position_bias.149 $dims=14376 #relative_position_bias.149=(64,64,6)f32 #14377=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_222 1 1 14377 relative_position_bias0.77 memory_format=torch.contiguous_format $input=14377 #14377=(6,64,64)f32 #relative_position_bias0.77=(6,64,64)f32 torch.transpose torch.transpose_3135 3 1 k.149 14332 14333 14368 $input=k.149 $dim0=14332 $dim1=14333 #k.149=(36,6,64,32)f32 #14368=(36,6,32,64)f32 torch.matmul torch.matmul_2350 2 1 q0.77 14368 attn.299 $input=q0.77 $other=14368 #q0.77=(36,6,64,32)f32 #14368=(36,6,32,64)f32 #attn.299=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3363 2 1 relative_position_bias0.77 24711 14379 $input=relative_position_bias0.77 $dim=24711 #relative_position_bias0.77=(6,64,64)f32 #14379=(1,6,64,64)f32 pnnx.Expression pnnx_expr_5490 2 1 attn.299 14379 attn0.39 expr=add(@0,@1) #attn.299=(36,6,64,64)f32 #14379=(1,6,64,64)f32 #attn0.39=(36,6,64,64)f32 pnnx.Expression pnnx_expr_5481 2 1 x2.77 attn_mask.75 14386 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.77=(36,64,192)f32 #attn_mask.75=(36,64,64)f32 pnnx.Expression pnnx_expr_5480 0 1 24716 expr=1 pnnx.Expression pnnx_expr_5479 0 1 24717 expr=0 Tensor.view Tensor.view_1796 2 1 attn0.39 14386 14387 $input=attn0.39 $shape=14386 #attn0.39=(36,6,64,64)f32 #14387=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3364 
2 1 attn_mask.75 24716 14388 $input=attn_mask.75 $dim=24716 #attn_mask.75=(36,64,64)f32 #14388=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3365 2 1 14388 24717 14389 $input=14388 $dim=24717 #14388=(36,1,64,64)f32 #14389=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_5477 2 1 14387 14389 attn1.39 expr=add(@0,@1) #14387=(1,36,6,64,64)f32 #14389=(1,36,1,64,64)f32 #attn1.39=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_5474 1 1 x2.77 14391 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.77=(36,64,192)f32 Tensor.view Tensor.view_1797 2 1 attn1.39 14391 input.337 $input=attn1.39 $shape=14391 #attn1.39=(1,36,6,64,64)f32 #input.337=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.1.attn.softmax 1 1 input.337 14393 dim=-1 #input.337=(36,6,64,64)f32 #14393=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.1.attn.attn_drop 1 1 14393 14394 #14393=(36,6,64,64)f32 #14394=(36,6,64,64)f32 Tensor.select Tensor.select_871 3 1 qkv0.77 24702 24703 v.149 $input=qkv0.77 $dim=24702 $index=24703 #qkv0.77=(3,36,6,64,32)f32 #v.149=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5473 0 1 24721 expr=1 pnnx.Expression pnnx_expr_5472 0 1 24722 expr=2 torch.matmul torch.matmul_2351 2 1 14394 v.149 14395 $input=14394 $other=v.149 #14394=(36,6,64,64)f32 #v.149=(36,6,64,32)f32 #14395=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5471 1 1 x2.77 14397 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.77=(36,64,192)f32 torch.transpose torch.transpose_3136 3 1 14395 24721 24722 14396 $input=14395 $dim0=24721 $dim1=24722 #14395=(36,6,64,32)f32 #14396=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_581 2 1 14396 14397 input0.81 $input=14396 $shape=14397 #14396=(36,64,6,32)f32 #input0.81=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.1.attn.proj 1 1 input0.81 14399 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.81=(36,64,192)f32 #14399=(36,64,192)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.1.attn.proj_drop 1 1 14399 14400 #14399=(36,64,192)f32 #14400=(36,64,192)f32 pnnx.Expression pnnx_expr_5467 1 1 14264 14401 expr=[-1,8,8,int(size(@0,2))] #14264=(1,2304,192)f32 pnnx.Expression pnnx_expr_5454 1 1 14094 14407 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5447 0 1 14409 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1798 2 1 14400 14401 windows.149 $input=14400 $shape=14401 #14400=(36,64,192)f32 #windows.149=(36,8,8,192)f32 Tensor.view Tensor.view_1799 2 1 windows.149 14407 x3.77 $input=windows.149 $shape=14407 #windows.149=(36,8,8,192)f32 #x3.77=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5444 1 1 14094 14412 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2823 2 1 x3.77 14409 14410 $input=x3.77 $dims=14409 #x3.77=(1,6,6,8,8,192)f32 #14410=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_223 1 1 14410 14411 memory_format=torch.contiguous_format $input=14410 #14410=(1,6,8,6,8,192)f32 #14411=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5441 0 1 14414 expr=[4,4] pnnx.Expression pnnx_expr_5438 0 1 14415 expr=[1,2] Tensor.view Tensor.view_1800 2 1 14411 14412 shifted_x.75 $input=14411 $shape=14412 #14411=(1,6,8,6,8,192)f32 #shifted_x.75=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5435 2 1 14264 14094 14419 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #14264=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2493 3 1 shifted_x.75 14414 14415 x4.77 $input=shifted_x.75 $shifts=14414 $dims=14415 
#shifted_x.75=(1,48,48,192)f32 #x4.77=(1,48,48,192)f32 Tensor.view Tensor.view_1801 2 1 x4.77 14419 x5.75 $input=x4.77 $shape=14419 #x4.77=(1,48,48,192)f32 #x5.75=(1,2304,192)f32 pnnx.Expression pnnx_expr_5433 2 1 14264 x5.75 input.339 expr=add(@0,@1) #14264=(1,2304,192)f32 #x5.75=(1,2304,192)f32 #input.339=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.1.norm2 1 1 input.339 14423 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.339=(1,2304,192)f32 #14423=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.1.mlp.fc1 1 1 14423 14428 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14423=(1,2304,192)f32 #14428=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.1.mlp.act 1 1 14428 14429 #14428=(1,2304,384)f32 #14429=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.1.mlp.drop 1 1 14429 14430 #14429=(1,2304,384)f32 #14430=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.1.mlp.fc2 1 1 14430 14431 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14430=(1,2304,384)f32 #14431=(1,2304,192)f32 nn.Dropout pnnx_unique_608 1 1 14431 14432 #14431=(1,2304,192)f32 #14432=(1,2304,192)f32 pnnx.Expression pnnx_expr_5431 2 1 input.339 14432 14434 expr=add(@0,@1) #input.339=(1,2304,192)f32 #14432=(1,2304,192)f32 #14434=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.2.norm1 1 1 14434 14461 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14434=(1,2304,192)f32 #14461=(1,2304,192)f32 pnnx.Expression pnnx_expr_5409 2 1 14434 14094 14462 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #14434=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1802 2 1 14461 14462 x.151 $input=14461 $shape=14462 #14461=(1,2304,192)f32 #x.151=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5388 1 1 x.151 14479 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.151=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5383 0 1 14481 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1803 2 1 x.151 14479 x0.79 $input=x.151 $shape=14479 #x.151=(1,48,48,192)f32 #x0.79=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5380 1 1 x.151 14484 expr=[-1,8,8,int(size(@0,3))] #x.151=(1,48,48,192)f32 torch.permute torch.permute_2824 2 1 x0.79 14481 14482 $input=x0.79 $dims=14481 #x0.79=(1,6,8,6,8,192)f32 #14482=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_224 1 1 14482 14483 memory_format=torch.contiguous_format $input=14482 #14482=(1,6,6,8,8,192)f32 #14483=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5378 1 1 14434 14486 expr=[-1,64,int(size(@0,2))] #14434=(1,2304,192)f32 pnnx.Expression pnnx_expr_5368 0 1 14497 expr=-2 pnnx.Expression pnnx_expr_5367 0 1 14498 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.2.attn 0 1 relative_position_bias_table.151 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.151=(225,6)f32 pnnx.Attribute pnnx_unique_609 0 1 relative_position_index.151 @relative_position_index=(64,64)i64 #relative_position_index.151=(64,64)i64 Tensor.view Tensor.view_1804 2 1 14483 14484 x_windows.151 $input=14483 $shape=14484 #14483=(1,6,6,8,8,192)f32 #x_windows.151=(36,8,8,192)f32 Tensor.view Tensor.view_1805 2 1 x_windows.151 14486 x1.79 $input=x_windows.151 $shape=14486 #x_windows.151=(36,8,8,192)f32 #x1.79=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.2.attn.qkv 1 1 x1.79 14518 bias=True 
in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.79=(36,64,192)f32 #14518=(36,64,576)f32 pnnx.Expression pnnx_expr_5352 1 1 x1.79 14521 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.79=(36,64,192)f32 pnnx.Expression pnnx_expr_5347 0 1 14523 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_582 2 1 14518 14521 14522 $input=14518 $shape=14521 #14518=(36,64,576)f32 #14522=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_5346 0 1 24766 expr=0 pnnx.Expression pnnx_expr_5345 0 1 24767 expr=0 pnnx.Expression pnnx_expr_5344 0 1 24768 expr=0 pnnx.Expression pnnx_expr_5343 0 1 24769 expr=1 pnnx.Expression pnnx_expr_5342 0 1 24770 expr=0 pnnx.Expression pnnx_expr_5341 0 1 24771 expr=2 torch.permute torch.permute_2825 2 1 14522 14523 qkv0.79 $input=14522 $dims=14523 #14522=(36,64,3,6,32)f32 #qkv0.79=(3,36,6,64,32)f32 Tensor.select Tensor.select_872 3 1 qkv0.79 24766 24767 q.151 $input=qkv0.79 $dim=24766 $index=24767 #qkv0.79=(3,36,6,64,32)f32 #q.151=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5340 1 1 q.151 q0.79 expr=mul(@0,1.767767e-01) #q.151=(36,6,64,32)f32 #q0.79=(36,6,64,32)f32 Tensor.select Tensor.select_873 3 1 qkv0.79 24768 24769 k.151 $input=qkv0.79 $dim=24768 $index=24769 #qkv0.79=(3,36,6,64,32)f32 #k.151=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5338 0 1 14531 expr=[-1] Tensor.view Tensor.view_1806 2 1 relative_position_index.151 14531 14532 $input=relative_position_index.151 $shape=14531 #relative_position_index.151=(64,64)i64 #14532=(4096)i64 pnnx.Expression pnnx_expr_5337 1 1 14532 14533 expr=[@0] #14532=(4096)i64 pnnx.Expression pnnx_expr_5334 0 1 14535 expr=[64,64,-1] Tensor.index Tensor.index_400 2 1 relative_position_bias_table.151 14533 14534 $input=relative_position_bias_table.151 $expr=14533 #relative_position_bias_table.151=(225,6)f32 #14534=(4096,6)f32 pnnx.Expression pnnx_expr_5330 0 1 14537 expr=[2,0,1] Tensor.view Tensor.view_1807 2 1 14534 14535 relative_position_bias.151 $input=14534 $shape=14535 #14534=(4096,6)f32 #relative_position_bias.151=(64,64,6)f32 pnnx.Expression pnnx_expr_5329 0 1 24779 expr=0 torch.permute torch.permute_2826 2 1 relative_position_bias.151 14537 14538 $input=relative_position_bias.151 $dims=14537 #relative_position_bias.151=(64,64,6)f32 #14538=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_225 1 1 14538 relative_position_bias0.79 memory_format=torch.contiguous_format $input=14538 #14538=(6,64,64)f32 #relative_position_bias0.79=(6,64,64)f32 torch.transpose torch.transpose_3137 3 1 k.151 14497 14498 14529 $input=k.151 $dim0=14497 $dim1=14498 #k.151=(36,6,64,32)f32 #14529=(36,6,32,64)f32 torch.matmul torch.matmul_2352 2 1 q0.79 14529 attn.303 $input=q0.79 $other=14529 #q0.79=(36,6,64,32)f32 #14529=(36,6,32,64)f32 #attn.303=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3366 2 1 relative_position_bias0.79 24779 14540 $input=relative_position_bias0.79 $dim=24779 #relative_position_bias0.79=(6,64,64)f32 #14540=(1,6,64,64)f32 pnnx.Expression pnnx_expr_5327 2 1 attn.303 14540 input.341 expr=add(@0,@1) #attn.303=(36,6,64,64)f32 #14540=(1,6,64,64)f32 #input.341=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.2.attn.softmax 1 1 input.341 14542 dim=-1 #input.341=(36,6,64,64)f32 #14542=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.2.attn.attn_drop 1 1 14542 14543 #14542=(36,6,64,64)f32 #14543=(36,6,64,64)f32 Tensor.select Tensor.select_874 3 1 qkv0.79 24770 24771 v.151 $input=qkv0.79 $dim=24770 $index=24771 #qkv0.79=(3,36,6,64,32)f32 #v.151=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5326 0 
1 24781 expr=1 pnnx.Expression pnnx_expr_5325 0 1 24782 expr=2 torch.matmul torch.matmul_2353 2 1 14543 v.151 14544 $input=14543 $other=v.151 #14543=(36,6,64,64)f32 #v.151=(36,6,64,32)f32 #14544=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5324 1 1 x1.79 14546 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.79=(36,64,192)f32 torch.transpose torch.transpose_3138 3 1 14544 24781 24782 14545 $input=14544 $dim0=24781 $dim1=24782 #14544=(36,6,64,32)f32 #14545=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_583 2 1 14545 14546 input0.83 $input=14545 $shape=14546 #14545=(36,64,6,32)f32 #input0.83=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.2.attn.proj 1 1 input0.83 14548 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.83=(36,64,192)f32 #14548=(36,64,192)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.2.attn.proj_drop 1 1 14548 14549 #14548=(36,64,192)f32 #14549=(36,64,192)f32 pnnx.Expression pnnx_expr_5320 1 1 14434 14550 expr=[-1,8,8,int(size(@0,2))] #14434=(1,2304,192)f32 pnnx.Expression pnnx_expr_5307 1 1 14094 14556 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5300 0 1 14558 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1808 2 1 14549 14550 windows.151 $input=14549 $shape=14550 #14549=(36,64,192)f32 #windows.151=(36,8,8,192)f32 Tensor.view Tensor.view_1809 2 1 windows.151 14556 x2.79 $input=windows.151 $shape=14556 #windows.151=(36,8,8,192)f32 #x2.79=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5297 1 1 14094 14561 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2827 2 1 x2.79 14558 14559 $input=x2.79 $dims=14558 #x2.79=(1,6,6,8,8,192)f32 #14559=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_226 1 1 14559 14560 memory_format=torch.contiguous_format $input=14559 #14559=(1,6,8,6,8,192)f32 #14560=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5294 2 1 14434 14094 14565 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #14434=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1810 2 1 14560 14561 x3.79 $input=14560 $shape=14561 #14560=(1,6,8,6,8,192)f32 #x3.79=(1,48,48,192)f32 Tensor.view Tensor.view_1811 2 1 x3.79 14565 x4.79 $input=x3.79 $shape=14565 #x3.79=(1,48,48,192)f32 #x4.79=(1,2304,192)f32 pnnx.Expression pnnx_expr_5292 2 1 14434 x4.79 input.343 expr=add(@0,@1) #14434=(1,2304,192)f32 #x4.79=(1,2304,192)f32 #input.343=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.2.norm2 1 1 input.343 14569 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.343=(1,2304,192)f32 #14569=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.2.mlp.fc1 1 1 14569 14574 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14569=(1,2304,192)f32 #14574=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.2.mlp.act 1 1 14574 14575 #14574=(1,2304,384)f32 #14575=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.2.mlp.drop 1 1 14575 14576 #14575=(1,2304,384)f32 #14576=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.2.mlp.fc2 1 1 14576 14577 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14576=(1,2304,384)f32 #14577=(1,2304,192)f32 nn.Dropout pnnx_unique_610 1 1 14577 14578 #14577=(1,2304,192)f32 #14578=(1,2304,192)f32 pnnx.Expression pnnx_expr_5290 2 1 input.343 14578 14580 expr=add(@0,@1) #input.343=(1,2304,192)f32 
#14578=(1,2304,192)f32 #14580=(1,2304,192)f32 pnnx.Attribute layers_mmsa.0.residual_group.blocks.3 0 1 attn_mask.77 @attn_mask=(36,64,64)f32 #attn_mask.77=(36,64,64)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.3.norm1 1 1 14580 14609 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14580=(1,2304,192)f32 #14609=(1,2304,192)f32 pnnx.Expression pnnx_expr_5267 2 1 14580 14094 14610 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #14580=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5265 0 1 14612 expr=[-4,-4] pnnx.Expression pnnx_expr_5263 0 1 14613 expr=[1,2] Tensor.view Tensor.view_1812 2 1 14609 14610 x.153 $input=14609 $shape=14610 #14609=(1,2304,192)f32 #x.153=(1,48,48,192)f32 torch.roll torch.roll_2494 3 1 x.153 14612 14613 x0.81 $input=x.153 $shifts=14612 $dims=14613 #x.153=(1,48,48,192)f32 #x0.81=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5241 1 1 x0.81 14630 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.81=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5236 0 1 14632 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1813 2 1 x0.81 14630 x1.81 $input=x0.81 $shape=14630 #x0.81=(1,48,48,192)f32 #x1.81=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5233 1 1 x0.81 14635 expr=[-1,8,8,int(size(@0,3))] #x0.81=(1,48,48,192)f32 torch.permute torch.permute_2828 2 1 x1.81 14632 14633 $input=x1.81 $dims=14632 #x1.81=(1,6,8,6,8,192)f32 #14633=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_227 1 1 14633 14634 memory_format=torch.contiguous_format $input=14633 #14633=(1,6,6,8,8,192)f32 #14634=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5231 1 1 14580 14637 expr=[-1,64,int(size(@0,2))] #14580=(1,2304,192)f32 pnnx.Expression pnnx_expr_5221 0 1 14648 expr=-2 pnnx.Expression pnnx_expr_5220 0 1 14649 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.3.attn 0 1 relative_position_bias_table.153 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.153=(225,6)f32 pnnx.Attribute pnnx_unique_611 0 1 relative_position_index.153 @relative_position_index=(64,64)i64 #relative_position_index.153=(64,64)i64 Tensor.view Tensor.view_1814 2 1 14634 14635 x_windows.153 $input=14634 $shape=14635 #14634=(1,6,6,8,8,192)f32 #x_windows.153=(36,8,8,192)f32 Tensor.view Tensor.view_1815 2 1 x_windows.153 14637 x2.81 $input=x_windows.153 $shape=14637 #x_windows.153=(36,8,8,192)f32 #x2.81=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.3.attn.qkv 1 1 x2.81 14673 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.81=(36,64,192)f32 #14673=(36,64,576)f32 pnnx.Expression pnnx_expr_5201 1 1 x2.81 14676 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.81=(36,64,192)f32 pnnx.Expression pnnx_expr_5196 0 1 14678 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_584 2 1 14673 14676 14677 $input=14673 $shape=14676 #14673=(36,64,576)f32 #14677=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_5195 0 1 24825 expr=0 pnnx.Expression pnnx_expr_5194 0 1 24826 expr=0 pnnx.Expression pnnx_expr_5193 0 1 24827 expr=0 pnnx.Expression pnnx_expr_5192 0 1 24828 expr=1 pnnx.Expression pnnx_expr_5191 0 1 24829 expr=0 pnnx.Expression pnnx_expr_5190 0 1 24830 expr=2 torch.permute torch.permute_2829 2 1 14677 14678 qkv0.81 $input=14677 $dims=14678 #14677=(36,64,3,6,32)f32 #qkv0.81=(3,36,6,64,32)f32 Tensor.select Tensor.select_875 3 1 qkv0.81 24825 24826 q.153 $input=qkv0.81 $dim=24825 $index=24826 #qkv0.81=(3,36,6,64,32)f32 
#q.153=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5189 1 1 q.153 q0.81 expr=mul(@0,1.767767e-01) #q.153=(36,6,64,32)f32 #q0.81=(36,6,64,32)f32 Tensor.select Tensor.select_876 3 1 qkv0.81 24827 24828 k.153 $input=qkv0.81 $dim=24827 $index=24828 #qkv0.81=(3,36,6,64,32)f32 #k.153=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5187 0 1 14686 expr=[-1] Tensor.view Tensor.view_1816 2 1 relative_position_index.153 14686 14687 $input=relative_position_index.153 $shape=14686 #relative_position_index.153=(64,64)i64 #14687=(4096)i64 pnnx.Expression pnnx_expr_5186 1 1 14687 14688 expr=[@0] #14687=(4096)i64 pnnx.Expression pnnx_expr_5183 0 1 14690 expr=[64,64,-1] Tensor.index Tensor.index_401 2 1 relative_position_bias_table.153 14688 14689 $input=relative_position_bias_table.153 $expr=14688 #relative_position_bias_table.153=(225,6)f32 #14689=(4096,6)f32 pnnx.Expression pnnx_expr_5179 0 1 14692 expr=[2,0,1] Tensor.view Tensor.view_1817 2 1 14689 14690 relative_position_bias.153 $input=14689 $shape=14690 #14689=(4096,6)f32 #relative_position_bias.153=(64,64,6)f32 pnnx.Expression pnnx_expr_5178 0 1 24838 expr=0 torch.permute torch.permute_2830 2 1 relative_position_bias.153 14692 14693 $input=relative_position_bias.153 $dims=14692 #relative_position_bias.153=(64,64,6)f32 #14693=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_228 1 1 14693 relative_position_bias0.81 memory_format=torch.contiguous_format $input=14693 #14693=(6,64,64)f32 #relative_position_bias0.81=(6,64,64)f32 torch.transpose torch.transpose_3139 3 1 k.153 14648 14649 14684 $input=k.153 $dim0=14648 $dim1=14649 #k.153=(36,6,64,32)f32 #14684=(36,6,32,64)f32 torch.matmul torch.matmul_2354 2 1 q0.81 14684 attn.307 $input=q0.81 $other=14684 #q0.81=(36,6,64,32)f32 #14684=(36,6,32,64)f32 #attn.307=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3367 2 1 relative_position_bias0.81 24838 14695 $input=relative_position_bias0.81 $dim=24838 #relative_position_bias0.81=(6,64,64)f32 #14695=(1,6,64,64)f32 pnnx.Expression pnnx_expr_5176 2 1 attn.307 14695 attn0.41 expr=add(@0,@1) #attn.307=(36,6,64,64)f32 #14695=(1,6,64,64)f32 #attn0.41=(36,6,64,64)f32 pnnx.Expression pnnx_expr_5167 2 1 x2.81 attn_mask.77 14702 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.81=(36,64,192)f32 #attn_mask.77=(36,64,64)f32 pnnx.Expression pnnx_expr_5166 0 1 24843 expr=1 pnnx.Expression pnnx_expr_5165 0 1 24844 expr=0 Tensor.view Tensor.view_1818 2 1 attn0.41 14702 14703 $input=attn0.41 $shape=14702 #attn0.41=(36,6,64,64)f32 #14703=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3368 2 1 attn_mask.77 24843 14704 $input=attn_mask.77 $dim=24843 #attn_mask.77=(36,64,64)f32 #14704=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3369 2 1 14704 24844 14705 $input=14704 $dim=24844 #14704=(36,1,64,64)f32 #14705=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_5163 2 1 14703 14705 attn1.41 expr=add(@0,@1) #14703=(1,36,6,64,64)f32 #14705=(1,36,1,64,64)f32 #attn1.41=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_5160 1 1 x2.81 14707 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.81=(36,64,192)f32 Tensor.view Tensor.view_1819 2 1 attn1.41 14707 input.345 $input=attn1.41 $shape=14707 #attn1.41=(1,36,6,64,64)f32 #input.345=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.3.attn.softmax 1 1 input.345 14709 dim=-1 #input.345=(36,6,64,64)f32 #14709=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.3.attn.attn_drop 1 1 14709 14710 #14709=(36,6,64,64)f32 #14710=(36,6,64,64)f32 Tensor.select Tensor.select_877 3 1 qkv0.81 24829 24830 
v.153 $input=qkv0.81 $dim=24829 $index=24830 #qkv0.81=(3,36,6,64,32)f32 #v.153=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5159 0 1 24848 expr=1 pnnx.Expression pnnx_expr_5158 0 1 24849 expr=2 torch.matmul torch.matmul_2355 2 1 14710 v.153 14711 $input=14710 $other=v.153 #14710=(36,6,64,64)f32 #v.153=(36,6,64,32)f32 #14711=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5157 1 1 x2.81 14713 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.81=(36,64,192)f32 torch.transpose torch.transpose_3140 3 1 14711 24848 24849 14712 $input=14711 $dim0=24848 $dim1=24849 #14711=(36,6,64,32)f32 #14712=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_585 2 1 14712 14713 input0.85 $input=14712 $shape=14713 #14712=(36,64,6,32)f32 #input0.85=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.3.attn.proj 1 1 input0.85 14715 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.85=(36,64,192)f32 #14715=(36,64,192)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.3.attn.proj_drop 1 1 14715 14716 #14715=(36,64,192)f32 #14716=(36,64,192)f32 pnnx.Expression pnnx_expr_5153 1 1 14580 14717 expr=[-1,8,8,int(size(@0,2))] #14580=(1,2304,192)f32 pnnx.Expression pnnx_expr_5140 1 1 14094 14723 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_5133 0 1 14725 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1820 2 1 14716 14717 windows.153 $input=14716 $shape=14717 #14716=(36,64,192)f32 #windows.153=(36,8,8,192)f32 Tensor.view Tensor.view_1821 2 1 windows.153 14723 x3.81 $input=windows.153 $shape=14723 #windows.153=(36,8,8,192)f32 #x3.81=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5130 1 1 14094 14728 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2831 2 1 x3.81 14725 14726 $input=x3.81 $dims=14725 #x3.81=(1,6,6,8,8,192)f32 #14726=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_229 1 1 14726 14727 memory_format=torch.contiguous_format $input=14726 #14726=(1,6,8,6,8,192)f32 #14727=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5127 0 1 14730 expr=[4,4] pnnx.Expression pnnx_expr_5124 0 1 14731 expr=[1,2] Tensor.view Tensor.view_1822 2 1 14727 14728 shifted_x.77 $input=14727 $shape=14728 #14727=(1,6,8,6,8,192)f32 #shifted_x.77=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5121 2 1 14580 14094 14735 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #14580=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2495 3 1 shifted_x.77 14730 14731 x4.81 $input=shifted_x.77 $shifts=14730 $dims=14731 #shifted_x.77=(1,48,48,192)f32 #x4.81=(1,48,48,192)f32 Tensor.view Tensor.view_1823 2 1 x4.81 14735 x5.77 $input=x4.81 $shape=14735 #x4.81=(1,48,48,192)f32 #x5.77=(1,2304,192)f32 pnnx.Expression pnnx_expr_5119 2 1 14580 x5.77 input.347 expr=add(@0,@1) #14580=(1,2304,192)f32 #x5.77=(1,2304,192)f32 #input.347=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.3.norm2 1 1 input.347 14739 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.347=(1,2304,192)f32 #14739=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.3.mlp.fc1 1 1 14739 14744 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14739=(1,2304,192)f32 #14744=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.3.mlp.act 1 1 14744 14745 #14744=(1,2304,384)f32 #14745=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.3.mlp.drop 1 1 14745 14746 #14745=(1,2304,384)f32 
#14746=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.3.mlp.fc2 1 1 14746 14747 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14746=(1,2304,384)f32 #14747=(1,2304,192)f32 nn.Dropout pnnx_unique_612 1 1 14747 14748 #14747=(1,2304,192)f32 #14748=(1,2304,192)f32 pnnx.Expression pnnx_expr_5117 2 1 input.347 14748 14750 expr=add(@0,@1) #input.347=(1,2304,192)f32 #14748=(1,2304,192)f32 #14750=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.4.norm1 1 1 14750 14777 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14750=(1,2304,192)f32 #14777=(1,2304,192)f32 pnnx.Expression pnnx_expr_5095 2 1 14750 14094 14778 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #14750=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1824 2 1 14777 14778 x.155 $input=14777 $shape=14778 #14777=(1,2304,192)f32 #x.155=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5074 1 1 x.155 14795 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.155=(1,48,48,192)f32 pnnx.Expression pnnx_expr_5069 0 1 14797 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1825 2 1 x.155 14795 x0.83 $input=x.155 $shape=14795 #x.155=(1,48,48,192)f32 #x0.83=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_5066 1 1 x.155 14800 expr=[-1,8,8,int(size(@0,3))] #x.155=(1,48,48,192)f32 torch.permute torch.permute_2832 2 1 x0.83 14797 14798 $input=x0.83 $dims=14797 #x0.83=(1,6,8,6,8,192)f32 #14798=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_230 1 1 14798 14799 memory_format=torch.contiguous_format $input=14798 #14798=(1,6,6,8,8,192)f32 #14799=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_5064 1 1 14750 14802 expr=[-1,64,int(size(@0,2))] #14750=(1,2304,192)f32 pnnx.Expression pnnx_expr_5054 0 1 14813 expr=-2 pnnx.Expression pnnx_expr_5053 0 1 14814 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.4.attn 0 1 relative_position_bias_table.155 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.155=(225,6)f32 pnnx.Attribute pnnx_unique_613 0 1 relative_position_index.155 @relative_position_index=(64,64)i64 #relative_position_index.155=(64,64)i64 Tensor.view Tensor.view_1826 2 1 14799 14800 x_windows.155 $input=14799 $shape=14800 #14799=(1,6,6,8,8,192)f32 #x_windows.155=(36,8,8,192)f32 Tensor.view Tensor.view_1827 2 1 x_windows.155 14802 x1.83 $input=x_windows.155 $shape=14802 #x_windows.155=(36,8,8,192)f32 #x1.83=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.4.attn.qkv 1 1 x1.83 14834 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.83=(36,64,192)f32 #14834=(36,64,576)f32 pnnx.Expression pnnx_expr_5038 1 1 x1.83 14837 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.83=(36,64,192)f32 pnnx.Expression pnnx_expr_5033 0 1 14839 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_586 2 1 14834 14837 14838 $input=14834 $shape=14837 #14834=(36,64,576)f32 #14838=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_5032 0 1 24893 expr=0 pnnx.Expression pnnx_expr_5031 0 1 24894 expr=0 pnnx.Expression pnnx_expr_5030 0 1 24895 expr=0 pnnx.Expression pnnx_expr_5029 0 1 24896 expr=1 pnnx.Expression pnnx_expr_5028 0 1 24897 expr=0 pnnx.Expression pnnx_expr_5027 0 1 24898 expr=2 torch.permute torch.permute_2833 2 1 14838 14839 qkv0.83 $input=14838 $dims=14839 #14838=(36,64,3,6,32)f32 #qkv0.83=(3,36,6,64,32)f32 Tensor.select Tensor.select_878 3 1 qkv0.83 24893 24894 q.155 $input=qkv0.83 $dim=24893 $index=24894 
#qkv0.83=(3,36,6,64,32)f32 #q.155=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5026 1 1 q.155 q0.83 expr=mul(@0,1.767767e-01) #q.155=(36,6,64,32)f32 #q0.83=(36,6,64,32)f32 Tensor.select Tensor.select_879 3 1 qkv0.83 24895 24896 k.155 $input=qkv0.83 $dim=24895 $index=24896 #qkv0.83=(3,36,6,64,32)f32 #k.155=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5024 0 1 14847 expr=[-1] Tensor.view Tensor.view_1828 2 1 relative_position_index.155 14847 14848 $input=relative_position_index.155 $shape=14847 #relative_position_index.155=(64,64)i64 #14848=(4096)i64 pnnx.Expression pnnx_expr_5023 1 1 14848 14849 expr=[@0] #14848=(4096)i64 pnnx.Expression pnnx_expr_5020 0 1 14851 expr=[64,64,-1] Tensor.index Tensor.index_402 2 1 relative_position_bias_table.155 14849 14850 $input=relative_position_bias_table.155 $expr=14849 #relative_position_bias_table.155=(225,6)f32 #14850=(4096,6)f32 pnnx.Expression pnnx_expr_5016 0 1 14853 expr=[2,0,1] Tensor.view Tensor.view_1829 2 1 14850 14851 relative_position_bias.155 $input=14850 $shape=14851 #14850=(4096,6)f32 #relative_position_bias.155=(64,64,6)f32 pnnx.Expression pnnx_expr_5015 0 1 24906 expr=0 torch.permute torch.permute_2834 2 1 relative_position_bias.155 14853 14854 $input=relative_position_bias.155 $dims=14853 #relative_position_bias.155=(64,64,6)f32 #14854=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_231 1 1 14854 relative_position_bias0.83 memory_format=torch.contiguous_format $input=14854 #14854=(6,64,64)f32 #relative_position_bias0.83=(6,64,64)f32 torch.transpose torch.transpose_3141 3 1 k.155 14813 14814 14845 $input=k.155 $dim0=14813 $dim1=14814 #k.155=(36,6,64,32)f32 #14845=(36,6,32,64)f32 torch.matmul torch.matmul_2356 2 1 q0.83 14845 attn.311 $input=q0.83 $other=14845 #q0.83=(36,6,64,32)f32 #14845=(36,6,32,64)f32 #attn.311=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3370 2 1 relative_position_bias0.83 24906 14856 $input=relative_position_bias0.83 $dim=24906 #relative_position_bias0.83=(6,64,64)f32 #14856=(1,6,64,64)f32 pnnx.Expression pnnx_expr_5013 2 1 attn.311 14856 input.349 expr=add(@0,@1) #attn.311=(36,6,64,64)f32 #14856=(1,6,64,64)f32 #input.349=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.4.attn.softmax 1 1 input.349 14858 dim=-1 #input.349=(36,6,64,64)f32 #14858=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.4.attn.attn_drop 1 1 14858 14859 #14858=(36,6,64,64)f32 #14859=(36,6,64,64)f32 Tensor.select Tensor.select_880 3 1 qkv0.83 24897 24898 v.155 $input=qkv0.83 $dim=24897 $index=24898 #qkv0.83=(3,36,6,64,32)f32 #v.155=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5012 0 1 24908 expr=1 pnnx.Expression pnnx_expr_5011 0 1 24909 expr=2 torch.matmul torch.matmul_2357 2 1 14859 v.155 14860 $input=14859 $other=v.155 #14859=(36,6,64,64)f32 #v.155=(36,6,64,32)f32 #14860=(36,6,64,32)f32 pnnx.Expression pnnx_expr_5010 1 1 x1.83 14862 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.83=(36,64,192)f32 torch.transpose torch.transpose_3142 3 1 14860 24908 24909 14861 $input=14860 $dim0=24908 $dim1=24909 #14860=(36,6,64,32)f32 #14861=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_587 2 1 14861 14862 input0.87 $input=14861 $shape=14862 #14861=(36,64,6,32)f32 #input0.87=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.4.attn.proj 1 1 input0.87 14864 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.87=(36,64,192)f32 #14864=(36,64,192)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.4.attn.proj_drop 1 1 14864 14865 #14864=(36,64,192)f32 #14865=(36,64,192)f32 
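The six blocks of layers_mmsa.0.residual_group above and below repeat one pattern, alternating plain and shifted window attention. Read directly off the operators and shape comments: embedding dim 192 split into 6 heads of 32 (q is scaled by 1.767767e-01 = 1/sqrt(32)); an 8x8 window over the 48x48 token grid, giving 36 windows of 64 tokens each; a (225,6) relative_position_bias_table gathered through a fixed (64,64) relative_position_index, where 225 = (2*8-1)^2; an MLP of 192 -> 384 -> 192 with GELU; and, on odd-indexed blocks (blocks.1, .3, .5), a torch.roll by (-4,-4) before windowing plus a (36,64,64) attn_mask, undone by a (4,4) roll afterwards. Below is a minimal PyTorch sketch of the attention step only, assuming the standard Swin-style relative-position indexing; class and buffer names are illustrative, not from the export, and this reconstructs the same arithmetic as the graph rather than the exported module itself.

import torch
import torch.nn as nn

class WindowAttention(nn.Module):
    # Sizes match the dump: dim=192, heads=6 (head_dim=32), window=8.
    def __init__(self, dim=192, window=8, heads=6):
        super().__init__()
        self.heads, self.window = heads, window
        self.scale = (dim // heads) ** -0.5          # 1.767767e-01 in the dump
        self.qkv = nn.Linear(dim, dim * 3)           # 192 -> 576
        self.proj = nn.Linear(dim, dim)              # 192 -> 192
        # (2*8-1)^2 = 225 learnable biases per head, fetched through a fixed
        # (64,64) index -- the relative_position_bias_table/_index pair above.
        self.bias_table = nn.Parameter(torch.zeros((2 * window - 1) ** 2, heads))
        coords = torch.stack(torch.meshgrid(
            torch.arange(window), torch.arange(window), indexing="ij"))
        flat = coords.flatten(1)                                  # (2, 64)
        rel = (flat[:, :, None] - flat[:, None, :]).permute(1, 2, 0)
        rel = rel + (window - 1)                     # shift into [0, 2w-2]
        rel[:, :, 0] *= 2 * window - 1
        self.register_buffer("bias_index", rel.sum(-1))           # (64,64) i64

    def forward(self, x, mask=None):                 # x: (B*nW, 64, 192)
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.heads, C // self.heads)
        q, k, v = qkv.permute(2, 0, 3, 1, 4)         # each (B_, 6, 64, 32)
        attn = (q * self.scale) @ k.transpose(-2, -1)             # (B_,6,64,64)
        bias = self.bias_table[self.bias_index.view(-1)]          # (4096, 6)
        attn = attn + bias.view(N, N, -1).permute(2, 0, 1).unsqueeze(0)
        if mask is not None:                         # shifted blocks only
            nW = mask.shape[0]                       # 36 windows
            attn = (attn.view(B_ // nW, nW, self.heads, N, N)
                    + mask.unsqueeze(1).unsqueeze(0)   # (1, 36, 1, 64, 64)
                    ).view(-1, self.heads, N, N)
        attn = attn.softmax(dim=-1)
        out = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        return self.proj(out)

Around each attention call the graph performs: LayerNorm -> (roll (-4,-4) on shifted blocks) -> view (1,48,48,192) to (1,6,8,6,8,192) -> permute + contiguous -> windows (36,64,192) -> attention -> the inverse reshuffle -> (roll (4,4)) -> residual add; then LayerNorm -> fc1 (192->384) -> GELU -> fc2 (384->192) -> residual add. The nn.Dropout ops are no-ops at inference. Upstream, the manipulator stage that produced this group's input computes h = Conv3x3(ReflPad1(19 * ReLU(Conv7x7(ReflPad3(x_b - x_a))))) and hands x_b + h + ResBlock(h) to patch_embed_mmsa, where ResBlock is the pad/conv/ReLU/pad/conv chain under manipulator.resblks.0 and the factor 19 is the mul(@0,1.900000e+01) expression.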
pnnx.Expression pnnx_expr_5006 1 1 14750 14866 expr=[-1,8,8,int(size(@0,2))] #14750=(1,2304,192)f32 pnnx.Expression pnnx_expr_4993 1 1 14094 14872 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4986 0 1 14874 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1830 2 1 14865 14866 windows.155 $input=14865 $shape=14866 #14865=(36,64,192)f32 #windows.155=(36,8,8,192)f32 Tensor.view Tensor.view_1831 2 1 windows.155 14872 x2.83 $input=windows.155 $shape=14872 #windows.155=(36,8,8,192)f32 #x2.83=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4983 1 1 14094 14877 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2835 2 1 x2.83 14874 14875 $input=x2.83 $dims=14874 #x2.83=(1,6,6,8,8,192)f32 #14875=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_232 1 1 14875 14876 memory_format=torch.contiguous_format $input=14875 #14875=(1,6,8,6,8,192)f32 #14876=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4980 2 1 14750 14094 14881 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #14750=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1832 2 1 14876 14877 x3.83 $input=14876 $shape=14877 #14876=(1,6,8,6,8,192)f32 #x3.83=(1,48,48,192)f32 Tensor.view Tensor.view_1833 2 1 x3.83 14881 x4.83 $input=x3.83 $shape=14881 #x3.83=(1,48,48,192)f32 #x4.83=(1,2304,192)f32 pnnx.Expression pnnx_expr_4978 2 1 14750 x4.83 input.351 expr=add(@0,@1) #14750=(1,2304,192)f32 #x4.83=(1,2304,192)f32 #input.351=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.4.norm2 1 1 input.351 14885 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.351=(1,2304,192)f32 #14885=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.4.mlp.fc1 1 1 14885 14890 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #14885=(1,2304,192)f32 #14890=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.4.mlp.act 1 1 14890 14891 #14890=(1,2304,384)f32 #14891=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.4.mlp.drop 1 1 14891 14892 #14891=(1,2304,384)f32 #14892=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.4.mlp.fc2 1 1 14892 14893 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #14892=(1,2304,384)f32 #14893=(1,2304,192)f32 nn.Dropout pnnx_unique_614 1 1 14893 14894 #14893=(1,2304,192)f32 #14894=(1,2304,192)f32 pnnx.Expression pnnx_expr_4976 2 1 input.351 14894 14896 expr=add(@0,@1) #input.351=(1,2304,192)f32 #14894=(1,2304,192)f32 #14896=(1,2304,192)f32 pnnx.Attribute layers_mmsa.0.residual_group.blocks.5 0 1 attn_mask.79 @attn_mask=(36,64,64)f32 #attn_mask.79=(36,64,64)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.5.norm1 1 1 14896 14925 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #14896=(1,2304,192)f32 #14925=(1,2304,192)f32 pnnx.Expression pnnx_expr_4953 2 1 14896 14094 14926 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #14896=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4951 0 1 14928 expr=[-4,-4] pnnx.Expression pnnx_expr_4949 0 1 14929 expr=[1,2] Tensor.view Tensor.view_1834 2 1 14925 14926 x.157 $input=14925 $shape=14926 #14925=(1,2304,192)f32 #x.157=(1,48,48,192)f32 torch.roll torch.roll_2496 3 1 x.157 14928 14929 x0.85 $input=x.157 $shifts=14928 $dims=14929 #x.157=(1,48,48,192)f32 #x0.85=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4927 1 1 x0.85 
14946 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.85=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4922 0 1 14948 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1835 2 1 x0.85 14946 x1.85 $input=x0.85 $shape=14946 #x0.85=(1,48,48,192)f32 #x1.85=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4919 1 1 x0.85 14951 expr=[-1,8,8,int(size(@0,3))] #x0.85=(1,48,48,192)f32 torch.permute torch.permute_2836 2 1 x1.85 14948 14949 $input=x1.85 $dims=14948 #x1.85=(1,6,8,6,8,192)f32 #14949=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_233 1 1 14949 14950 memory_format=torch.contiguous_format $input=14949 #14949=(1,6,6,8,8,192)f32 #14950=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4917 1 1 14896 14953 expr=[-1,64,int(size(@0,2))] #14896=(1,2304,192)f32 pnnx.Expression pnnx_expr_4907 0 1 14964 expr=-2 pnnx.Expression pnnx_expr_4906 0 1 14965 expr=-1 pnnx.Attribute layers_mmsa.0.residual_group.blocks.5.attn 0 1 relative_position_bias_table.157 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.157=(225,6)f32 pnnx.Attribute pnnx_unique_615 0 1 relative_position_index.157 @relative_position_index=(64,64)i64 #relative_position_index.157=(64,64)i64 Tensor.view Tensor.view_1836 2 1 14950 14951 x_windows.157 $input=14950 $shape=14951 #14950=(1,6,6,8,8,192)f32 #x_windows.157=(36,8,8,192)f32 Tensor.view Tensor.view_1837 2 1 x_windows.157 14953 x2.85 $input=x_windows.157 $shape=14953 #x_windows.157=(36,8,8,192)f32 #x2.85=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.5.attn.qkv 1 1 x2.85 14989 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.85=(36,64,192)f32 #14989=(36,64,576)f32 pnnx.Expression pnnx_expr_4887 1 1 x2.85 14992 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.85=(36,64,192)f32 pnnx.Expression pnnx_expr_4882 0 1 14994 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_588 2 1 14989 14992 14993 $input=14989 $shape=14992 #14989=(36,64,576)f32 #14993=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4881 0 1 24952 expr=0 pnnx.Expression pnnx_expr_4880 0 1 24953 expr=0 pnnx.Expression pnnx_expr_4879 0 1 24954 expr=0 pnnx.Expression pnnx_expr_4878 0 1 24955 expr=1 pnnx.Expression pnnx_expr_4877 0 1 24956 expr=0 pnnx.Expression pnnx_expr_4876 0 1 24957 expr=2 torch.permute torch.permute_2837 2 1 14993 14994 qkv0.85 $input=14993 $dims=14994 #14993=(36,64,3,6,32)f32 #qkv0.85=(3,36,6,64,32)f32 Tensor.select Tensor.select_881 3 1 qkv0.85 24952 24953 q.157 $input=qkv0.85 $dim=24952 $index=24953 #qkv0.85=(3,36,6,64,32)f32 #q.157=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4875 1 1 q.157 q0.85 expr=mul(@0,1.767767e-01) #q.157=(36,6,64,32)f32 #q0.85=(36,6,64,32)f32 Tensor.select Tensor.select_882 3 1 qkv0.85 24954 24955 k.157 $input=qkv0.85 $dim=24954 $index=24955 #qkv0.85=(3,36,6,64,32)f32 #k.157=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4873 0 1 15002 expr=[-1] Tensor.view Tensor.view_1838 2 1 relative_position_index.157 15002 15003 $input=relative_position_index.157 $shape=15002 #relative_position_index.157=(64,64)i64 #15003=(4096)i64 pnnx.Expression pnnx_expr_4872 1 1 15003 15004 expr=[@0] #15003=(4096)i64 pnnx.Expression pnnx_expr_4869 0 1 15006 expr=[64,64,-1] Tensor.index Tensor.index_403 2 1 relative_position_bias_table.157 15004 15005 $input=relative_position_bias_table.157 $expr=15004 #relative_position_bias_table.157=(225,6)f32 #15005=(4096,6)f32 pnnx.Expression pnnx_expr_4865 0 1 15008 expr=[2,0,1] Tensor.view Tensor.view_1839 2 1 15005 15006 relative_position_bias.157 
$input=15005 $shape=15006 #15005=(4096,6)f32 #relative_position_bias.157=(64,64,6)f32 pnnx.Expression pnnx_expr_4864 0 1 24965 expr=0 torch.permute torch.permute_2838 2 1 relative_position_bias.157 15008 15009 $input=relative_position_bias.157 $dims=15008 #relative_position_bias.157=(64,64,6)f32 #15009=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_234 1 1 15009 relative_position_bias0.85 memory_format=torch.contiguous_format $input=15009 #15009=(6,64,64)f32 #relative_position_bias0.85=(6,64,64)f32 torch.transpose torch.transpose_3143 3 1 k.157 14964 14965 15000 $input=k.157 $dim0=14964 $dim1=14965 #k.157=(36,6,64,32)f32 #15000=(36,6,32,64)f32 torch.matmul torch.matmul_2358 2 1 q0.85 15000 attn.315 $input=q0.85 $other=15000 #q0.85=(36,6,64,32)f32 #15000=(36,6,32,64)f32 #attn.315=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3371 2 1 relative_position_bias0.85 24965 15011 $input=relative_position_bias0.85 $dim=24965 #relative_position_bias0.85=(6,64,64)f32 #15011=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4862 2 1 attn.315 15011 attn0.43 expr=add(@0,@1) #attn.315=(36,6,64,64)f32 #15011=(1,6,64,64)f32 #attn0.43=(36,6,64,64)f32 pnnx.Expression pnnx_expr_4853 2 1 x2.85 attn_mask.79 15018 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.85=(36,64,192)f32 #attn_mask.79=(36,64,64)f32 pnnx.Expression pnnx_expr_4852 0 1 24970 expr=1 pnnx.Expression pnnx_expr_4851 0 1 24971 expr=0 Tensor.view Tensor.view_1840 2 1 attn0.43 15018 15019 $input=attn0.43 $shape=15018 #attn0.43=(36,6,64,64)f32 #15019=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3372 2 1 attn_mask.79 24970 15020 $input=attn_mask.79 $dim=24970 #attn_mask.79=(36,64,64)f32 #15020=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3373 2 1 15020 24971 15021 $input=15020 $dim=24971 #15020=(36,1,64,64)f32 #15021=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_4849 2 1 15019 15021 attn1.43 expr=add(@0,@1) #15019=(1,36,6,64,64)f32 #15021=(1,36,1,64,64)f32 #attn1.43=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_4846 1 1 x2.85 15023 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.85=(36,64,192)f32 Tensor.view Tensor.view_1841 2 1 attn1.43 15023 input.353 $input=attn1.43 $shape=15023 #attn1.43=(1,36,6,64,64)f32 #input.353=(36,6,64,64)f32 nn.Softmax layers_mmsa.0.residual_group.blocks.5.attn.softmax 1 1 input.353 15025 dim=-1 #input.353=(36,6,64,64)f32 #15025=(36,6,64,64)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.5.attn.attn_drop 1 1 15025 15026 #15025=(36,6,64,64)f32 #15026=(36,6,64,64)f32 Tensor.select Tensor.select_883 3 1 qkv0.85 24956 24957 v.157 $input=qkv0.85 $dim=24956 $index=24957 #qkv0.85=(3,36,6,64,32)f32 #v.157=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4845 0 1 24975 expr=1 pnnx.Expression pnnx_expr_4844 0 1 24976 expr=2 torch.matmul torch.matmul_2359 2 1 15026 v.157 15027 $input=15026 $other=v.157 #15026=(36,6,64,64)f32 #v.157=(36,6,64,32)f32 #15027=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4843 1 1 x2.85 15029 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.85=(36,64,192)f32 torch.transpose torch.transpose_3144 3 1 15027 24975 24976 15028 $input=15027 $dim0=24975 $dim1=24976 #15027=(36,6,64,32)f32 #15028=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_589 2 1 15028 15029 input0.89 $input=15028 $shape=15029 #15028=(36,64,6,32)f32 #input0.89=(36,64,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.5.attn.proj 1 1 input0.89 15031 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.89=(36,64,192)f32 #15031=(36,64,192)f32 nn.Dropout 
layers_mmsa.0.residual_group.blocks.5.attn.proj_drop 1 1 15031 15032 #15031=(36,64,192)f32 #15032=(36,64,192)f32 pnnx.Expression pnnx_expr_4839 1 1 14896 15033 expr=[-1,8,8,int(size(@0,2))] #14896=(1,2304,192)f32 pnnx.Expression pnnx_expr_4826 1 1 14094 15039 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4819 0 1 15041 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1842 2 1 15032 15033 windows.157 $input=15032 $shape=15033 #15032=(36,64,192)f32 #windows.157=(36,8,8,192)f32 Tensor.view Tensor.view_1843 2 1 windows.157 15039 x3.85 $input=windows.157 $shape=15039 #windows.157=(36,8,8,192)f32 #x3.85=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4816 1 1 14094 15044 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2839 2 1 x3.85 15041 15042 $input=x3.85 $dims=15041 #x3.85=(1,6,6,8,8,192)f32 #15042=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_235 1 1 15042 15043 memory_format=torch.contiguous_format $input=15042 #15042=(1,6,8,6,8,192)f32 #15043=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4813 0 1 15046 expr=[4,4] pnnx.Expression pnnx_expr_4810 0 1 15047 expr=[1,2] Tensor.view Tensor.view_1844 2 1 15043 15044 shifted_x.79 $input=15043 $shape=15044 #15043=(1,6,8,6,8,192)f32 #shifted_x.79=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4807 2 1 14896 14094 15051 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #14896=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2497 3 1 shifted_x.79 15046 15047 x4.85 $input=shifted_x.79 $shifts=15046 $dims=15047 #shifted_x.79=(1,48,48,192)f32 #x4.85=(1,48,48,192)f32 Tensor.view Tensor.view_1845 2 1 x4.85 15051 x5.79 $input=x4.85 $shape=15051 #x4.85=(1,48,48,192)f32 #x5.79=(1,2304,192)f32 pnnx.Expression pnnx_expr_4805 2 1 14896 x5.79 input.355 expr=add(@0,@1) #14896=(1,2304,192)f32 #x5.79=(1,2304,192)f32 #input.355=(1,2304,192)f32 nn.LayerNorm layers_mmsa.0.residual_group.blocks.5.norm2 1 1 input.355 15055 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.355=(1,2304,192)f32 #15055=(1,2304,192)f32 nn.Linear layers_mmsa.0.residual_group.blocks.5.mlp.fc1 1 1 15055 15060 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15055=(1,2304,192)f32 #15060=(1,2304,384)f32 nn.GELU layers_mmsa.0.residual_group.blocks.5.mlp.act 1 1 15060 15061 #15060=(1,2304,384)f32 #15061=(1,2304,384)f32 nn.Dropout layers_mmsa.0.residual_group.blocks.5.mlp.drop 1 1 15061 15062 #15061=(1,2304,384)f32 #15062=(1,2304,384)f32 nn.Linear layers_mmsa.0.residual_group.blocks.5.mlp.fc2 1 1 15062 15063 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15062=(1,2304,384)f32 #15063=(1,2304,192)f32 nn.Dropout pnnx_unique_616 1 1 15063 15064 #15063=(1,2304,192)f32 #15064=(1,2304,192)f32 pnnx.Expression pnnx_expr_4803 2 1 input.355 15064 15066 expr=add(@0,@1) #input.355=(1,2304,192)f32 #15064=(1,2304,192)f32 #15066=(1,2304,192)f32 pnnx.Expression pnnx_expr_4801 0 1 15068 expr=1 pnnx.Expression pnnx_expr_4800 0 1 15069 expr=2 pnnx.Expression pnnx_expr_4795 2 1 15066 14094 15075 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #15066=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.transpose torch.transpose_3145 3 1 15066 15068 15069 15074 $input=15066 $dim0=15068 $dim1=15069 #15066=(1,2304,192)f32 #15074=(1,192,2304)f32 Tensor.view Tensor.view_1846 2 1 15074 15075 input.357 $input=15074 $shape=15075 #15074=(1,192,2304)f32 
#input.357=(1,192,48,48)f32 nn.Conv2d layers_mmsa.0.conv 1 1 input.357 15077 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.357=(1,192,48,48)f32 #15077=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4794 0 1 15078 expr=-1 pnnx.Expression pnnx_expr_4793 0 1 15079 expr=2 pnnx.Expression pnnx_expr_4792 0 1 15080 expr=1 pnnx.Expression pnnx_expr_4791 0 1 25003 expr=2 torch.flatten torch.flatten_2198 3 1 15077 15079 15078 15081 $input=15077 $start_dim=15079 $end_dim=15078 #15077=(1,192,48,48)f32 #15081=(1,192,2304)f32 torch.transpose torch.transpose_3146 3 1 15081 15080 25003 15082 $input=15081 $dim0=15080 $dim1=25003 #15081=(1,192,2304)f32 #15082=(1,2304,192)f32 pnnx.Expression pnnx_expr_4790 2 1 15082 2016 15083 expr=add(@0,@1) #15082=(1,2304,192)f32 #2016=(1,2304,192)f32 #15083=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.0.norm1 1 1 15083 15127 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15083=(1,2304,192)f32 #15127=(1,2304,192)f32 pnnx.Expression pnnx_expr_4767 2 1 15083 14094 15128 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15083=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1847 2 1 15127 15128 x.159 $input=15127 $shape=15128 #15127=(1,2304,192)f32 #x.159=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4746 1 1 x.159 15145 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.159=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4741 0 1 15147 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1848 2 1 x.159 15145 x0.87 $input=x.159 $shape=15145 #x.159=(1,48,48,192)f32 #x0.87=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4738 1 1 x.159 15150 expr=[-1,8,8,int(size(@0,3))] #x.159=(1,48,48,192)f32 torch.permute torch.permute_2840 2 1 x0.87 15147 15148 $input=x0.87 $dims=15147 #x0.87=(1,6,8,6,8,192)f32 #15148=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_236 1 1 15148 15149 memory_format=torch.contiguous_format $input=15148 #15148=(1,6,6,8,8,192)f32 #15149=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4736 1 1 15083 15152 expr=[-1,64,int(size(@0,2))] #15083=(1,2304,192)f32 pnnx.Expression pnnx_expr_4726 0 1 15163 expr=-2 pnnx.Expression pnnx_expr_4725 0 1 15164 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.0.attn 0 1 relative_position_bias_table.159 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.159=(225,6)f32 pnnx.Attribute pnnx_unique_617 0 1 relative_position_index.159 @relative_position_index=(64,64)i64 #relative_position_index.159=(64,64)i64 Tensor.view Tensor.view_1849 2 1 15149 15150 x_windows.159 $input=15149 $shape=15150 #15149=(1,6,6,8,8,192)f32 #x_windows.159=(36,8,8,192)f32 Tensor.view Tensor.view_1850 2 1 x_windows.159 15152 x1.87 $input=x_windows.159 $shape=15152 #x_windows.159=(36,8,8,192)f32 #x1.87=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.0.attn.qkv 1 1 x1.87 15184 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.87=(36,64,192)f32 #15184=(36,64,576)f32 pnnx.Expression pnnx_expr_4710 1 1 x1.87 15187 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.87=(36,64,192)f32 pnnx.Expression pnnx_expr_4705 0 1 15189 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_590 2 1 15184 15187 15188 $input=15184 $shape=15187 #15184=(36,64,576)f32 #15188=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4704 0 1 25021 expr=0 
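
Note on the span above: the layers_mmsa.0.conv operator and the transpose/view and flatten/transpose pairs around it are the residual-group tail used throughout this export. The (1,2304,192) token sequence is transposed and viewed back into a (1,192,48,48) feature map, passed through a 3x3 convolution, flattened back into tokens, and added to the tokens that entered the group (blob 2016 here). A minimal PyTorch sketch of that pattern, with illustrative names not taken from the export:

import torch
import torch.nn as nn

def group_tail(tokens_out, tokens_in, conv, hw):
    # tokens_out, tokens_in: (B, H*W, C) -- here (1, 2304, 192)
    # conv: nn.Conv2d(C, C, kernel_size=3, padding=1), e.g. layers_mmsa.0.conv
    B, L, C = tokens_out.shape
    H, W = hw                                        # (48, 48) in this dump
    x = tokens_out.transpose(1, 2).view(B, C, H, W)  # tokens -> feature map
    x = conv(x)                                      # 3x3 conv keeps (B, C, H, W)
    x = x.flatten(2).transpose(1, 2)                 # feature map -> tokens
    return x + tokens_in                             # long residual around the group
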
pnnx.Expression pnnx_expr_4703 0 1 25022 expr=0 pnnx.Expression pnnx_expr_4702 0 1 25023 expr=0 pnnx.Expression pnnx_expr_4701 0 1 25024 expr=1 pnnx.Expression pnnx_expr_4700 0 1 25025 expr=0 pnnx.Expression pnnx_expr_4699 0 1 25026 expr=2 torch.permute torch.permute_2841 2 1 15188 15189 qkv0.87 $input=15188 $dims=15189 #15188=(36,64,3,6,32)f32 #qkv0.87=(3,36,6,64,32)f32 Tensor.select Tensor.select_884 3 1 qkv0.87 25021 25022 q.159 $input=qkv0.87 $dim=25021 $index=25022 #qkv0.87=(3,36,6,64,32)f32 #q.159=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4698 1 1 q.159 q0.87 expr=mul(@0,1.767767e-01) #q.159=(36,6,64,32)f32 #q0.87=(36,6,64,32)f32 Tensor.select Tensor.select_885 3 1 qkv0.87 25023 25024 k.159 $input=qkv0.87 $dim=25023 $index=25024 #qkv0.87=(3,36,6,64,32)f32 #k.159=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4696 0 1 15197 expr=[-1] Tensor.view Tensor.view_1851 2 1 relative_position_index.159 15197 15198 $input=relative_position_index.159 $shape=15197 #relative_position_index.159=(64,64)i64 #15198=(4096)i64 pnnx.Expression pnnx_expr_4695 1 1 15198 15199 expr=[@0] #15198=(4096)i64 pnnx.Expression pnnx_expr_4692 0 1 15201 expr=[64,64,-1] Tensor.index Tensor.index_404 2 1 relative_position_bias_table.159 15199 15200 $input=relative_position_bias_table.159 $expr=15199 #relative_position_bias_table.159=(225,6)f32 #15200=(4096,6)f32 pnnx.Expression pnnx_expr_4688 0 1 15203 expr=[2,0,1] Tensor.view Tensor.view_1852 2 1 15200 15201 relative_position_bias.159 $input=15200 $shape=15201 #15200=(4096,6)f32 #relative_position_bias.159=(64,64,6)f32 pnnx.Expression pnnx_expr_4687 0 1 25034 expr=0 torch.permute torch.permute_2842 2 1 relative_position_bias.159 15203 15204 $input=relative_position_bias.159 $dims=15203 #relative_position_bias.159=(64,64,6)f32 #15204=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_237 1 1 15204 relative_position_bias0.87 memory_format=torch.contiguous_format $input=15204 #15204=(6,64,64)f32 #relative_position_bias0.87=(6,64,64)f32 torch.transpose torch.transpose_3147 3 1 k.159 15163 15164 15195 $input=k.159 $dim0=15163 $dim1=15164 #k.159=(36,6,64,32)f32 #15195=(36,6,32,64)f32 torch.matmul torch.matmul_2360 2 1 q0.87 15195 attn.319 $input=q0.87 $other=15195 #q0.87=(36,6,64,32)f32 #15195=(36,6,32,64)f32 #attn.319=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3374 2 1 relative_position_bias0.87 25034 15206 $input=relative_position_bias0.87 $dim=25034 #relative_position_bias0.87=(6,64,64)f32 #15206=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4685 2 1 attn.319 15206 input.359 expr=add(@0,@1) #attn.319=(36,6,64,64)f32 #15206=(1,6,64,64)f32 #input.359=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.0.attn.softmax 1 1 input.359 15208 dim=-1 #input.359=(36,6,64,64)f32 #15208=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.0.attn.attn_drop 1 1 15208 15209 #15208=(36,6,64,64)f32 #15209=(36,6,64,64)f32 Tensor.select Tensor.select_886 3 1 qkv0.87 25025 25026 v.159 $input=qkv0.87 $dim=25025 $index=25026 #qkv0.87=(3,36,6,64,32)f32 #v.159=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4684 0 1 25036 expr=1 pnnx.Expression pnnx_expr_4683 0 1 25037 expr=2 torch.matmul torch.matmul_2361 2 1 15209 v.159 15210 $input=15209 $other=v.159 #15209=(36,6,64,64)f32 #v.159=(36,6,64,32)f32 #15210=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4682 1 1 x1.87 15212 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.87=(36,64,192)f32 torch.transpose torch.transpose_3148 3 1 15210 25036 25037 15211 $input=15210 $dim0=25036 $dim1=25037 #15210=(36,6,64,32)f32 #15211=(36,64,6,32)f32 
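
The qkv/reshape/permute/select run repeated in every attention block above is standard multi-head window self-attention: 6 heads of dimension 192/6 = 32, and the constant 1.767767e-01 is 32**-0.5, the usual query scaling. A hedged sketch of what this subgraph computes (the relative position bias and, in shifted blocks, the attention mask are added to the returned scores by the operators that follow):

import torch

def window_attention_scores(x, qkv_linear, num_heads=6):
    # x: (num_windows*B, N, C) -- here (36, 64, 192), N = 64 tokens per 8x8 window
    B_, N, C = x.shape
    head_dim = C // num_heads                        # 32
    qkv = qkv_linear(x)                              # nn.Linear(192, 576) in the dump
    qkv = qkv.reshape(B_, N, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
    q, k, v = qkv[0], qkv[1], qkv[2]                 # the three Tensor.select ops
    q = q * head_dim ** -0.5                         # expr=mul(@0,1.767767e-01)
    attn = q @ k.transpose(-2, -1)                   # (B_, num_heads, N, N)
    return attn, v
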
Tensor.reshape Tensor.reshape_591 2 1 15211 15212 input0.91 $input=15211 $shape=15212 #15211=(36,64,6,32)f32 #input0.91=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.0.attn.proj 1 1 input0.91 15214 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.91=(36,64,192)f32 #15214=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.0.attn.proj_drop 1 1 15214 15215 #15214=(36,64,192)f32 #15215=(36,64,192)f32 pnnx.Expression pnnx_expr_4678 1 1 15083 15216 expr=[-1,8,8,int(size(@0,2))] #15083=(1,2304,192)f32 pnnx.Expression pnnx_expr_4665 1 1 14094 15222 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4658 0 1 15224 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1853 2 1 15215 15216 windows.159 $input=15215 $shape=15216 #15215=(36,64,192)f32 #windows.159=(36,8,8,192)f32 Tensor.view Tensor.view_1854 2 1 windows.159 15222 x2.87 $input=windows.159 $shape=15222 #windows.159=(36,8,8,192)f32 #x2.87=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4655 1 1 14094 15227 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2843 2 1 x2.87 15224 15225 $input=x2.87 $dims=15224 #x2.87=(1,6,6,8,8,192)f32 #15225=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_238 1 1 15225 15226 memory_format=torch.contiguous_format $input=15225 #15225=(1,6,8,6,8,192)f32 #15226=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4652 2 1 15083 14094 15231 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15083=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1855 2 1 15226 15227 x3.87 $input=15226 $shape=15227 #15226=(1,6,8,6,8,192)f32 #x3.87=(1,48,48,192)f32 Tensor.view Tensor.view_1856 2 1 x3.87 15231 x4.87 $input=x3.87 $shape=15231 #x3.87=(1,48,48,192)f32 #x4.87=(1,2304,192)f32 pnnx.Expression pnnx_expr_4650 2 1 15083 x4.87 input.361 expr=add(@0,@1) #15083=(1,2304,192)f32 #x4.87=(1,2304,192)f32 #input.361=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.0.norm2 1 1 input.361 15235 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.361=(1,2304,192)f32 #15235=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.0.mlp.fc1 1 1 15235 15240 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15235=(1,2304,192)f32 #15240=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.0.mlp.act 1 1 15240 15241 #15240=(1,2304,384)f32 #15241=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.0.mlp.drop 1 1 15241 15242 #15241=(1,2304,384)f32 #15242=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.0.mlp.fc2 1 1 15242 15243 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15242=(1,2304,384)f32 #15243=(1,2304,192)f32 nn.Dropout pnnx_unique_618 1 1 15243 15244 #15243=(1,2304,192)f32 #15244=(1,2304,192)f32 pnnx.Expression pnnx_expr_4648 2 1 input.361 15244 15246 expr=add(@0,@1) #input.361=(1,2304,192)f32 #15244=(1,2304,192)f32 #15246=(1,2304,192)f32 pnnx.Attribute layers_mmsa.1.residual_group.blocks.1 0 1 attn_mask.81 @attn_mask=(36,64,64)f32 #attn_mask.81=(36,64,64)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.1.norm1 1 1 15246 15275 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15246=(1,2304,192)f32 #15275=(1,2304,192)f32 pnnx.Expression pnnx_expr_4625 2 1 15246 14094 15276 
expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15246=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4623 0 1 15278 expr=[-4,-4] pnnx.Expression pnnx_expr_4621 0 1 15279 expr=[1,2] Tensor.view Tensor.view_1857 2 1 15275 15276 x.161 $input=15275 $shape=15276 #15275=(1,2304,192)f32 #x.161=(1,48,48,192)f32 torch.roll torch.roll_2498 3 1 x.161 15278 15279 x0.89 $input=x.161 $shifts=15278 $dims=15279 #x.161=(1,48,48,192)f32 #x0.89=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4599 1 1 x0.89 15296 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.89=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4594 0 1 15298 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1858 2 1 x0.89 15296 x1.89 $input=x0.89 $shape=15296 #x0.89=(1,48,48,192)f32 #x1.89=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4591 1 1 x0.89 15301 expr=[-1,8,8,int(size(@0,3))] #x0.89=(1,48,48,192)f32 torch.permute torch.permute_2844 2 1 x1.89 15298 15299 $input=x1.89 $dims=15298 #x1.89=(1,6,8,6,8,192)f32 #15299=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_239 1 1 15299 15300 memory_format=torch.contiguous_format $input=15299 #15299=(1,6,6,8,8,192)f32 #15300=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4589 1 1 15246 15303 expr=[-1,64,int(size(@0,2))] #15246=(1,2304,192)f32 pnnx.Expression pnnx_expr_4579 0 1 15314 expr=-2 pnnx.Expression pnnx_expr_4578 0 1 15315 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.1.attn 0 1 relative_position_bias_table.161 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.161=(225,6)f32 pnnx.Attribute pnnx_unique_619 0 1 relative_position_index.161 @relative_position_index=(64,64)i64 #relative_position_index.161=(64,64)i64 Tensor.view Tensor.view_1859 2 1 15300 15301 x_windows.161 $input=15300 $shape=15301 #15300=(1,6,6,8,8,192)f32 #x_windows.161=(36,8,8,192)f32 Tensor.view Tensor.view_1860 2 1 x_windows.161 15303 x2.89 $input=x_windows.161 $shape=15303 #x_windows.161=(36,8,8,192)f32 #x2.89=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.1.attn.qkv 1 1 x2.89 15339 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.89=(36,64,192)f32 #15339=(36,64,576)f32 pnnx.Expression pnnx_expr_4559 1 1 x2.89 15342 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.89=(36,64,192)f32 pnnx.Expression pnnx_expr_4554 0 1 15344 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_592 2 1 15339 15342 15343 $input=15339 $shape=15342 #15339=(36,64,576)f32 #15343=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4553 0 1 25080 expr=0 pnnx.Expression pnnx_expr_4552 0 1 25081 expr=0 pnnx.Expression pnnx_expr_4551 0 1 25082 expr=0 pnnx.Expression pnnx_expr_4550 0 1 25083 expr=1 pnnx.Expression pnnx_expr_4549 0 1 25084 expr=0 pnnx.Expression pnnx_expr_4548 0 1 25085 expr=2 torch.permute torch.permute_2845 2 1 15343 15344 qkv0.89 $input=15343 $dims=15344 #15343=(36,64,3,6,32)f32 #qkv0.89=(3,36,6,64,32)f32 Tensor.select Tensor.select_887 3 1 qkv0.89 25080 25081 q.161 $input=qkv0.89 $dim=25080 $index=25081 #qkv0.89=(3,36,6,64,32)f32 #q.161=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4547 1 1 q.161 q0.89 expr=mul(@0,1.767767e-01) #q.161=(36,6,64,32)f32 #q0.89=(36,6,64,32)f32 Tensor.select Tensor.select_888 3 1 qkv0.89 25082 25083 k.161 $input=qkv0.89 $dim=25082 $index=25083 #qkv0.89=(3,36,6,64,32)f32 #k.161=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4545 0 1 15352 expr=[-1] Tensor.view Tensor.view_1861 2 1 relative_position_index.161 15352 15353 $input=relative_position_index.161 
$shape=15352 #relative_position_index.161=(64,64)i64 #15353=(4096)i64 pnnx.Expression pnnx_expr_4544 1 1 15353 15354 expr=[@0] #15353=(4096)i64 pnnx.Expression pnnx_expr_4541 0 1 15356 expr=[64,64,-1] Tensor.index Tensor.index_405 2 1 relative_position_bias_table.161 15354 15355 $input=relative_position_bias_table.161 $expr=15354 #relative_position_bias_table.161=(225,6)f32 #15355=(4096,6)f32 pnnx.Expression pnnx_expr_4537 0 1 15358 expr=[2,0,1] Tensor.view Tensor.view_1862 2 1 15355 15356 relative_position_bias.161 $input=15355 $shape=15356 #15355=(4096,6)f32 #relative_position_bias.161=(64,64,6)f32 pnnx.Expression pnnx_expr_4536 0 1 25093 expr=0 torch.permute torch.permute_2846 2 1 relative_position_bias.161 15358 15359 $input=relative_position_bias.161 $dims=15358 #relative_position_bias.161=(64,64,6)f32 #15359=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_240 1 1 15359 relative_position_bias0.89 memory_format=torch.contiguous_format $input=15359 #15359=(6,64,64)f32 #relative_position_bias0.89=(6,64,64)f32 torch.transpose torch.transpose_3149 3 1 k.161 15314 15315 15350 $input=k.161 $dim0=15314 $dim1=15315 #k.161=(36,6,64,32)f32 #15350=(36,6,32,64)f32 torch.matmul torch.matmul_2362 2 1 q0.89 15350 attn.323 $input=q0.89 $other=15350 #q0.89=(36,6,64,32)f32 #15350=(36,6,32,64)f32 #attn.323=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3375 2 1 relative_position_bias0.89 25093 15361 $input=relative_position_bias0.89 $dim=25093 #relative_position_bias0.89=(6,64,64)f32 #15361=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4534 2 1 attn.323 15361 attn0.45 expr=add(@0,@1) #attn.323=(36,6,64,64)f32 #15361=(1,6,64,64)f32 #attn0.45=(36,6,64,64)f32 pnnx.Expression pnnx_expr_4525 2 1 x2.89 attn_mask.81 15368 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.89=(36,64,192)f32 #attn_mask.81=(36,64,64)f32 pnnx.Expression pnnx_expr_4524 0 1 25098 expr=1 pnnx.Expression pnnx_expr_4523 0 1 25099 expr=0 Tensor.view Tensor.view_1863 2 1 attn0.45 15368 15369 $input=attn0.45 $shape=15368 #attn0.45=(36,6,64,64)f32 #15369=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3376 2 1 attn_mask.81 25098 15370 $input=attn_mask.81 $dim=25098 #attn_mask.81=(36,64,64)f32 #15370=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3377 2 1 15370 25099 15371 $input=15370 $dim=25099 #15370=(36,1,64,64)f32 #15371=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_4521 2 1 15369 15371 attn1.45 expr=add(@0,@1) #15369=(1,36,6,64,64)f32 #15371=(1,36,1,64,64)f32 #attn1.45=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_4518 1 1 x2.89 15373 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.89=(36,64,192)f32 Tensor.view Tensor.view_1864 2 1 attn1.45 15373 input.363 $input=attn1.45 $shape=15373 #attn1.45=(1,36,6,64,64)f32 #input.363=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.1.attn.softmax 1 1 input.363 15375 dim=-1 #input.363=(36,6,64,64)f32 #15375=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.1.attn.attn_drop 1 1 15375 15376 #15375=(36,6,64,64)f32 #15376=(36,6,64,64)f32 Tensor.select Tensor.select_889 3 1 qkv0.89 25084 25085 v.161 $input=qkv0.89 $dim=25084 $index=25085 #qkv0.89=(3,36,6,64,32)f32 #v.161=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4517 0 1 25103 expr=1 pnnx.Expression pnnx_expr_4516 0 1 25104 expr=2 torch.matmul torch.matmul_2363 2 1 15376 v.161 15377 $input=15376 $other=v.161 #15376=(36,6,64,64)f32 #v.161=(36,6,64,32)f32 #15377=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4515 1 1 x2.89 15379 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] 
#x2.89=(36,64,192)f32 torch.transpose torch.transpose_3150 3 1 15377 25103 25104 15378 $input=15377 $dim0=25103 $dim1=25104 #15377=(36,6,64,32)f32 #15378=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_593 2 1 15378 15379 input0.93 $input=15378 $shape=15379 #15378=(36,64,6,32)f32 #input0.93=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.1.attn.proj 1 1 input0.93 15381 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.93=(36,64,192)f32 #15381=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.1.attn.proj_drop 1 1 15381 15382 #15381=(36,64,192)f32 #15382=(36,64,192)f32 pnnx.Expression pnnx_expr_4511 1 1 15246 15383 expr=[-1,8,8,int(size(@0,2))] #15246=(1,2304,192)f32 pnnx.Expression pnnx_expr_4498 1 1 14094 15389 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4491 0 1 15391 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1865 2 1 15382 15383 windows.161 $input=15382 $shape=15383 #15382=(36,64,192)f32 #windows.161=(36,8,8,192)f32 Tensor.view Tensor.view_1866 2 1 windows.161 15389 x3.89 $input=windows.161 $shape=15389 #windows.161=(36,8,8,192)f32 #x3.89=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4488 1 1 14094 15394 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2847 2 1 x3.89 15391 15392 $input=x3.89 $dims=15391 #x3.89=(1,6,6,8,8,192)f32 #15392=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_241 1 1 15392 15393 memory_format=torch.contiguous_format $input=15392 #15392=(1,6,8,6,8,192)f32 #15393=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4485 0 1 15396 expr=[4,4] pnnx.Expression pnnx_expr_4482 0 1 15397 expr=[1,2] Tensor.view Tensor.view_1867 2 1 15393 15394 shifted_x.81 $input=15393 $shape=15394 #15393=(1,6,8,6,8,192)f32 #shifted_x.81=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4479 2 1 15246 14094 15401 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15246=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2499 3 1 shifted_x.81 15396 15397 x4.89 $input=shifted_x.81 $shifts=15396 $dims=15397 #shifted_x.81=(1,48,48,192)f32 #x4.89=(1,48,48,192)f32 Tensor.view Tensor.view_1868 2 1 x4.89 15401 x5.81 $input=x4.89 $shape=15401 #x4.89=(1,48,48,192)f32 #x5.81=(1,2304,192)f32 pnnx.Expression pnnx_expr_4477 2 1 15246 x5.81 input.365 expr=add(@0,@1) #15246=(1,2304,192)f32 #x5.81=(1,2304,192)f32 #input.365=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.1.norm2 1 1 input.365 15405 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.365=(1,2304,192)f32 #15405=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.1.mlp.fc1 1 1 15405 15410 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15405=(1,2304,192)f32 #15410=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.1.mlp.act 1 1 15410 15411 #15410=(1,2304,384)f32 #15411=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.1.mlp.drop 1 1 15411 15412 #15411=(1,2304,384)f32 #15412=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.1.mlp.fc2 1 1 15412 15413 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15412=(1,2304,384)f32 #15413=(1,2304,192)f32 nn.Dropout pnnx_unique_620 1 1 15413 15414 #15413=(1,2304,192)f32 #15414=(1,2304,192)f32 pnnx.Expression pnnx_expr_4475 2 1 input.365 15414 15416 expr=add(@0,@1) #input.365=(1,2304,192)f32 #15414=(1,2304,192)f32 #15416=(1,2304,192)f32 
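
The odd-indexed blocks (blocks.1, blocks.3, blocks.5 of each residual group) are the shifted-window variant: torch.roll by (-4, -4) before window partitioning, a precomputed (36, 64, 64) attn_mask attribute added to the logits so that tokens originating from different windows cannot attend to one another, and a roll by (4, 4) to undo the shift afterwards. A compact sketch of one such block's attention path, assuming the scoring helper above and omitting the relative position bias, the output projection, and the dropout layers for brevity:

import torch
import torch.nn.functional as F

def shifted_window_attention(x, attn_mask, attend, shift=4, win=8):
    # x: (1, 48, 48, 192); attn_mask: (num_windows, 64, 64) = (36, 64, 64)
    x = torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))        # torch.roll_*
    B, H, W, C = x.shape
    xw = (x.view(B, H // win, win, W // win, win, C)               # window partition
            .permute(0, 1, 3, 2, 4, 5).contiguous()
            .view(-1, win * win, C))                               # (36, 64, 192)
    attn, v = attend(xw)                                           # (36, 6, 64, 64)
    nW = attn_mask.shape[0]
    attn = attn.view(-1, nW, attn.shape[1], win * win, win * win)  # (1, 36, 6, 64, 64)
    attn = attn + attn_mask.unsqueeze(1).unsqueeze(0)              # mask -> (1, 36, 1, 64, 64)
    attn = F.softmax(attn.view(-1, attn.shape[2], win * win, win * win), dim=-1)
    out = (attn @ v).transpose(1, 2).reshape(-1, win * win, C)     # heads merged back
    x = (out.view(B, H // win, W // win, win, win, C)              # window reverse
            .permute(0, 1, 3, 2, 4, 5).contiguous()
            .view(B, H, W, C))
    return torch.roll(x, shifts=(shift, shift), dims=(1, 2))       # undo the shift
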
nn.LayerNorm layers_mmsa.1.residual_group.blocks.2.norm1 1 1 15416 15443 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15416=(1,2304,192)f32 #15443=(1,2304,192)f32 pnnx.Expression pnnx_expr_4453 2 1 15416 14094 15444 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15416=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1869 2 1 15443 15444 x.163 $input=15443 $shape=15444 #15443=(1,2304,192)f32 #x.163=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4432 1 1 x.163 15461 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.163=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4427 0 1 15463 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1870 2 1 x.163 15461 x0.91 $input=x.163 $shape=15461 #x.163=(1,48,48,192)f32 #x0.91=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4424 1 1 x.163 15466 expr=[-1,8,8,int(size(@0,3))] #x.163=(1,48,48,192)f32 torch.permute torch.permute_2848 2 1 x0.91 15463 15464 $input=x0.91 $dims=15463 #x0.91=(1,6,8,6,8,192)f32 #15464=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_242 1 1 15464 15465 memory_format=torch.contiguous_format $input=15464 #15464=(1,6,6,8,8,192)f32 #15465=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4422 1 1 15416 15468 expr=[-1,64,int(size(@0,2))] #15416=(1,2304,192)f32 pnnx.Expression pnnx_expr_4412 0 1 15479 expr=-2 pnnx.Expression pnnx_expr_4411 0 1 15480 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.2.attn 0 1 relative_position_bias_table.163 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.163=(225,6)f32 pnnx.Attribute pnnx_unique_621 0 1 relative_position_index.163 @relative_position_index=(64,64)i64 #relative_position_index.163=(64,64)i64 Tensor.view Tensor.view_1871 2 1 15465 15466 x_windows.163 $input=15465 $shape=15466 #15465=(1,6,6,8,8,192)f32 #x_windows.163=(36,8,8,192)f32 Tensor.view Tensor.view_1872 2 1 x_windows.163 15468 x1.91 $input=x_windows.163 $shape=15468 #x_windows.163=(36,8,8,192)f32 #x1.91=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.2.attn.qkv 1 1 x1.91 15500 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.91=(36,64,192)f32 #15500=(36,64,576)f32 pnnx.Expression pnnx_expr_4396 1 1 x1.91 15503 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.91=(36,64,192)f32 pnnx.Expression pnnx_expr_4391 0 1 15505 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_594 2 1 15500 15503 15504 $input=15500 $shape=15503 #15500=(36,64,576)f32 #15504=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4390 0 1 25148 expr=0 pnnx.Expression pnnx_expr_4389 0 1 25149 expr=0 pnnx.Expression pnnx_expr_4388 0 1 25150 expr=0 pnnx.Expression pnnx_expr_4387 0 1 25151 expr=1 pnnx.Expression pnnx_expr_4386 0 1 25152 expr=0 pnnx.Expression pnnx_expr_4385 0 1 25153 expr=2 torch.permute torch.permute_2849 2 1 15504 15505 qkv0.91 $input=15504 $dims=15505 #15504=(36,64,3,6,32)f32 #qkv0.91=(3,36,6,64,32)f32 Tensor.select Tensor.select_890 3 1 qkv0.91 25148 25149 q.163 $input=qkv0.91 $dim=25148 $index=25149 #qkv0.91=(3,36,6,64,32)f32 #q.163=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4384 1 1 q.163 q0.91 expr=mul(@0,1.767767e-01) #q.163=(36,6,64,32)f32 #q0.91=(36,6,64,32)f32 Tensor.select Tensor.select_891 3 1 qkv0.91 25150 25151 k.163 $input=qkv0.91 $dim=25150 $index=25151 #qkv0.91=(3,36,6,64,32)f32 #k.163=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4382 0 1 15513 expr=[-1] Tensor.view Tensor.view_1873 2 1 relative_position_index.163 15513 15514 
$input=relative_position_index.163 $shape=15513 #relative_position_index.163=(64,64)i64 #15514=(4096)i64 pnnx.Expression pnnx_expr_4381 1 1 15514 15515 expr=[@0] #15514=(4096)i64 pnnx.Expression pnnx_expr_4378 0 1 15517 expr=[64,64,-1] Tensor.index Tensor.index_406 2 1 relative_position_bias_table.163 15515 15516 $input=relative_position_bias_table.163 $expr=15515 #relative_position_bias_table.163=(225,6)f32 #15516=(4096,6)f32 pnnx.Expression pnnx_expr_4374 0 1 15519 expr=[2,0,1] Tensor.view Tensor.view_1874 2 1 15516 15517 relative_position_bias.163 $input=15516 $shape=15517 #15516=(4096,6)f32 #relative_position_bias.163=(64,64,6)f32 pnnx.Expression pnnx_expr_4373 0 1 25161 expr=0 torch.permute torch.permute_2850 2 1 relative_position_bias.163 15519 15520 $input=relative_position_bias.163 $dims=15519 #relative_position_bias.163=(64,64,6)f32 #15520=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_243 1 1 15520 relative_position_bias0.91 memory_format=torch.contiguous_format $input=15520 #15520=(6,64,64)f32 #relative_position_bias0.91=(6,64,64)f32 torch.transpose torch.transpose_3151 3 1 k.163 15479 15480 15511 $input=k.163 $dim0=15479 $dim1=15480 #k.163=(36,6,64,32)f32 #15511=(36,6,32,64)f32 torch.matmul torch.matmul_2364 2 1 q0.91 15511 attn.327 $input=q0.91 $other=15511 #q0.91=(36,6,64,32)f32 #15511=(36,6,32,64)f32 #attn.327=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3378 2 1 relative_position_bias0.91 25161 15522 $input=relative_position_bias0.91 $dim=25161 #relative_position_bias0.91=(6,64,64)f32 #15522=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4371 2 1 attn.327 15522 input.367 expr=add(@0,@1) #attn.327=(36,6,64,64)f32 #15522=(1,6,64,64)f32 #input.367=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.2.attn.softmax 1 1 input.367 15524 dim=-1 #input.367=(36,6,64,64)f32 #15524=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.2.attn.attn_drop 1 1 15524 15525 #15524=(36,6,64,64)f32 #15525=(36,6,64,64)f32 Tensor.select Tensor.select_892 3 1 qkv0.91 25152 25153 v.163 $input=qkv0.91 $dim=25152 $index=25153 #qkv0.91=(3,36,6,64,32)f32 #v.163=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4370 0 1 25163 expr=1 pnnx.Expression pnnx_expr_4369 0 1 25164 expr=2 torch.matmul torch.matmul_2365 2 1 15525 v.163 15526 $input=15525 $other=v.163 #15525=(36,6,64,64)f32 #v.163=(36,6,64,32)f32 #15526=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4368 1 1 x1.91 15528 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.91=(36,64,192)f32 torch.transpose torch.transpose_3152 3 1 15526 25163 25164 15527 $input=15526 $dim0=25163 $dim1=25164 #15526=(36,6,64,32)f32 #15527=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_595 2 1 15527 15528 input0.95 $input=15527 $shape=15528 #15527=(36,64,6,32)f32 #input0.95=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.2.attn.proj 1 1 input0.95 15530 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.95=(36,64,192)f32 #15530=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.2.attn.proj_drop 1 1 15530 15531 #15530=(36,64,192)f32 #15531=(36,64,192)f32 pnnx.Expression pnnx_expr_4364 1 1 15416 15532 expr=[-1,8,8,int(size(@0,2))] #15416=(1,2304,192)f32 pnnx.Expression pnnx_expr_4351 1 1 14094 15538 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4344 0 1 15540 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1875 2 1 15531 15532 windows.163 $input=15531 $shape=15532 #15531=(36,64,192)f32 #windows.163=(36,8,8,192)f32 Tensor.view 
Tensor.view_1876 2 1 windows.163 15538 x2.91 $input=windows.163 $shape=15538 #windows.163=(36,8,8,192)f32 #x2.91=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4341 1 1 14094 15543 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2851 2 1 x2.91 15540 15541 $input=x2.91 $dims=15540 #x2.91=(1,6,6,8,8,192)f32 #15541=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_244 1 1 15541 15542 memory_format=torch.contiguous_format $input=15541 #15541=(1,6,8,6,8,192)f32 #15542=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4338 2 1 15416 14094 15547 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15416=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1877 2 1 15542 15543 x3.91 $input=15542 $shape=15543 #15542=(1,6,8,6,8,192)f32 #x3.91=(1,48,48,192)f32 Tensor.view Tensor.view_1878 2 1 x3.91 15547 x4.91 $input=x3.91 $shape=15547 #x3.91=(1,48,48,192)f32 #x4.91=(1,2304,192)f32 pnnx.Expression pnnx_expr_4336 2 1 15416 x4.91 input.369 expr=add(@0,@1) #15416=(1,2304,192)f32 #x4.91=(1,2304,192)f32 #input.369=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.2.norm2 1 1 input.369 15551 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.369=(1,2304,192)f32 #15551=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.2.mlp.fc1 1 1 15551 15556 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15551=(1,2304,192)f32 #15556=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.2.mlp.act 1 1 15556 15557 #15556=(1,2304,384)f32 #15557=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.2.mlp.drop 1 1 15557 15558 #15557=(1,2304,384)f32 #15558=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.2.mlp.fc2 1 1 15558 15559 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15558=(1,2304,384)f32 #15559=(1,2304,192)f32 nn.Dropout pnnx_unique_622 1 1 15559 15560 #15559=(1,2304,192)f32 #15560=(1,2304,192)f32 pnnx.Expression pnnx_expr_4334 2 1 input.369 15560 15562 expr=add(@0,@1) #input.369=(1,2304,192)f32 #15560=(1,2304,192)f32 #15562=(1,2304,192)f32 pnnx.Attribute layers_mmsa.1.residual_group.blocks.3 0 1 attn_mask.83 @attn_mask=(36,64,64)f32 #attn_mask.83=(36,64,64)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.3.norm1 1 1 15562 15591 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15562=(1,2304,192)f32 #15591=(1,2304,192)f32 pnnx.Expression pnnx_expr_4311 2 1 15562 14094 15592 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15562=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4309 0 1 15594 expr=[-4,-4] pnnx.Expression pnnx_expr_4307 0 1 15595 expr=[1,2] Tensor.view Tensor.view_1879 2 1 15591 15592 x.165 $input=15591 $shape=15592 #15591=(1,2304,192)f32 #x.165=(1,48,48,192)f32 torch.roll torch.roll_2500 3 1 x.165 15594 15595 x0.93 $input=x.165 $shifts=15594 $dims=15595 #x.165=(1,48,48,192)f32 #x0.93=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4285 1 1 x0.93 15612 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.93=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4280 0 1 15614 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1880 2 1 x0.93 15612 x1.93 $input=x0.93 $shape=15612 #x0.93=(1,48,48,192)f32 #x1.93=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4277 1 1 x0.93 15617 expr=[-1,8,8,int(size(@0,3))] #x0.93=(1,48,48,192)f32 torch.permute 
torch.permute_2852 2 1 x1.93 15614 15615 $input=x1.93 $dims=15614 #x1.93=(1,6,8,6,8,192)f32 #15615=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_245 1 1 15615 15616 memory_format=torch.contiguous_format $input=15615 #15615=(1,6,6,8,8,192)f32 #15616=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4275 1 1 15562 15619 expr=[-1,64,int(size(@0,2))] #15562=(1,2304,192)f32 pnnx.Expression pnnx_expr_4265 0 1 15630 expr=-2 pnnx.Expression pnnx_expr_4264 0 1 15631 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.3.attn 0 1 relative_position_bias_table.165 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.165=(225,6)f32 pnnx.Attribute pnnx_unique_623 0 1 relative_position_index.165 @relative_position_index=(64,64)i64 #relative_position_index.165=(64,64)i64 Tensor.view Tensor.view_1881 2 1 15616 15617 x_windows.165 $input=15616 $shape=15617 #15616=(1,6,6,8,8,192)f32 #x_windows.165=(36,8,8,192)f32 Tensor.view Tensor.view_1882 2 1 x_windows.165 15619 x2.93 $input=x_windows.165 $shape=15619 #x_windows.165=(36,8,8,192)f32 #x2.93=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.3.attn.qkv 1 1 x2.93 15655 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.93=(36,64,192)f32 #15655=(36,64,576)f32 pnnx.Expression pnnx_expr_4245 1 1 x2.93 15658 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.93=(36,64,192)f32 pnnx.Expression pnnx_expr_4240 0 1 15660 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_596 2 1 15655 15658 15659 $input=15655 $shape=15658 #15655=(36,64,576)f32 #15659=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4239 0 1 25207 expr=0 pnnx.Expression pnnx_expr_4238 0 1 25208 expr=0 pnnx.Expression pnnx_expr_4237 0 1 25209 expr=0 pnnx.Expression pnnx_expr_4236 0 1 25210 expr=1 pnnx.Expression pnnx_expr_4235 0 1 25211 expr=0 pnnx.Expression pnnx_expr_4234 0 1 25212 expr=2 torch.permute torch.permute_2853 2 1 15659 15660 qkv0.93 $input=15659 $dims=15660 #15659=(36,64,3,6,32)f32 #qkv0.93=(3,36,6,64,32)f32 Tensor.select Tensor.select_893 3 1 qkv0.93 25207 25208 q.165 $input=qkv0.93 $dim=25207 $index=25208 #qkv0.93=(3,36,6,64,32)f32 #q.165=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4233 1 1 q.165 q0.93 expr=mul(@0,1.767767e-01) #q.165=(36,6,64,32)f32 #q0.93=(36,6,64,32)f32 Tensor.select Tensor.select_894 3 1 qkv0.93 25209 25210 k.165 $input=qkv0.93 $dim=25209 $index=25210 #qkv0.93=(3,36,6,64,32)f32 #k.165=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4231 0 1 15668 expr=[-1] Tensor.view Tensor.view_1883 2 1 relative_position_index.165 15668 15669 $input=relative_position_index.165 $shape=15668 #relative_position_index.165=(64,64)i64 #15669=(4096)i64 pnnx.Expression pnnx_expr_4230 1 1 15669 15670 expr=[@0] #15669=(4096)i64 pnnx.Expression pnnx_expr_4227 0 1 15672 expr=[64,64,-1] Tensor.index Tensor.index_407 2 1 relative_position_bias_table.165 15670 15671 $input=relative_position_bias_table.165 $expr=15670 #relative_position_bias_table.165=(225,6)f32 #15671=(4096,6)f32 pnnx.Expression pnnx_expr_4223 0 1 15674 expr=[2,0,1] Tensor.view Tensor.view_1884 2 1 15671 15672 relative_position_bias.165 $input=15671 $shape=15672 #15671=(4096,6)f32 #relative_position_bias.165=(64,64,6)f32 pnnx.Expression pnnx_expr_4222 0 1 25220 expr=0 torch.permute torch.permute_2854 2 1 relative_position_bias.165 15674 15675 $input=relative_position_bias.165 $dims=15674 #relative_position_bias.165=(64,64,6)f32 #15675=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_246 1 1 15675 relative_position_bias0.93 
memory_format=torch.contiguous_format $input=15675 #15675=(6,64,64)f32 #relative_position_bias0.93=(6,64,64)f32 torch.transpose torch.transpose_3153 3 1 k.165 15630 15631 15666 $input=k.165 $dim0=15630 $dim1=15631 #k.165=(36,6,64,32)f32 #15666=(36,6,32,64)f32 torch.matmul torch.matmul_2366 2 1 q0.93 15666 attn.331 $input=q0.93 $other=15666 #q0.93=(36,6,64,32)f32 #15666=(36,6,32,64)f32 #attn.331=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3379 2 1 relative_position_bias0.93 25220 15677 $input=relative_position_bias0.93 $dim=25220 #relative_position_bias0.93=(6,64,64)f32 #15677=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4220 2 1 attn.331 15677 attn0.47 expr=add(@0,@1) #attn.331=(36,6,64,64)f32 #15677=(1,6,64,64)f32 #attn0.47=(36,6,64,64)f32 pnnx.Expression pnnx_expr_4211 2 1 x2.93 attn_mask.83 15684 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.93=(36,64,192)f32 #attn_mask.83=(36,64,64)f32 pnnx.Expression pnnx_expr_4210 0 1 25225 expr=1 pnnx.Expression pnnx_expr_4209 0 1 25226 expr=0 Tensor.view Tensor.view_1885 2 1 attn0.47 15684 15685 $input=attn0.47 $shape=15684 #attn0.47=(36,6,64,64)f32 #15685=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3380 2 1 attn_mask.83 25225 15686 $input=attn_mask.83 $dim=25225 #attn_mask.83=(36,64,64)f32 #15686=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3381 2 1 15686 25226 15687 $input=15686 $dim=25226 #15686=(36,1,64,64)f32 #15687=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_4207 2 1 15685 15687 attn1.47 expr=add(@0,@1) #15685=(1,36,6,64,64)f32 #15687=(1,36,1,64,64)f32 #attn1.47=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_4204 1 1 x2.93 15689 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.93=(36,64,192)f32 Tensor.view Tensor.view_1886 2 1 attn1.47 15689 input.371 $input=attn1.47 $shape=15689 #attn1.47=(1,36,6,64,64)f32 #input.371=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.3.attn.softmax 1 1 input.371 15691 dim=-1 #input.371=(36,6,64,64)f32 #15691=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.3.attn.attn_drop 1 1 15691 15692 #15691=(36,6,64,64)f32 #15692=(36,6,64,64)f32 Tensor.select Tensor.select_895 3 1 qkv0.93 25211 25212 v.165 $input=qkv0.93 $dim=25211 $index=25212 #qkv0.93=(3,36,6,64,32)f32 #v.165=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4203 0 1 25230 expr=1 pnnx.Expression pnnx_expr_4202 0 1 25231 expr=2 torch.matmul torch.matmul_2367 2 1 15692 v.165 15693 $input=15692 $other=v.165 #15692=(36,6,64,64)f32 #v.165=(36,6,64,32)f32 #15693=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4201 1 1 x2.93 15695 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.93=(36,64,192)f32 torch.transpose torch.transpose_3154 3 1 15693 25230 25231 15694 $input=15693 $dim0=25230 $dim1=25231 #15693=(36,6,64,32)f32 #15694=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_597 2 1 15694 15695 input0.97 $input=15694 $shape=15695 #15694=(36,64,6,32)f32 #input0.97=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.3.attn.proj 1 1 input0.97 15697 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.97=(36,64,192)f32 #15697=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.3.attn.proj_drop 1 1 15697 15698 #15697=(36,64,192)f32 #15698=(36,64,192)f32 pnnx.Expression pnnx_expr_4197 1 1 15562 15699 expr=[-1,8,8,int(size(@0,2))] #15562=(1,2304,192)f32 pnnx.Expression pnnx_expr_4184 1 1 14094 15705 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4177 0 1 15707 expr=[0,1,3,2,4,5] 
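
Each attention block also carries two per-block attributes: a (225, 6) relative_position_bias_table and a (64, 64) i64 relative_position_index. 225 = (2*8-1)^2 is the number of distinct relative offsets between two tokens of an 8x8 window, and the index map picks one of those 225 rows (one bias value per head) for every query/key pair. The view/index/view/permute/unsqueeze chain above is this gather; in plain PyTorch, with random stand-in data:

import torch

table = torch.randn(225, 6)                  # @relative_position_bias_table
index = torch.randint(0, 225, (64, 64))      # @relative_position_index, i64

bias = table[index.view(-1)]                 # Tensor.index     -> (4096, 6)
bias = bias.view(64, 64, -1)                 # Tensor.view      -> (64, 64, 6)
bias = bias.permute(2, 0, 1).contiguous()    # torch.permute    -> (6, 64, 64)
bias = bias.unsqueeze(0)                     # torch.unsqueeze  -> (1, 6, 64, 64)
# bias is then broadcast-added to the (36, 6, 64, 64) attention logits
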
Tensor.view Tensor.view_1887 2 1 15698 15699 windows.165 $input=15698 $shape=15699 #15698=(36,64,192)f32 #windows.165=(36,8,8,192)f32 Tensor.view Tensor.view_1888 2 1 windows.165 15705 x3.93 $input=windows.165 $shape=15705 #windows.165=(36,8,8,192)f32 #x3.93=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4174 1 1 14094 15710 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2855 2 1 x3.93 15707 15708 $input=x3.93 $dims=15707 #x3.93=(1,6,6,8,8,192)f32 #15708=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_247 1 1 15708 15709 memory_format=torch.contiguous_format $input=15708 #15708=(1,6,8,6,8,192)f32 #15709=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4171 0 1 15712 expr=[4,4] pnnx.Expression pnnx_expr_4168 0 1 15713 expr=[1,2] Tensor.view Tensor.view_1889 2 1 15709 15710 shifted_x.83 $input=15709 $shape=15710 #15709=(1,6,8,6,8,192)f32 #shifted_x.83=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4165 2 1 15562 14094 15717 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15562=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2501 3 1 shifted_x.83 15712 15713 x4.93 $input=shifted_x.83 $shifts=15712 $dims=15713 #shifted_x.83=(1,48,48,192)f32 #x4.93=(1,48,48,192)f32 Tensor.view Tensor.view_1890 2 1 x4.93 15717 x5.83 $input=x4.93 $shape=15717 #x4.93=(1,48,48,192)f32 #x5.83=(1,2304,192)f32 pnnx.Expression pnnx_expr_4163 2 1 15562 x5.83 input.373 expr=add(@0,@1) #15562=(1,2304,192)f32 #x5.83=(1,2304,192)f32 #input.373=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.3.norm2 1 1 input.373 15721 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.373=(1,2304,192)f32 #15721=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.3.mlp.fc1 1 1 15721 15726 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15721=(1,2304,192)f32 #15726=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.3.mlp.act 1 1 15726 15727 #15726=(1,2304,384)f32 #15727=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.3.mlp.drop 1 1 15727 15728 #15727=(1,2304,384)f32 #15728=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.3.mlp.fc2 1 1 15728 15729 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15728=(1,2304,384)f32 #15729=(1,2304,192)f32 nn.Dropout pnnx_unique_624 1 1 15729 15730 #15729=(1,2304,192)f32 #15730=(1,2304,192)f32 pnnx.Expression pnnx_expr_4161 2 1 input.373 15730 15732 expr=add(@0,@1) #input.373=(1,2304,192)f32 #15730=(1,2304,192)f32 #15732=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.4.norm1 1 1 15732 15759 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15732=(1,2304,192)f32 #15759=(1,2304,192)f32 pnnx.Expression pnnx_expr_4139 2 1 15732 14094 15760 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15732=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1891 2 1 15759 15760 x.167 $input=15759 $shape=15760 #15759=(1,2304,192)f32 #x.167=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4118 1 1 x.167 15777 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.167=(1,48,48,192)f32 pnnx.Expression pnnx_expr_4113 0 1 15779 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1892 2 1 x.167 15777 x0.95 $input=x.167 $shape=15777 #x.167=(1,48,48,192)f32 #x0.95=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4110 1 1 x.167 15782 expr=[-1,8,8,int(size(@0,3))] 
#x.167=(1,48,48,192)f32 torch.permute torch.permute_2856 2 1 x0.95 15779 15780 $input=x0.95 $dims=15779 #x0.95=(1,6,8,6,8,192)f32 #15780=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_248 1 1 15780 15781 memory_format=torch.contiguous_format $input=15780 #15780=(1,6,6,8,8,192)f32 #15781=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4108 1 1 15732 15784 expr=[-1,64,int(size(@0,2))] #15732=(1,2304,192)f32 pnnx.Expression pnnx_expr_4098 0 1 15795 expr=-2 pnnx.Expression pnnx_expr_4097 0 1 15796 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.4.attn 0 1 relative_position_bias_table.167 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.167=(225,6)f32 pnnx.Attribute pnnx_unique_625 0 1 relative_position_index.167 @relative_position_index=(64,64)i64 #relative_position_index.167=(64,64)i64 Tensor.view Tensor.view_1893 2 1 15781 15782 x_windows.167 $input=15781 $shape=15782 #15781=(1,6,6,8,8,192)f32 #x_windows.167=(36,8,8,192)f32 Tensor.view Tensor.view_1894 2 1 x_windows.167 15784 x1.95 $input=x_windows.167 $shape=15784 #x_windows.167=(36,8,8,192)f32 #x1.95=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.4.attn.qkv 1 1 x1.95 15816 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.95=(36,64,192)f32 #15816=(36,64,576)f32 pnnx.Expression pnnx_expr_4082 1 1 x1.95 15819 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.95=(36,64,192)f32 pnnx.Expression pnnx_expr_4077 0 1 15821 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_598 2 1 15816 15819 15820 $input=15816 $shape=15819 #15816=(36,64,576)f32 #15820=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_4076 0 1 25275 expr=0 pnnx.Expression pnnx_expr_4075 0 1 25276 expr=0 pnnx.Expression pnnx_expr_4074 0 1 25277 expr=0 pnnx.Expression pnnx_expr_4073 0 1 25278 expr=1 pnnx.Expression pnnx_expr_4072 0 1 25279 expr=0 pnnx.Expression pnnx_expr_4071 0 1 25280 expr=2 torch.permute torch.permute_2857 2 1 15820 15821 qkv0.95 $input=15820 $dims=15821 #15820=(36,64,3,6,32)f32 #qkv0.95=(3,36,6,64,32)f32 Tensor.select Tensor.select_896 3 1 qkv0.95 25275 25276 q.167 $input=qkv0.95 $dim=25275 $index=25276 #qkv0.95=(3,36,6,64,32)f32 #q.167=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4070 1 1 q.167 q0.95 expr=mul(@0,1.767767e-01) #q.167=(36,6,64,32)f32 #q0.95=(36,6,64,32)f32 Tensor.select Tensor.select_897 3 1 qkv0.95 25277 25278 k.167 $input=qkv0.95 $dim=25277 $index=25278 #qkv0.95=(3,36,6,64,32)f32 #k.167=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4068 0 1 15829 expr=[-1] Tensor.view Tensor.view_1895 2 1 relative_position_index.167 15829 15830 $input=relative_position_index.167 $shape=15829 #relative_position_index.167=(64,64)i64 #15830=(4096)i64 pnnx.Expression pnnx_expr_4067 1 1 15830 15831 expr=[@0] #15830=(4096)i64 pnnx.Expression pnnx_expr_4064 0 1 15833 expr=[64,64,-1] Tensor.index Tensor.index_408 2 1 relative_position_bias_table.167 15831 15832 $input=relative_position_bias_table.167 $expr=15831 #relative_position_bias_table.167=(225,6)f32 #15832=(4096,6)f32 pnnx.Expression pnnx_expr_4060 0 1 15835 expr=[2,0,1] Tensor.view Tensor.view_1896 2 1 15832 15833 relative_position_bias.167 $input=15832 $shape=15833 #15832=(4096,6)f32 #relative_position_bias.167=(64,64,6)f32 pnnx.Expression pnnx_expr_4059 0 1 25288 expr=0 torch.permute torch.permute_2858 2 1 relative_position_bias.167 15835 15836 $input=relative_position_bias.167 $dims=15835 #relative_position_bias.167=(64,64,6)f32 #15836=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_249 1 1 15836 
relative_position_bias0.95 memory_format=torch.contiguous_format $input=15836 #15836=(6,64,64)f32 #relative_position_bias0.95=(6,64,64)f32 torch.transpose torch.transpose_3155 3 1 k.167 15795 15796 15827 $input=k.167 $dim0=15795 $dim1=15796 #k.167=(36,6,64,32)f32 #15827=(36,6,32,64)f32 torch.matmul torch.matmul_2368 2 1 q0.95 15827 attn.335 $input=q0.95 $other=15827 #q0.95=(36,6,64,32)f32 #15827=(36,6,32,64)f32 #attn.335=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3382 2 1 relative_position_bias0.95 25288 15838 $input=relative_position_bias0.95 $dim=25288 #relative_position_bias0.95=(6,64,64)f32 #15838=(1,6,64,64)f32 pnnx.Expression pnnx_expr_4057 2 1 attn.335 15838 input.375 expr=add(@0,@1) #attn.335=(36,6,64,64)f32 #15838=(1,6,64,64)f32 #input.375=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.4.attn.softmax 1 1 input.375 15840 dim=-1 #input.375=(36,6,64,64)f32 #15840=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.4.attn.attn_drop 1 1 15840 15841 #15840=(36,6,64,64)f32 #15841=(36,6,64,64)f32 Tensor.select Tensor.select_898 3 1 qkv0.95 25279 25280 v.167 $input=qkv0.95 $dim=25279 $index=25280 #qkv0.95=(3,36,6,64,32)f32 #v.167=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4056 0 1 25290 expr=1 pnnx.Expression pnnx_expr_4055 0 1 25291 expr=2 torch.matmul torch.matmul_2369 2 1 15841 v.167 15842 $input=15841 $other=v.167 #15841=(36,6,64,64)f32 #v.167=(36,6,64,32)f32 #15842=(36,6,64,32)f32 pnnx.Expression pnnx_expr_4054 1 1 x1.95 15844 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.95=(36,64,192)f32 torch.transpose torch.transpose_3156 3 1 15842 25290 25291 15843 $input=15842 $dim0=25290 $dim1=25291 #15842=(36,6,64,32)f32 #15843=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_599 2 1 15843 15844 input0.99 $input=15843 $shape=15844 #15843=(36,64,6,32)f32 #input0.99=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.4.attn.proj 1 1 input0.99 15846 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.99=(36,64,192)f32 #15846=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.4.attn.proj_drop 1 1 15846 15847 #15846=(36,64,192)f32 #15847=(36,64,192)f32 pnnx.Expression pnnx_expr_4050 1 1 15732 15848 expr=[-1,8,8,int(size(@0,2))] #15732=(1,2304,192)f32 pnnx.Expression pnnx_expr_4037 1 1 14094 15854 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_4030 0 1 15856 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1897 2 1 15847 15848 windows.167 $input=15847 $shape=15848 #15847=(36,64,192)f32 #windows.167=(36,8,8,192)f32 Tensor.view Tensor.view_1898 2 1 windows.167 15854 x2.95 $input=windows.167 $shape=15854 #windows.167=(36,8,8,192)f32 #x2.95=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_4027 1 1 14094 15859 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2859 2 1 x2.95 15856 15857 $input=x2.95 $dims=15856 #x2.95=(1,6,6,8,8,192)f32 #15857=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_250 1 1 15857 15858 memory_format=torch.contiguous_format $input=15857 #15857=(1,6,8,6,8,192)f32 #15858=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_4024 2 1 15732 14094 15863 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15732=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1899 2 1 15858 15859 x3.95 $input=15858 $shape=15859 #15858=(1,6,8,6,8,192)f32 #x3.95=(1,48,48,192)f32 Tensor.view Tensor.view_1900 2 1 x3.95 15863 x4.95 $input=x3.95 $shape=15863 #x3.95=(1,48,48,192)f32 
#x4.95=(1,2304,192)f32 pnnx.Expression pnnx_expr_4022 2 1 15732 x4.95 input.377 expr=add(@0,@1) #15732=(1,2304,192)f32 #x4.95=(1,2304,192)f32 #input.377=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.4.norm2 1 1 input.377 15867 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.377=(1,2304,192)f32 #15867=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.4.mlp.fc1 1 1 15867 15872 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #15867=(1,2304,192)f32 #15872=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.4.mlp.act 1 1 15872 15873 #15872=(1,2304,384)f32 #15873=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.4.mlp.drop 1 1 15873 15874 #15873=(1,2304,384)f32 #15874=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.4.mlp.fc2 1 1 15874 15875 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #15874=(1,2304,384)f32 #15875=(1,2304,192)f32 nn.Dropout pnnx_unique_626 1 1 15875 15876 #15875=(1,2304,192)f32 #15876=(1,2304,192)f32 pnnx.Expression pnnx_expr_4020 2 1 input.377 15876 15878 expr=add(@0,@1) #input.377=(1,2304,192)f32 #15876=(1,2304,192)f32 #15878=(1,2304,192)f32 pnnx.Attribute layers_mmsa.1.residual_group.blocks.5 0 1 attn_mask.85 @attn_mask=(36,64,64)f32 #attn_mask.85=(36,64,64)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.5.norm1 1 1 15878 15907 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #15878=(1,2304,192)f32 #15907=(1,2304,192)f32 pnnx.Expression pnnx_expr_3997 2 1 15878 14094 15908 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #15878=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3995 0 1 15910 expr=[-4,-4] pnnx.Expression pnnx_expr_3993 0 1 15911 expr=[1,2] Tensor.view Tensor.view_1901 2 1 15907 15908 x.169 $input=15907 $shape=15908 #15907=(1,2304,192)f32 #x.169=(1,48,48,192)f32 torch.roll torch.roll_2502 3 1 x.169 15910 15911 x0.97 $input=x.169 $shifts=15910 $dims=15911 #x.169=(1,48,48,192)f32 #x0.97=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3971 1 1 x0.97 15928 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.97=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3966 0 1 15930 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1902 2 1 x0.97 15928 x1.97 $input=x0.97 $shape=15928 #x0.97=(1,48,48,192)f32 #x1.97=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3963 1 1 x0.97 15933 expr=[-1,8,8,int(size(@0,3))] #x0.97=(1,48,48,192)f32 torch.permute torch.permute_2860 2 1 x1.97 15930 15931 $input=x1.97 $dims=15930 #x1.97=(1,6,8,6,8,192)f32 #15931=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_251 1 1 15931 15932 memory_format=torch.contiguous_format $input=15931 #15931=(1,6,6,8,8,192)f32 #15932=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3961 1 1 15878 15935 expr=[-1,64,int(size(@0,2))] #15878=(1,2304,192)f32 pnnx.Expression pnnx_expr_3951 0 1 15946 expr=-2 pnnx.Expression pnnx_expr_3950 0 1 15947 expr=-1 pnnx.Attribute layers_mmsa.1.residual_group.blocks.5.attn 0 1 relative_position_bias_table.169 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.169=(225,6)f32 pnnx.Attribute pnnx_unique_627 0 1 relative_position_index.169 @relative_position_index=(64,64)i64 #relative_position_index.169=(64,64)i64 Tensor.view Tensor.view_1903 2 1 15932 15933 x_windows.169 $input=15932 $shape=15933 #15932=(1,6,6,8,8,192)f32 #x_windows.169=(36,8,8,192)f32 Tensor.view 
Tensor.view_1904 2 1 x_windows.169 15935 x2.97 $input=x_windows.169 $shape=15935 #x_windows.169=(36,8,8,192)f32 #x2.97=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.5.attn.qkv 1 1 x2.97 15971 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.97=(36,64,192)f32 #15971=(36,64,576)f32 pnnx.Expression pnnx_expr_3931 1 1 x2.97 15974 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.97=(36,64,192)f32 pnnx.Expression pnnx_expr_3926 0 1 15976 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_600 2 1 15971 15974 15975 $input=15971 $shape=15974 #15971=(36,64,576)f32 #15975=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3925 0 1 25334 expr=0 pnnx.Expression pnnx_expr_3924 0 1 25335 expr=0 pnnx.Expression pnnx_expr_3923 0 1 25336 expr=0 pnnx.Expression pnnx_expr_3922 0 1 25337 expr=1 pnnx.Expression pnnx_expr_3921 0 1 25338 expr=0 pnnx.Expression pnnx_expr_3920 0 1 25339 expr=2 torch.permute torch.permute_2861 2 1 15975 15976 qkv0.97 $input=15975 $dims=15976 #15975=(36,64,3,6,32)f32 #qkv0.97=(3,36,6,64,32)f32 Tensor.select Tensor.select_899 3 1 qkv0.97 25334 25335 q.169 $input=qkv0.97 $dim=25334 $index=25335 #qkv0.97=(3,36,6,64,32)f32 #q.169=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3919 1 1 q.169 q0.97 expr=mul(@0,1.767767e-01) #q.169=(36,6,64,32)f32 #q0.97=(36,6,64,32)f32 Tensor.select Tensor.select_900 3 1 qkv0.97 25336 25337 k.169 $input=qkv0.97 $dim=25336 $index=25337 #qkv0.97=(3,36,6,64,32)f32 #k.169=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3917 0 1 15984 expr=[-1] Tensor.view Tensor.view_1905 2 1 relative_position_index.169 15984 15985 $input=relative_position_index.169 $shape=15984 #relative_position_index.169=(64,64)i64 #15985=(4096)i64 pnnx.Expression pnnx_expr_3916 1 1 15985 15986 expr=[@0] #15985=(4096)i64 pnnx.Expression pnnx_expr_3913 0 1 15988 expr=[64,64,-1] Tensor.index Tensor.index_409 2 1 relative_position_bias_table.169 15986 15987 $input=relative_position_bias_table.169 $expr=15986 #relative_position_bias_table.169=(225,6)f32 #15987=(4096,6)f32 pnnx.Expression pnnx_expr_3909 0 1 15990 expr=[2,0,1] Tensor.view Tensor.view_1906 2 1 15987 15988 relative_position_bias.169 $input=15987 $shape=15988 #15987=(4096,6)f32 #relative_position_bias.169=(64,64,6)f32 pnnx.Expression pnnx_expr_3908 0 1 25347 expr=0 torch.permute torch.permute_2862 2 1 relative_position_bias.169 15990 15991 $input=relative_position_bias.169 $dims=15990 #relative_position_bias.169=(64,64,6)f32 #15991=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_252 1 1 15991 relative_position_bias0.97 memory_format=torch.contiguous_format $input=15991 #15991=(6,64,64)f32 #relative_position_bias0.97=(6,64,64)f32 torch.transpose torch.transpose_3157 3 1 k.169 15946 15947 15982 $input=k.169 $dim0=15946 $dim1=15947 #k.169=(36,6,64,32)f32 #15982=(36,6,32,64)f32 torch.matmul torch.matmul_2370 2 1 q0.97 15982 attn.339 $input=q0.97 $other=15982 #q0.97=(36,6,64,32)f32 #15982=(36,6,32,64)f32 #attn.339=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3383 2 1 relative_position_bias0.97 25347 15993 $input=relative_position_bias0.97 $dim=25347 #relative_position_bias0.97=(6,64,64)f32 #15993=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3906 2 1 attn.339 15993 attn0.49 expr=add(@0,@1) #attn.339=(36,6,64,64)f32 #15993=(1,6,64,64)f32 #attn0.49=(36,6,64,64)f32 pnnx.Expression pnnx_expr_3897 2 1 x2.97 attn_mask.85 16000 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.97=(36,64,192)f32 #attn_mask.85=(36,64,64)f32 pnnx.Expression pnnx_expr_3896 0 1 
25352 expr=1 pnnx.Expression pnnx_expr_3895 0 1 25353 expr=0 Tensor.view Tensor.view_1907 2 1 attn0.49 16000 16001 $input=attn0.49 $shape=16000 #attn0.49=(36,6,64,64)f32 #16001=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3384 2 1 attn_mask.85 25352 16002 $input=attn_mask.85 $dim=25352 #attn_mask.85=(36,64,64)f32 #16002=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3385 2 1 16002 25353 16003 $input=16002 $dim=25353 #16002=(36,1,64,64)f32 #16003=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_3893 2 1 16001 16003 attn1.49 expr=add(@0,@1) #16001=(1,36,6,64,64)f32 #16003=(1,36,1,64,64)f32 #attn1.49=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_3890 1 1 x2.97 16005 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.97=(36,64,192)f32 Tensor.view Tensor.view_1908 2 1 attn1.49 16005 input.379 $input=attn1.49 $shape=16005 #attn1.49=(1,36,6,64,64)f32 #input.379=(36,6,64,64)f32 nn.Softmax layers_mmsa.1.residual_group.blocks.5.attn.softmax 1 1 input.379 16007 dim=-1 #input.379=(36,6,64,64)f32 #16007=(36,6,64,64)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.5.attn.attn_drop 1 1 16007 16008 #16007=(36,6,64,64)f32 #16008=(36,6,64,64)f32 Tensor.select Tensor.select_901 3 1 qkv0.97 25338 25339 v.169 $input=qkv0.97 $dim=25338 $index=25339 #qkv0.97=(3,36,6,64,32)f32 #v.169=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3889 0 1 25357 expr=1 pnnx.Expression pnnx_expr_3888 0 1 25358 expr=2 torch.matmul torch.matmul_2371 2 1 16008 v.169 16009 $input=16008 $other=v.169 #16008=(36,6,64,64)f32 #v.169=(36,6,64,32)f32 #16009=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3887 1 1 x2.97 16011 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.97=(36,64,192)f32 torch.transpose torch.transpose_3158 3 1 16009 25357 25358 16010 $input=16009 $dim0=25357 $dim1=25358 #16009=(36,6,64,32)f32 #16010=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_601 2 1 16010 16011 input0.101 $input=16010 $shape=16011 #16010=(36,64,6,32)f32 #input0.101=(36,64,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.5.attn.proj 1 1 input0.101 16013 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.101=(36,64,192)f32 #16013=(36,64,192)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.5.attn.proj_drop 1 1 16013 16014 #16013=(36,64,192)f32 #16014=(36,64,192)f32 pnnx.Expression pnnx_expr_3883 1 1 15878 16015 expr=[-1,8,8,int(size(@0,2))] #15878=(1,2304,192)f32 pnnx.Expression pnnx_expr_3870 1 1 14094 16021 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3863 0 1 16023 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1909 2 1 16014 16015 windows.169 $input=16014 $shape=16015 #16014=(36,64,192)f32 #windows.169=(36,8,8,192)f32 Tensor.view Tensor.view_1910 2 1 windows.169 16021 x3.97 $input=windows.169 $shape=16021 #windows.169=(36,8,8,192)f32 #x3.97=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3860 1 1 14094 16026 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2863 2 1 x3.97 16023 16024 $input=x3.97 $dims=16023 #x3.97=(1,6,6,8,8,192)f32 #16024=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_253 1 1 16024 16025 memory_format=torch.contiguous_format $input=16024 #16024=(1,6,8,6,8,192)f32 #16025=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3857 0 1 16028 expr=[4,4] pnnx.Expression pnnx_expr_3854 0 1 16029 expr=[1,2] Tensor.view Tensor.view_1911 2 1 16025 16026 shifted_x.85 $input=16025 $shape=16026 #16025=(1,6,8,6,8,192)f32 #shifted_x.85=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3851 2 1 15878 14094 16033 
expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #15878=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2503 3 1 shifted_x.85 16028 16029 x4.97 $input=shifted_x.85 $shifts=16028 $dims=16029 #shifted_x.85=(1,48,48,192)f32 #x4.97=(1,48,48,192)f32 Tensor.view Tensor.view_1912 2 1 x4.97 16033 x5.85 $input=x4.97 $shape=16033 #x4.97=(1,48,48,192)f32 #x5.85=(1,2304,192)f32 pnnx.Expression pnnx_expr_3849 2 1 15878 x5.85 input.381 expr=add(@0,@1) #15878=(1,2304,192)f32 #x5.85=(1,2304,192)f32 #input.381=(1,2304,192)f32 nn.LayerNorm layers_mmsa.1.residual_group.blocks.5.norm2 1 1 input.381 16037 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.381=(1,2304,192)f32 #16037=(1,2304,192)f32 nn.Linear layers_mmsa.1.residual_group.blocks.5.mlp.fc1 1 1 16037 16042 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16037=(1,2304,192)f32 #16042=(1,2304,384)f32 nn.GELU layers_mmsa.1.residual_group.blocks.5.mlp.act 1 1 16042 16043 #16042=(1,2304,384)f32 #16043=(1,2304,384)f32 nn.Dropout layers_mmsa.1.residual_group.blocks.5.mlp.drop 1 1 16043 16044 #16043=(1,2304,384)f32 #16044=(1,2304,384)f32 nn.Linear layers_mmsa.1.residual_group.blocks.5.mlp.fc2 1 1 16044 16045 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16044=(1,2304,384)f32 #16045=(1,2304,192)f32 nn.Dropout pnnx_unique_628 1 1 16045 16046 #16045=(1,2304,192)f32 #16046=(1,2304,192)f32 pnnx.Expression pnnx_expr_3847 2 1 input.381 16046 16048 expr=add(@0,@1) #input.381=(1,2304,192)f32 #16046=(1,2304,192)f32 #16048=(1,2304,192)f32 pnnx.Expression pnnx_expr_3845 0 1 16050 expr=1 pnnx.Expression pnnx_expr_3844 0 1 16051 expr=2 pnnx.Expression pnnx_expr_3839 2 1 16048 14094 16057 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #16048=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.transpose torch.transpose_3159 3 1 16048 16050 16051 16056 $input=16048 $dim0=16050 $dim1=16051 #16048=(1,2304,192)f32 #16056=(1,192,2304)f32 Tensor.view Tensor.view_1913 2 1 16056 16057 input.383 $input=16056 $shape=16057 #16056=(1,192,2304)f32 #input.383=(1,192,48,48)f32 nn.Conv2d layers_mmsa.1.conv 1 1 input.383 16059 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.383=(1,192,48,48)f32 #16059=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3838 0 1 16060 expr=-1 pnnx.Expression pnnx_expr_3837 0 1 16061 expr=2 pnnx.Expression pnnx_expr_3836 0 1 16062 expr=1 pnnx.Expression pnnx_expr_3835 0 1 25385 expr=2 torch.flatten torch.flatten_2199 3 1 16059 16061 16060 16063 $input=16059 $start_dim=16061 $end_dim=16060 #16059=(1,192,48,48)f32 #16063=(1,192,2304)f32 torch.transpose torch.transpose_3160 3 1 16063 16062 25385 16064 $input=16063 $dim0=16062 $dim1=25385 #16063=(1,192,2304)f32 #16064=(1,2304,192)f32 pnnx.Expression pnnx_expr_3834 2 1 16064 15083 16065 expr=add(@0,@1) #16064=(1,2304,192)f32 #15083=(1,2304,192)f32 #16065=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.0.norm1 1 1 16065 16109 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16065=(1,2304,192)f32 #16109=(1,2304,192)f32 pnnx.Expression pnnx_expr_3811 2 1 16065 14094 16110 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16065=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1914 2 1 16109 16110 x.171 $input=16109 $shape=16110 
#16109=(1,2304,192)f32 #x.171=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3790 1 1 x.171 16127 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.171=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3785 0 1 16129 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1915 2 1 x.171 16127 x0.99 $input=x.171 $shape=16127 #x.171=(1,48,48,192)f32 #x0.99=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3782 1 1 x.171 16132 expr=[-1,8,8,int(size(@0,3))] #x.171=(1,48,48,192)f32 torch.permute torch.permute_2864 2 1 x0.99 16129 16130 $input=x0.99 $dims=16129 #x0.99=(1,6,8,6,8,192)f32 #16130=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_254 1 1 16130 16131 memory_format=torch.contiguous_format $input=16130 #16130=(1,6,6,8,8,192)f32 #16131=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3780 1 1 16065 16134 expr=[-1,64,int(size(@0,2))] #16065=(1,2304,192)f32 pnnx.Expression pnnx_expr_3770 0 1 16145 expr=-2 pnnx.Expression pnnx_expr_3769 0 1 16146 expr=-1 pnnx.Attribute layers_mmsa.2.residual_group.blocks.0.attn 0 1 relative_position_bias_table.171 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.171=(225,6)f32 pnnx.Attribute pnnx_unique_629 0 1 relative_position_index.171 @relative_position_index=(64,64)i64 #relative_position_index.171=(64,64)i64 Tensor.view Tensor.view_1916 2 1 16131 16132 x_windows.171 $input=16131 $shape=16132 #16131=(1,6,6,8,8,192)f32 #x_windows.171=(36,8,8,192)f32 Tensor.view Tensor.view_1917 2 1 x_windows.171 16134 x1.99 $input=x_windows.171 $shape=16134 #x_windows.171=(36,8,8,192)f32 #x1.99=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.0.attn.qkv 1 1 x1.99 16166 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.99=(36,64,192)f32 #16166=(36,64,576)f32 pnnx.Expression pnnx_expr_3754 1 1 x1.99 16169 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.99=(36,64,192)f32 pnnx.Expression pnnx_expr_3749 0 1 16171 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_602 2 1 16166 16169 16170 $input=16166 $shape=16169 #16166=(36,64,576)f32 #16170=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3748 0 1 25403 expr=0 pnnx.Expression pnnx_expr_3747 0 1 25404 expr=0 pnnx.Expression pnnx_expr_3746 0 1 25405 expr=0 pnnx.Expression pnnx_expr_3745 0 1 25406 expr=1 pnnx.Expression pnnx_expr_3744 0 1 25407 expr=0 pnnx.Expression pnnx_expr_3743 0 1 25408 expr=2 torch.permute torch.permute_2865 2 1 16170 16171 qkv0.99 $input=16170 $dims=16171 #16170=(36,64,3,6,32)f32 #qkv0.99=(3,36,6,64,32)f32 Tensor.select Tensor.select_902 3 1 qkv0.99 25403 25404 q.171 $input=qkv0.99 $dim=25403 $index=25404 #qkv0.99=(3,36,6,64,32)f32 #q.171=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3742 1 1 q.171 q0.99 expr=mul(@0,1.767767e-01) #q.171=(36,6,64,32)f32 #q0.99=(36,6,64,32)f32 Tensor.select Tensor.select_903 3 1 qkv0.99 25405 25406 k.171 $input=qkv0.99 $dim=25405 $index=25406 #qkv0.99=(3,36,6,64,32)f32 #k.171=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3740 0 1 16179 expr=[-1] Tensor.view Tensor.view_1918 2 1 relative_position_index.171 16179 16180 $input=relative_position_index.171 $shape=16179 #relative_position_index.171=(64,64)i64 #16180=(4096)i64 pnnx.Expression pnnx_expr_3739 1 1 16180 16181 expr=[@0] #16180=(4096)i64 pnnx.Expression pnnx_expr_3736 0 1 16183 expr=[64,64,-1] Tensor.index Tensor.index_410 2 1 relative_position_bias_table.171 16181 16182 $input=relative_position_bias_table.171 $expr=16181 #relative_position_bias_table.171=(225,6)f32 #16182=(4096,6)f32 pnnx.Expression pnnx_expr_3732 0 1 16185 
expr=[2,0,1] Tensor.view Tensor.view_1919 2 1 16182 16183 relative_position_bias.171 $input=16182 $shape=16183 #16182=(4096,6)f32 #relative_position_bias.171=(64,64,6)f32 pnnx.Expression pnnx_expr_3731 0 1 25416 expr=0 torch.permute torch.permute_2866 2 1 relative_position_bias.171 16185 16186 $input=relative_position_bias.171 $dims=16185 #relative_position_bias.171=(64,64,6)f32 #16186=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_255 1 1 16186 relative_position_bias0.99 memory_format=torch.contiguous_format $input=16186 #16186=(6,64,64)f32 #relative_position_bias0.99=(6,64,64)f32 torch.transpose torch.transpose_3161 3 1 k.171 16145 16146 16177 $input=k.171 $dim0=16145 $dim1=16146 #k.171=(36,6,64,32)f32 #16177=(36,6,32,64)f32 torch.matmul torch.matmul_2372 2 1 q0.99 16177 attn.343 $input=q0.99 $other=16177 #q0.99=(36,6,64,32)f32 #16177=(36,6,32,64)f32 #attn.343=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3386 2 1 relative_position_bias0.99 25416 16188 $input=relative_position_bias0.99 $dim=25416 #relative_position_bias0.99=(6,64,64)f32 #16188=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3729 2 1 attn.343 16188 input.385 expr=add(@0,@1) #attn.343=(36,6,64,64)f32 #16188=(1,6,64,64)f32 #input.385=(36,6,64,64)f32 nn.Softmax layers_mmsa.2.residual_group.blocks.0.attn.softmax 1 1 input.385 16190 dim=-1 #input.385=(36,6,64,64)f32 #16190=(36,6,64,64)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.0.attn.attn_drop 1 1 16190 16191 #16190=(36,6,64,64)f32 #16191=(36,6,64,64)f32 Tensor.select Tensor.select_904 3 1 qkv0.99 25407 25408 v.171 $input=qkv0.99 $dim=25407 $index=25408 #qkv0.99=(3,36,6,64,32)f32 #v.171=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3728 0 1 25418 expr=1 pnnx.Expression pnnx_expr_3727 0 1 25419 expr=2 torch.matmul torch.matmul_2373 2 1 16191 v.171 16192 $input=16191 $other=v.171 #16191=(36,6,64,64)f32 #v.171=(36,6,64,32)f32 #16192=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3726 1 1 x1.99 16194 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.99=(36,64,192)f32 torch.transpose torch.transpose_3162 3 1 16192 25418 25419 16193 $input=16192 $dim0=25418 $dim1=25419 #16192=(36,6,64,32)f32 #16193=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_603 2 1 16193 16194 input0.103 $input=16193 $shape=16194 #16193=(36,64,6,32)f32 #input0.103=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.0.attn.proj 1 1 input0.103 16196 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.103=(36,64,192)f32 #16196=(36,64,192)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.0.attn.proj_drop 1 1 16196 16197 #16196=(36,64,192)f32 #16197=(36,64,192)f32 pnnx.Expression pnnx_expr_3722 1 1 16065 16198 expr=[-1,8,8,int(size(@0,2))] #16065=(1,2304,192)f32 pnnx.Expression pnnx_expr_3709 1 1 14094 16204 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3702 0 1 16206 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1920 2 1 16197 16198 windows.171 $input=16197 $shape=16198 #16197=(36,64,192)f32 #windows.171=(36,8,8,192)f32 Tensor.view Tensor.view_1921 2 1 windows.171 16204 x2.99 $input=windows.171 $shape=16204 #windows.171=(36,8,8,192)f32 #x2.99=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3699 1 1 14094 16209 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2867 2 1 x2.99 16206 16207 $input=x2.99 $dims=16206 #x2.99=(1,6,6,8,8,192)f32 #16207=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_256 1 1 16207 16208 memory_format=torch.contiguous_format 
$input=16207 #16207=(1,6,8,6,8,192)f32 #16208=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3696 2 1 16065 14094 16213 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16065=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1922 2 1 16208 16209 x3.99 $input=16208 $shape=16209 #16208=(1,6,8,6,8,192)f32 #x3.99=(1,48,48,192)f32 Tensor.view Tensor.view_1923 2 1 x3.99 16213 x4.99 $input=x3.99 $shape=16213 #x3.99=(1,48,48,192)f32 #x4.99=(1,2304,192)f32 pnnx.Expression pnnx_expr_3694 2 1 16065 x4.99 input.387 expr=add(@0,@1) #16065=(1,2304,192)f32 #x4.99=(1,2304,192)f32 #input.387=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.0.norm2 1 1 input.387 16217 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.387=(1,2304,192)f32 #16217=(1,2304,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.0.mlp.fc1 1 1 16217 16222 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16217=(1,2304,192)f32 #16222=(1,2304,384)f32 nn.GELU layers_mmsa.2.residual_group.blocks.0.mlp.act 1 1 16222 16223 #16222=(1,2304,384)f32 #16223=(1,2304,384)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.0.mlp.drop 1 1 16223 16224 #16223=(1,2304,384)f32 #16224=(1,2304,384)f32 nn.Linear layers_mmsa.2.residual_group.blocks.0.mlp.fc2 1 1 16224 16225 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16224=(1,2304,384)f32 #16225=(1,2304,192)f32 nn.Dropout pnnx_unique_630 1 1 16225 16226 #16225=(1,2304,192)f32 #16226=(1,2304,192)f32 pnnx.Expression pnnx_expr_3692 2 1 input.387 16226 16228 expr=add(@0,@1) #input.387=(1,2304,192)f32 #16226=(1,2304,192)f32 #16228=(1,2304,192)f32 pnnx.Attribute layers_mmsa.2.residual_group.blocks.1 0 1 attn_mask.87 @attn_mask=(36,64,64)f32 #attn_mask.87=(36,64,64)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.1.norm1 1 1 16228 16257 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16228=(1,2304,192)f32 #16257=(1,2304,192)f32 pnnx.Expression pnnx_expr_3669 2 1 16228 14094 16258 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16228=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3667 0 1 16260 expr=[-4,-4] pnnx.Expression pnnx_expr_3665 0 1 16261 expr=[1,2] Tensor.view Tensor.view_1924 2 1 16257 16258 x.173 $input=16257 $shape=16258 #16257=(1,2304,192)f32 #x.173=(1,48,48,192)f32 torch.roll torch.roll_2504 3 1 x.173 16260 16261 x0.101 $input=x.173 $shifts=16260 $dims=16261 #x.173=(1,48,48,192)f32 #x0.101=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3643 1 1 x0.101 16278 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.101=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3638 0 1 16280 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1925 2 1 x0.101 16278 x1.101 $input=x0.101 $shape=16278 #x0.101=(1,48,48,192)f32 #x1.101=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3635 1 1 x0.101 16283 expr=[-1,8,8,int(size(@0,3))] #x0.101=(1,48,48,192)f32 torch.permute torch.permute_2868 2 1 x1.101 16280 16281 $input=x1.101 $dims=16280 #x1.101=(1,6,8,6,8,192)f32 #16281=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_257 1 1 16281 16282 memory_format=torch.contiguous_format $input=16281 #16281=(1,6,6,8,8,192)f32 #16282=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3633 1 1 16228 16285 expr=[-1,64,int(size(@0,2))] #16228=(1,2304,192)f32 pnnx.Expression pnnx_expr_3623 0 1 16296 expr=-2 pnnx.Expression pnnx_expr_3622 0 1 16297 expr=-1 
pnnx.Attribute layers_mmsa.2.residual_group.blocks.1.attn 0 1 relative_position_bias_table.173 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.173=(225,6)f32 pnnx.Attribute pnnx_unique_631 0 1 relative_position_index.173 @relative_position_index=(64,64)i64 #relative_position_index.173=(64,64)i64 Tensor.view Tensor.view_1926 2 1 16282 16283 x_windows.173 $input=16282 $shape=16283 #16282=(1,6,6,8,8,192)f32 #x_windows.173=(36,8,8,192)f32 Tensor.view Tensor.view_1927 2 1 x_windows.173 16285 x2.101 $input=x_windows.173 $shape=16285 #x_windows.173=(36,8,8,192)f32 #x2.101=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.1.attn.qkv 1 1 x2.101 16321 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.101=(36,64,192)f32 #16321=(36,64,576)f32 pnnx.Expression pnnx_expr_3603 1 1 x2.101 16324 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.101=(36,64,192)f32 pnnx.Expression pnnx_expr_3598 0 1 16326 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_604 2 1 16321 16324 16325 $input=16321 $shape=16324 #16321=(36,64,576)f32 #16325=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3597 0 1 25462 expr=0 pnnx.Expression pnnx_expr_3596 0 1 25463 expr=0 pnnx.Expression pnnx_expr_3595 0 1 25464 expr=0 pnnx.Expression pnnx_expr_3594 0 1 25465 expr=1 pnnx.Expression pnnx_expr_3593 0 1 25466 expr=0 pnnx.Expression pnnx_expr_3592 0 1 25467 expr=2 torch.permute torch.permute_2869 2 1 16325 16326 qkv0.101 $input=16325 $dims=16326 #16325=(36,64,3,6,32)f32 #qkv0.101=(3,36,6,64,32)f32 Tensor.select Tensor.select_905 3 1 qkv0.101 25462 25463 q.173 $input=qkv0.101 $dim=25462 $index=25463 #qkv0.101=(3,36,6,64,32)f32 #q.173=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3591 1 1 q.173 q0.101 expr=mul(@0,1.767767e-01) #q.173=(36,6,64,32)f32 #q0.101=(36,6,64,32)f32 Tensor.select Tensor.select_906 3 1 qkv0.101 25464 25465 k.173 $input=qkv0.101 $dim=25464 $index=25465 #qkv0.101=(3,36,6,64,32)f32 #k.173=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3589 0 1 16334 expr=[-1] Tensor.view Tensor.view_1928 2 1 relative_position_index.173 16334 16335 $input=relative_position_index.173 $shape=16334 #relative_position_index.173=(64,64)i64 #16335=(4096)i64 pnnx.Expression pnnx_expr_3588 1 1 16335 16336 expr=[@0] #16335=(4096)i64 pnnx.Expression pnnx_expr_3585 0 1 16338 expr=[64,64,-1] Tensor.index Tensor.index_411 2 1 relative_position_bias_table.173 16336 16337 $input=relative_position_bias_table.173 $expr=16336 #relative_position_bias_table.173=(225,6)f32 #16337=(4096,6)f32 pnnx.Expression pnnx_expr_3581 0 1 16340 expr=[2,0,1] Tensor.view Tensor.view_1929 2 1 16337 16338 relative_position_bias.173 $input=16337 $shape=16338 #16337=(4096,6)f32 #relative_position_bias.173=(64,64,6)f32 pnnx.Expression pnnx_expr_3580 0 1 25475 expr=0 torch.permute torch.permute_2870 2 1 relative_position_bias.173 16340 16341 $input=relative_position_bias.173 $dims=16340 #relative_position_bias.173=(64,64,6)f32 #16341=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_258 1 1 16341 relative_position_bias0.101 memory_format=torch.contiguous_format $input=16341 #16341=(6,64,64)f32 #relative_position_bias0.101=(6,64,64)f32 torch.transpose torch.transpose_3163 3 1 k.173 16296 16297 16332 $input=k.173 $dim0=16296 $dim1=16297 #k.173=(36,6,64,32)f32 #16332=(36,6,32,64)f32 torch.matmul torch.matmul_2374 2 1 q0.101 16332 attn.347 $input=q0.101 $other=16332 #q0.101=(36,6,64,32)f32 #16332=(36,6,32,64)f32 #attn.347=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3387 2 1 relative_position_bias0.101 25475 
16343 $input=relative_position_bias0.101 $dim=25475 #relative_position_bias0.101=(6,64,64)f32 #16343=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3578 2 1 attn.347 16343 attn0.51 expr=add(@0,@1) #attn.347=(36,6,64,64)f32 #16343=(1,6,64,64)f32 #attn0.51=(36,6,64,64)f32 pnnx.Expression pnnx_expr_3569 2 1 x2.101 attn_mask.87 16350 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.101=(36,64,192)f32 #attn_mask.87=(36,64,64)f32 pnnx.Expression pnnx_expr_3568 0 1 25480 expr=1 pnnx.Expression pnnx_expr_3567 0 1 25481 expr=0 Tensor.view Tensor.view_1930 2 1 attn0.51 16350 16351 $input=attn0.51 $shape=16350 #attn0.51=(36,6,64,64)f32 #16351=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3388 2 1 attn_mask.87 25480 16352 $input=attn_mask.87 $dim=25480 #attn_mask.87=(36,64,64)f32 #16352=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3389 2 1 16352 25481 16353 $input=16352 $dim=25481 #16352=(36,1,64,64)f32 #16353=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_3565 2 1 16351 16353 attn1.51 expr=add(@0,@1) #16351=(1,36,6,64,64)f32 #16353=(1,36,1,64,64)f32 #attn1.51=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_3562 1 1 x2.101 16355 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.101=(36,64,192)f32 Tensor.view Tensor.view_1931 2 1 attn1.51 16355 input.389 $input=attn1.51 $shape=16355 #attn1.51=(1,36,6,64,64)f32 #input.389=(36,6,64,64)f32 nn.Softmax layers_mmsa.2.residual_group.blocks.1.attn.softmax 1 1 input.389 16357 dim=-1 #input.389=(36,6,64,64)f32 #16357=(36,6,64,64)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.1.attn.attn_drop 1 1 16357 16358 #16357=(36,6,64,64)f32 #16358=(36,6,64,64)f32 Tensor.select Tensor.select_907 3 1 qkv0.101 25466 25467 v.173 $input=qkv0.101 $dim=25466 $index=25467 #qkv0.101=(3,36,6,64,32)f32 #v.173=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3561 0 1 25485 expr=1 pnnx.Expression pnnx_expr_3560 0 1 25486 expr=2 torch.matmul torch.matmul_2375 2 1 16358 v.173 16359 $input=16358 $other=v.173 #16358=(36,6,64,64)f32 #v.173=(36,6,64,32)f32 #16359=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3559 1 1 x2.101 16361 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.101=(36,64,192)f32 torch.transpose torch.transpose_3164 3 1 16359 25485 25486 16360 $input=16359 $dim0=25485 $dim1=25486 #16359=(36,6,64,32)f32 #16360=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_605 2 1 16360 16361 input0.105 $input=16360 $shape=16361 #16360=(36,64,6,32)f32 #input0.105=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.1.attn.proj 1 1 input0.105 16363 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.105=(36,64,192)f32 #16363=(36,64,192)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.1.attn.proj_drop 1 1 16363 16364 #16363=(36,64,192)f32 #16364=(36,64,192)f32 pnnx.Expression pnnx_expr_3555 1 1 16228 16365 expr=[-1,8,8,int(size(@0,2))] #16228=(1,2304,192)f32 pnnx.Expression pnnx_expr_3542 1 1 14094 16371 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3535 0 1 16373 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1932 2 1 16364 16365 windows.173 $input=16364 $shape=16365 #16364=(36,64,192)f32 #windows.173=(36,8,8,192)f32 Tensor.view Tensor.view_1933 2 1 windows.173 16371 x3.101 $input=windows.173 $shape=16371 #windows.173=(36,8,8,192)f32 #x3.101=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3532 1 1 14094 16376 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2871 2 1 x3.101 16373 16374 $input=x3.101 
$dims=16373 #x3.101=(1,6,6,8,8,192)f32 #16374=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_259 1 1 16374 16375 memory_format=torch.contiguous_format $input=16374 #16374=(1,6,8,6,8,192)f32 #16375=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3529 0 1 16378 expr=[4,4] pnnx.Expression pnnx_expr_3526 0 1 16379 expr=[1,2] Tensor.view Tensor.view_1934 2 1 16375 16376 shifted_x.87 $input=16375 $shape=16376 #16375=(1,6,8,6,8,192)f32 #shifted_x.87=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3523 2 1 16228 14094 16383 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16228=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2505 3 1 shifted_x.87 16378 16379 x4.101 $input=shifted_x.87 $shifts=16378 $dims=16379 #shifted_x.87=(1,48,48,192)f32 #x4.101=(1,48,48,192)f32 Tensor.view Tensor.view_1935 2 1 x4.101 16383 x5.87 $input=x4.101 $shape=16383 #x4.101=(1,48,48,192)f32 #x5.87=(1,2304,192)f32 pnnx.Expression pnnx_expr_3521 2 1 16228 x5.87 input.391 expr=add(@0,@1) #16228=(1,2304,192)f32 #x5.87=(1,2304,192)f32 #input.391=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.1.norm2 1 1 input.391 16387 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.391=(1,2304,192)f32 #16387=(1,2304,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.1.mlp.fc1 1 1 16387 16392 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16387=(1,2304,192)f32 #16392=(1,2304,384)f32 nn.GELU layers_mmsa.2.residual_group.blocks.1.mlp.act 1 1 16392 16393 #16392=(1,2304,384)f32 #16393=(1,2304,384)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.1.mlp.drop 1 1 16393 16394 #16393=(1,2304,384)f32 #16394=(1,2304,384)f32 nn.Linear layers_mmsa.2.residual_group.blocks.1.mlp.fc2 1 1 16394 16395 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16394=(1,2304,384)f32 #16395=(1,2304,192)f32 nn.Dropout pnnx_unique_632 1 1 16395 16396 #16395=(1,2304,192)f32 #16396=(1,2304,192)f32 pnnx.Expression pnnx_expr_3519 2 1 input.391 16396 16398 expr=add(@0,@1) #input.391=(1,2304,192)f32 #16396=(1,2304,192)f32 #16398=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.2.norm1 1 1 16398 16425 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16398=(1,2304,192)f32 #16425=(1,2304,192)f32 pnnx.Expression pnnx_expr_3497 2 1 16398 14094 16426 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16398=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1936 2 1 16425 16426 x.175 $input=16425 $shape=16426 #16425=(1,2304,192)f32 #x.175=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3476 1 1 x.175 16443 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.175=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3471 0 1 16445 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1937 2 1 x.175 16443 x0.103 $input=x.175 $shape=16443 #x.175=(1,48,48,192)f32 #x0.103=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3468 1 1 x.175 16448 expr=[-1,8,8,int(size(@0,3))] #x.175=(1,48,48,192)f32 torch.permute torch.permute_2872 2 1 x0.103 16445 16446 $input=x0.103 $dims=16445 #x0.103=(1,6,8,6,8,192)f32 #16446=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_260 1 1 16446 16447 memory_format=torch.contiguous_format $input=16446 #16446=(1,6,6,8,8,192)f32 #16447=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3466 1 1 16398 16450 expr=[-1,64,int(size(@0,2))] #16398=(1,2304,192)f32 pnnx.Expression pnnx_expr_3456 0 1 
16461 expr=-2 pnnx.Expression pnnx_expr_3455 0 1 16462 expr=-1 pnnx.Attribute layers_mmsa.2.residual_group.blocks.2.attn 0 1 relative_position_bias_table.175 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.175=(225,6)f32 pnnx.Attribute pnnx_unique_633 0 1 relative_position_index.175 @relative_position_index=(64,64)i64 #relative_position_index.175=(64,64)i64 Tensor.view Tensor.view_1938 2 1 16447 16448 x_windows.175 $input=16447 $shape=16448 #16447=(1,6,6,8,8,192)f32 #x_windows.175=(36,8,8,192)f32 Tensor.view Tensor.view_1939 2 1 x_windows.175 16450 x1.103 $input=x_windows.175 $shape=16450 #x_windows.175=(36,8,8,192)f32 #x1.103=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.2.attn.qkv 1 1 x1.103 16482 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.103=(36,64,192)f32 #16482=(36,64,576)f32 pnnx.Expression pnnx_expr_3440 1 1 x1.103 16485 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.103=(36,64,192)f32 pnnx.Expression pnnx_expr_3435 0 1 16487 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_606 2 1 16482 16485 16486 $input=16482 $shape=16485 #16482=(36,64,576)f32 #16486=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3434 0 1 25530 expr=0 pnnx.Expression pnnx_expr_3433 0 1 25531 expr=0 pnnx.Expression pnnx_expr_3432 0 1 25532 expr=0 pnnx.Expression pnnx_expr_3431 0 1 25533 expr=1 pnnx.Expression pnnx_expr_3430 0 1 25534 expr=0 pnnx.Expression pnnx_expr_3429 0 1 25535 expr=2 torch.permute torch.permute_2873 2 1 16486 16487 qkv0.103 $input=16486 $dims=16487 #16486=(36,64,3,6,32)f32 #qkv0.103=(3,36,6,64,32)f32 Tensor.select Tensor.select_908 3 1 qkv0.103 25530 25531 q.175 $input=qkv0.103 $dim=25530 $index=25531 #qkv0.103=(3,36,6,64,32)f32 #q.175=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3428 1 1 q.175 q0.103 expr=mul(@0,1.767767e-01) #q.175=(36,6,64,32)f32 #q0.103=(36,6,64,32)f32 Tensor.select Tensor.select_909 3 1 qkv0.103 25532 25533 k.175 $input=qkv0.103 $dim=25532 $index=25533 #qkv0.103=(3,36,6,64,32)f32 #k.175=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3426 0 1 16495 expr=[-1] Tensor.view Tensor.view_1940 2 1 relative_position_index.175 16495 16496 $input=relative_position_index.175 $shape=16495 #relative_position_index.175=(64,64)i64 #16496=(4096)i64 pnnx.Expression pnnx_expr_3425 1 1 16496 16497 expr=[@0] #16496=(4096)i64 pnnx.Expression pnnx_expr_3422 0 1 16499 expr=[64,64,-1] Tensor.index Tensor.index_412 2 1 relative_position_bias_table.175 16497 16498 $input=relative_position_bias_table.175 $expr=16497 #relative_position_bias_table.175=(225,6)f32 #16498=(4096,6)f32 pnnx.Expression pnnx_expr_3418 0 1 16501 expr=[2,0,1] Tensor.view Tensor.view_1941 2 1 16498 16499 relative_position_bias.175 $input=16498 $shape=16499 #16498=(4096,6)f32 #relative_position_bias.175=(64,64,6)f32 pnnx.Expression pnnx_expr_3417 0 1 25543 expr=0 torch.permute torch.permute_2874 2 1 relative_position_bias.175 16501 16502 $input=relative_position_bias.175 $dims=16501 #relative_position_bias.175=(64,64,6)f32 #16502=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_261 1 1 16502 relative_position_bias0.103 memory_format=torch.contiguous_format $input=16502 #16502=(6,64,64)f32 #relative_position_bias0.103=(6,64,64)f32 torch.transpose torch.transpose_3165 3 1 k.175 16461 16462 16493 $input=k.175 $dim0=16461 $dim1=16462 #k.175=(36,6,64,32)f32 #16493=(36,6,32,64)f32 torch.matmul torch.matmul_2376 2 1 q0.103 16493 attn.351 $input=q0.103 $other=16493 #q0.103=(36,6,64,32)f32 #16493=(36,6,32,64)f32 #attn.351=(36,6,64,64)f32 
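
The recurring op pattern above (Tensor.reshape → torch.permute → three Tensor.select ops, a mul by 1.767767e-01, a transpose of k, a matmul, then the relative_position_index gather into relative_position_bias_table) traces a Swin-style window-attention core. Below is a minimal PyTorch sketch of that trace, assuming the shapes annotated in the graph (36 windows, 6 heads, N=64 tokens per window, head_dim=32, so 1.767767e-01 = 32**-0.5); the function name and signature are illustrative, not part of the exported model:

import torch
import torch.nn.functional as F

def window_attention_logits(x, qkv_weight, qkv_bias, bias_table, bias_index, num_heads=6):
    # x: (num_windows, N, C) = (36, 64, 192), matching the x1.*/x2.* tensors in the graph
    nW, N, C = x.shape
    head_dim = C // num_heads                                   # 32
    qkv = F.linear(x, qkv_weight, qkv_bias)                     # (36, 64, 576)
    qkv = qkv.reshape(nW, N, 3, num_heads, head_dim).permute(2, 0, 3, 1, 4)
    q, k, v = qkv[0], qkv[1], qkv[2]                            # each (36, 6, 64, 32)
    q = q * head_dim ** -0.5                                    # the mul(@0,1.767767e-01) ops
    attn = q @ k.transpose(-2, -1)                              # (36, 6, 64, 64)
    # gather the (225, 6) table by the flattened (64, 64) index, as Tensor.index does
    bias = bias_table[bias_index.view(-1)].view(N, N, -1)       # (64, 64, 6)
    attn = attn + bias.permute(2, 0, 1).contiguous().unsqueeze(0)
    return attn, v
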
torch.unsqueeze torch.unsqueeze_3390 2 1 relative_position_bias0.103 25543 16504 $input=relative_position_bias0.103 $dim=25543 #relative_position_bias0.103=(6,64,64)f32 #16504=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3415 2 1 attn.351 16504 input.393 expr=add(@0,@1) #attn.351=(36,6,64,64)f32 #16504=(1,6,64,64)f32 #input.393=(36,6,64,64)f32 nn.Softmax layers_mmsa.2.residual_group.blocks.2.attn.softmax 1 1 input.393 16506 dim=-1 #input.393=(36,6,64,64)f32 #16506=(36,6,64,64)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.2.attn.attn_drop 1 1 16506 16507 #16506=(36,6,64,64)f32 #16507=(36,6,64,64)f32 Tensor.select Tensor.select_910 3 1 qkv0.103 25534 25535 v.175 $input=qkv0.103 $dim=25534 $index=25535 #qkv0.103=(3,36,6,64,32)f32 #v.175=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3414 0 1 25545 expr=1 pnnx.Expression pnnx_expr_3413 0 1 25546 expr=2 torch.matmul torch.matmul_2377 2 1 16507 v.175 16508 $input=16507 $other=v.175 #16507=(36,6,64,64)f32 #v.175=(36,6,64,32)f32 #16508=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3412 1 1 x1.103 16510 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.103=(36,64,192)f32 torch.transpose torch.transpose_3166 3 1 16508 25545 25546 16509 $input=16508 $dim0=25545 $dim1=25546 #16508=(36,6,64,32)f32 #16509=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_607 2 1 16509 16510 input0.107 $input=16509 $shape=16510 #16509=(36,64,6,32)f32 #input0.107=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.2.attn.proj 1 1 input0.107 16512 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.107=(36,64,192)f32 #16512=(36,64,192)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.2.attn.proj_drop 1 1 16512 16513 #16512=(36,64,192)f32 #16513=(36,64,192)f32 pnnx.Expression pnnx_expr_3408 1 1 16398 16514 expr=[-1,8,8,int(size(@0,2))] #16398=(1,2304,192)f32 pnnx.Expression pnnx_expr_3395 1 1 14094 16520 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3388 0 1 16522 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1942 2 1 16513 16514 windows.175 $input=16513 $shape=16514 #16513=(36,64,192)f32 #windows.175=(36,8,8,192)f32 Tensor.view Tensor.view_1943 2 1 windows.175 16520 x2.103 $input=windows.175 $shape=16520 #windows.175=(36,8,8,192)f32 #x2.103=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3385 1 1 14094 16525 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2875 2 1 x2.103 16522 16523 $input=x2.103 $dims=16522 #x2.103=(1,6,6,8,8,192)f32 #16523=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_262 1 1 16523 16524 memory_format=torch.contiguous_format $input=16523 #16523=(1,6,8,6,8,192)f32 #16524=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3382 2 1 16398 14094 16529 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16398=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1944 2 1 16524 16525 x3.103 $input=16524 $shape=16525 #16524=(1,6,8,6,8,192)f32 #x3.103=(1,48,48,192)f32 Tensor.view Tensor.view_1945 2 1 x3.103 16529 x4.103 $input=x3.103 $shape=16529 #x3.103=(1,48,48,192)f32 #x4.103=(1,2304,192)f32 pnnx.Expression pnnx_expr_3380 2 1 16398 x4.103 input.395 expr=add(@0,@1) #16398=(1,2304,192)f32 #x4.103=(1,2304,192)f32 #input.395=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.2.norm2 1 1 input.395 16533 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.395=(1,2304,192)f32 #16533=(1,2304,192)f32 nn.Linear 
layers_mmsa.2.residual_group.blocks.2.mlp.fc1 1 1 16533 16538 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16533=(1,2304,192)f32 #16538=(1,2304,384)f32 nn.GELU layers_mmsa.2.residual_group.blocks.2.mlp.act 1 1 16538 16539 #16538=(1,2304,384)f32 #16539=(1,2304,384)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.2.mlp.drop 1 1 16539 16540 #16539=(1,2304,384)f32 #16540=(1,2304,384)f32 nn.Linear layers_mmsa.2.residual_group.blocks.2.mlp.fc2 1 1 16540 16541 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16540=(1,2304,384)f32 #16541=(1,2304,192)f32 nn.Dropout pnnx_unique_634 1 1 16541 16542 #16541=(1,2304,192)f32 #16542=(1,2304,192)f32 pnnx.Expression pnnx_expr_3378 2 1 input.395 16542 16544 expr=add(@0,@1) #input.395=(1,2304,192)f32 #16542=(1,2304,192)f32 #16544=(1,2304,192)f32 pnnx.Attribute layers_mmsa.2.residual_group.blocks.3 0 1 attn_mask.89 @attn_mask=(36,64,64)f32 #attn_mask.89=(36,64,64)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.3.norm1 1 1 16544 16573 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16544=(1,2304,192)f32 #16573=(1,2304,192)f32 pnnx.Expression pnnx_expr_3355 2 1 16544 14094 16574 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16544=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3353 0 1 16576 expr=[-4,-4] pnnx.Expression pnnx_expr_3351 0 1 16577 expr=[1,2] Tensor.view Tensor.view_1946 2 1 16573 16574 x.177 $input=16573 $shape=16574 #16573=(1,2304,192)f32 #x.177=(1,48,48,192)f32 torch.roll torch.roll_2506 3 1 x.177 16576 16577 x0.105 $input=x.177 $shifts=16576 $dims=16577 #x.177=(1,48,48,192)f32 #x0.105=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3329 1 1 x0.105 16594 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.105=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3324 0 1 16596 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1947 2 1 x0.105 16594 x1.105 $input=x0.105 $shape=16594 #x0.105=(1,48,48,192)f32 #x1.105=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3321 1 1 x0.105 16599 expr=[-1,8,8,int(size(@0,3))] #x0.105=(1,48,48,192)f32 torch.permute torch.permute_2876 2 1 x1.105 16596 16597 $input=x1.105 $dims=16596 #x1.105=(1,6,8,6,8,192)f32 #16597=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_263 1 1 16597 16598 memory_format=torch.contiguous_format $input=16597 #16597=(1,6,6,8,8,192)f32 #16598=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3319 1 1 16544 16601 expr=[-1,64,int(size(@0,2))] #16544=(1,2304,192)f32 pnnx.Expression pnnx_expr_3309 0 1 16612 expr=-2 pnnx.Expression pnnx_expr_3308 0 1 16613 expr=-1 pnnx.Attribute layers_mmsa.2.residual_group.blocks.3.attn 0 1 relative_position_bias_table.177 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.177=(225,6)f32 pnnx.Attribute pnnx_unique_635 0 1 relative_position_index.177 @relative_position_index=(64,64)i64 #relative_position_index.177=(64,64)i64 Tensor.view Tensor.view_1948 2 1 16598 16599 x_windows.177 $input=16598 $shape=16599 #16598=(1,6,6,8,8,192)f32 #x_windows.177=(36,8,8,192)f32 Tensor.view Tensor.view_1949 2 1 x_windows.177 16601 x2.105 $input=x_windows.177 $shape=16601 #x_windows.177=(36,8,8,192)f32 #x2.105=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.3.attn.qkv 1 1 x2.105 16637 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.105=(36,64,192)f32 #16637=(36,64,576)f32 pnnx.Expression pnnx_expr_3289 1 1 x2.105 16640 
expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.105=(36,64,192)f32 pnnx.Expression pnnx_expr_3284 0 1 16642 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_608 2 1 16637 16640 16641 $input=16637 $shape=16640 #16637=(36,64,576)f32 #16641=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3283 0 1 25589 expr=0 pnnx.Expression pnnx_expr_3282 0 1 25590 expr=0 pnnx.Expression pnnx_expr_3281 0 1 25591 expr=0 pnnx.Expression pnnx_expr_3280 0 1 25592 expr=1 pnnx.Expression pnnx_expr_3279 0 1 25593 expr=0 pnnx.Expression pnnx_expr_3278 0 1 25594 expr=2 torch.permute torch.permute_2877 2 1 16641 16642 qkv0.105 $input=16641 $dims=16642 #16641=(36,64,3,6,32)f32 #qkv0.105=(3,36,6,64,32)f32 Tensor.select Tensor.select_911 3 1 qkv0.105 25589 25590 q.177 $input=qkv0.105 $dim=25589 $index=25590 #qkv0.105=(3,36,6,64,32)f32 #q.177=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3277 1 1 q.177 q0.105 expr=mul(@0,1.767767e-01) #q.177=(36,6,64,32)f32 #q0.105=(36,6,64,32)f32 Tensor.select Tensor.select_912 3 1 qkv0.105 25591 25592 k.177 $input=qkv0.105 $dim=25591 $index=25592 #qkv0.105=(3,36,6,64,32)f32 #k.177=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3275 0 1 16650 expr=[-1] Tensor.view Tensor.view_1950 2 1 relative_position_index.177 16650 16651 $input=relative_position_index.177 $shape=16650 #relative_position_index.177=(64,64)i64 #16651=(4096)i64 pnnx.Expression pnnx_expr_3274 1 1 16651 16652 expr=[@0] #16651=(4096)i64 pnnx.Expression pnnx_expr_3271 0 1 16654 expr=[64,64,-1] Tensor.index Tensor.index_413 2 1 relative_position_bias_table.177 16652 16653 $input=relative_position_bias_table.177 $expr=16652 #relative_position_bias_table.177=(225,6)f32 #16653=(4096,6)f32 pnnx.Expression pnnx_expr_3267 0 1 16656 expr=[2,0,1] Tensor.view Tensor.view_1951 2 1 16653 16654 relative_position_bias.177 $input=16653 $shape=16654 #16653=(4096,6)f32 #relative_position_bias.177=(64,64,6)f32 pnnx.Expression pnnx_expr_3266 0 1 25602 expr=0 torch.permute torch.permute_2878 2 1 relative_position_bias.177 16656 16657 $input=relative_position_bias.177 $dims=16656 #relative_position_bias.177=(64,64,6)f32 #16657=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_264 1 1 16657 relative_position_bias0.105 memory_format=torch.contiguous_format $input=16657 #16657=(6,64,64)f32 #relative_position_bias0.105=(6,64,64)f32 torch.transpose torch.transpose_3167 3 1 k.177 16612 16613 16648 $input=k.177 $dim0=16612 $dim1=16613 #k.177=(36,6,64,32)f32 #16648=(36,6,32,64)f32 torch.matmul torch.matmul_2378 2 1 q0.105 16648 attn.355 $input=q0.105 $other=16648 #q0.105=(36,6,64,32)f32 #16648=(36,6,32,64)f32 #attn.355=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3391 2 1 relative_position_bias0.105 25602 16659 $input=relative_position_bias0.105 $dim=25602 #relative_position_bias0.105=(6,64,64)f32 #16659=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3264 2 1 attn.355 16659 attn0.53 expr=add(@0,@1) #attn.355=(36,6,64,64)f32 #16659=(1,6,64,64)f32 #attn0.53=(36,6,64,64)f32 pnnx.Expression pnnx_expr_3255 2 1 x2.105 attn_mask.89 16666 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.105=(36,64,192)f32 #attn_mask.89=(36,64,64)f32 pnnx.Expression pnnx_expr_3254 0 1 25607 expr=1 pnnx.Expression pnnx_expr_3253 0 1 25608 expr=0 Tensor.view Tensor.view_1952 2 1 attn0.53 16666 16667 $input=attn0.53 $shape=16666 #attn0.53=(36,6,64,64)f32 #16667=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3392 2 1 attn_mask.89 25607 16668 $input=attn_mask.89 $dim=25607 #attn_mask.89=(36,64,64)f32 #16668=(36,1,64,64)f32 
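
The unsqueeze/view/add run in the shifted blocks (blocks 1, 3, 5 of each group) is the shifted-window attention mask being broadcast onto the logits: attn is viewed as (1, nW, heads, N, N), the (36, 64, 64) attn_mask.* attribute is unsqueezed to (1, 36, 1, 64, 64), the two are added, and the sum is viewed back to (36, 6, 64, 64) before softmax. A hedged sketch of the same arithmetic (the helper name is mine, not from the graph):

import torch

def apply_shift_mask(attn, attn_mask, num_heads=6):
    # attn: (B*nW, heads, N, N) = (36, 6, 64, 64); attn_mask: (nW, N, N) = (36, 64, 64)
    nW, N = attn_mask.shape[0], attn_mask.shape[-1]
    attn = attn.view(-1, nW, num_heads, N, N)          # (1, 36, 6, 64, 64)
    attn = attn + attn_mask.unsqueeze(1).unsqueeze(0)  # (1, 36, 1, 64, 64) broadcast
    return torch.softmax(attn.view(-1, num_heads, N, N), dim=-1)
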
torch.unsqueeze torch.unsqueeze_3393 2 1 16668 25608 16669 $input=16668 $dim=25608 #16668=(36,1,64,64)f32 #16669=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_3251 2 1 16667 16669 attn1.53 expr=add(@0,@1) #16667=(1,36,6,64,64)f32 #16669=(1,36,1,64,64)f32 #attn1.53=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_3248 1 1 x2.105 16671 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.105=(36,64,192)f32 Tensor.view Tensor.view_1953 2 1 attn1.53 16671 input.397 $input=attn1.53 $shape=16671 #attn1.53=(1,36,6,64,64)f32 #input.397=(36,6,64,64)f32 nn.Softmax layers_mmsa.2.residual_group.blocks.3.attn.softmax 1 1 input.397 16673 dim=-1 #input.397=(36,6,64,64)f32 #16673=(36,6,64,64)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.3.attn.attn_drop 1 1 16673 16674 #16673=(36,6,64,64)f32 #16674=(36,6,64,64)f32 Tensor.select Tensor.select_913 3 1 qkv0.105 25593 25594 v.177 $input=qkv0.105 $dim=25593 $index=25594 #qkv0.105=(3,36,6,64,32)f32 #v.177=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3247 0 1 25612 expr=1 pnnx.Expression pnnx_expr_3246 0 1 25613 expr=2 torch.matmul torch.matmul_2379 2 1 16674 v.177 16675 $input=16674 $other=v.177 #16674=(36,6,64,64)f32 #v.177=(36,6,64,32)f32 #16675=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3245 1 1 x2.105 16677 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.105=(36,64,192)f32 torch.transpose torch.transpose_3168 3 1 16675 25612 25613 16676 $input=16675 $dim0=25612 $dim1=25613 #16675=(36,6,64,32)f32 #16676=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_609 2 1 16676 16677 input0.109 $input=16676 $shape=16677 #16676=(36,64,6,32)f32 #input0.109=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.3.attn.proj 1 1 input0.109 16679 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.109=(36,64,192)f32 #16679=(36,64,192)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.3.attn.proj_drop 1 1 16679 16680 #16679=(36,64,192)f32 #16680=(36,64,192)f32 pnnx.Expression pnnx_expr_3241 1 1 16544 16681 expr=[-1,8,8,int(size(@0,2))] #16544=(1,2304,192)f32 pnnx.Expression pnnx_expr_3228 1 1 14094 16687 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3221 0 1 16689 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1954 2 1 16680 16681 windows.177 $input=16680 $shape=16681 #16680=(36,64,192)f32 #windows.177=(36,8,8,192)f32 Tensor.view Tensor.view_1955 2 1 windows.177 16687 x3.105 $input=windows.177 $shape=16687 #windows.177=(36,8,8,192)f32 #x3.105=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3218 1 1 14094 16692 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2879 2 1 x3.105 16689 16690 $input=x3.105 $dims=16689 #x3.105=(1,6,6,8,8,192)f32 #16690=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_265 1 1 16690 16691 memory_format=torch.contiguous_format $input=16690 #16690=(1,6,8,6,8,192)f32 #16691=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3215 0 1 16694 expr=[4,4] pnnx.Expression pnnx_expr_3212 0 1 16695 expr=[1,2] Tensor.view Tensor.view_1956 2 1 16691 16692 shifted_x.89 $input=16691 $shape=16692 #16691=(1,6,8,6,8,192)f32 #shifted_x.89=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3209 2 1 16544 14094 16699 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16544=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2507 3 1 shifted_x.89 16694 16695 x4.105 $input=shifted_x.89 $shifts=16694 $dims=16695 #shifted_x.89=(1,48,48,192)f32 #x4.105=(1,48,48,192)f32 Tensor.view Tensor.view_1957 2 1 x4.105 16699 
x5.89 $input=x4.105 $shape=16699 #x4.105=(1,48,48,192)f32 #x5.89=(1,2304,192)f32 pnnx.Expression pnnx_expr_3207 2 1 16544 x5.89 input.399 expr=add(@0,@1) #16544=(1,2304,192)f32 #x5.89=(1,2304,192)f32 #input.399=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.3.norm2 1 1 input.399 16703 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.399=(1,2304,192)f32 #16703=(1,2304,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.3.mlp.fc1 1 1 16703 16708 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16703=(1,2304,192)f32 #16708=(1,2304,384)f32 nn.GELU layers_mmsa.2.residual_group.blocks.3.mlp.act 1 1 16708 16709 #16708=(1,2304,384)f32 #16709=(1,2304,384)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.3.mlp.drop 1 1 16709 16710 #16709=(1,2304,384)f32 #16710=(1,2304,384)f32 nn.Linear layers_mmsa.2.residual_group.blocks.3.mlp.fc2 1 1 16710 16711 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16710=(1,2304,384)f32 #16711=(1,2304,192)f32 nn.Dropout pnnx_unique_636 1 1 16711 16712 #16711=(1,2304,192)f32 #16712=(1,2304,192)f32 pnnx.Expression pnnx_expr_3205 2 1 input.399 16712 16714 expr=add(@0,@1) #input.399=(1,2304,192)f32 #16712=(1,2304,192)f32 #16714=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.4.norm1 1 1 16714 16741 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16714=(1,2304,192)f32 #16741=(1,2304,192)f32 pnnx.Expression pnnx_expr_3183 2 1 16714 14094 16742 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16714=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1958 2 1 16741 16742 x.179 $input=16741 $shape=16742 #16741=(1,2304,192)f32 #x.179=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3162 1 1 x.179 16759 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.179=(1,48,48,192)f32 pnnx.Expression pnnx_expr_3157 0 1 16761 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1959 2 1 x.179 16759 x0.107 $input=x.179 $shape=16759 #x.179=(1,48,48,192)f32 #x0.107=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3154 1 1 x.179 16764 expr=[-1,8,8,int(size(@0,3))] #x.179=(1,48,48,192)f32 torch.permute torch.permute_2880 2 1 x0.107 16761 16762 $input=x0.107 $dims=16761 #x0.107=(1,6,8,6,8,192)f32 #16762=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_266 1 1 16762 16763 memory_format=torch.contiguous_format $input=16762 #16762=(1,6,6,8,8,192)f32 #16763=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3152 1 1 16714 16766 expr=[-1,64,int(size(@0,2))] #16714=(1,2304,192)f32 pnnx.Expression pnnx_expr_3142 0 1 16777 expr=-2 pnnx.Expression pnnx_expr_3141 0 1 16778 expr=-1 pnnx.Attribute layers_mmsa.2.residual_group.blocks.4.attn 0 1 relative_position_bias_table.179 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.179=(225,6)f32 pnnx.Attribute pnnx_unique_637 0 1 relative_position_index.179 @relative_position_index=(64,64)i64 #relative_position_index.179=(64,64)i64 Tensor.view Tensor.view_1960 2 1 16763 16764 x_windows.179 $input=16763 $shape=16764 #16763=(1,6,6,8,8,192)f32 #x_windows.179=(36,8,8,192)f32 Tensor.view Tensor.view_1961 2 1 x_windows.179 16766 x1.107 $input=x_windows.179 $shape=16766 #x_windows.179=(36,8,8,192)f32 #x1.107=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.4.attn.qkv 1 1 x1.107 16798 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.107=(36,64,192)f32 
#16798=(36,64,576)f32 pnnx.Expression pnnx_expr_3126 1 1 x1.107 16801 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.107=(36,64,192)f32 pnnx.Expression pnnx_expr_3121 0 1 16803 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_610 2 1 16798 16801 16802 $input=16798 $shape=16801 #16798=(36,64,576)f32 #16802=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_3120 0 1 25657 expr=0 pnnx.Expression pnnx_expr_3119 0 1 25658 expr=0 pnnx.Expression pnnx_expr_3118 0 1 25659 expr=0 pnnx.Expression pnnx_expr_3117 0 1 25660 expr=1 pnnx.Expression pnnx_expr_3116 0 1 25661 expr=0 pnnx.Expression pnnx_expr_3115 0 1 25662 expr=2 torch.permute torch.permute_2881 2 1 16802 16803 qkv0.107 $input=16802 $dims=16803 #16802=(36,64,3,6,32)f32 #qkv0.107=(3,36,6,64,32)f32 Tensor.select Tensor.select_914 3 1 qkv0.107 25657 25658 q.179 $input=qkv0.107 $dim=25657 $index=25658 #qkv0.107=(3,36,6,64,32)f32 #q.179=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3114 1 1 q.179 q0.107 expr=mul(@0,1.767767e-01) #q.179=(36,6,64,32)f32 #q0.107=(36,6,64,32)f32 Tensor.select Tensor.select_915 3 1 qkv0.107 25659 25660 k.179 $input=qkv0.107 $dim=25659 $index=25660 #qkv0.107=(3,36,6,64,32)f32 #k.179=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3112 0 1 16811 expr=[-1] Tensor.view Tensor.view_1962 2 1 relative_position_index.179 16811 16812 $input=relative_position_index.179 $shape=16811 #relative_position_index.179=(64,64)i64 #16812=(4096)i64 pnnx.Expression pnnx_expr_3111 1 1 16812 16813 expr=[@0] #16812=(4096)i64 pnnx.Expression pnnx_expr_3108 0 1 16815 expr=[64,64,-1] Tensor.index Tensor.index_414 2 1 relative_position_bias_table.179 16813 16814 $input=relative_position_bias_table.179 $expr=16813 #relative_position_bias_table.179=(225,6)f32 #16814=(4096,6)f32 pnnx.Expression pnnx_expr_3104 0 1 16817 expr=[2,0,1] Tensor.view Tensor.view_1963 2 1 16814 16815 relative_position_bias.179 $input=16814 $shape=16815 #16814=(4096,6)f32 #relative_position_bias.179=(64,64,6)f32 pnnx.Expression pnnx_expr_3103 0 1 25670 expr=0 torch.permute torch.permute_2882 2 1 relative_position_bias.179 16817 16818 $input=relative_position_bias.179 $dims=16817 #relative_position_bias.179=(64,64,6)f32 #16818=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_267 1 1 16818 relative_position_bias0.107 memory_format=torch.contiguous_format $input=16818 #16818=(6,64,64)f32 #relative_position_bias0.107=(6,64,64)f32 torch.transpose torch.transpose_3169 3 1 k.179 16777 16778 16809 $input=k.179 $dim0=16777 $dim1=16778 #k.179=(36,6,64,32)f32 #16809=(36,6,32,64)f32 torch.matmul torch.matmul_2380 2 1 q0.107 16809 attn.359 $input=q0.107 $other=16809 #q0.107=(36,6,64,32)f32 #16809=(36,6,32,64)f32 #attn.359=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3394 2 1 relative_position_bias0.107 25670 16820 $input=relative_position_bias0.107 $dim=25670 #relative_position_bias0.107=(6,64,64)f32 #16820=(1,6,64,64)f32 pnnx.Expression pnnx_expr_3101 2 1 attn.359 16820 input.401 expr=add(@0,@1) #attn.359=(36,6,64,64)f32 #16820=(1,6,64,64)f32 #input.401=(36,6,64,64)f32 nn.Softmax layers_mmsa.2.residual_group.blocks.4.attn.softmax 1 1 input.401 16822 dim=-1 #input.401=(36,6,64,64)f32 #16822=(36,6,64,64)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.4.attn.attn_drop 1 1 16822 16823 #16822=(36,6,64,64)f32 #16823=(36,6,64,64)f32 Tensor.select Tensor.select_916 3 1 qkv0.107 25661 25662 v.179 $input=qkv0.107 $dim=25661 $index=25662 #qkv0.107=(3,36,6,64,32)f32 #v.179=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3100 0 1 25672 expr=1 pnnx.Expression pnnx_expr_3099 0 1 25673 expr=2 
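
The view → permute([0,1,3,2,4,5]) → contiguous → view chains bracketing every attention block are window partition and its inverse; the shifted blocks additionally torch.roll by (-4, -4) before partitioning and by (4, 4) after reversing. A sketch under the shapes annotated here (48×48 feature grid, 8×8 windows, hence 36 windows of 64 tokens; helper names are mine):

import torch

def window_partition(x, ws=8):
    # x: (B, H, W, C) = (1, 48, 48, 192) -> (B*nW, ws*ws, C) = (36, 64, 192)
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()       # the [0,1,3,2,4,5] permutes above
    return x.view(-1, ws * ws, C)

def window_reverse(windows, H=48, W=48, ws=8):
    # inverse chain: (36, 64, 192) -> (1, 48, 48, 192)
    C = windows.shape[-1]
    x = windows.view(1, H // ws, W // ws, ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(1, H, W, C)

# Shifted blocks wrap this pair in a pre-roll and a post-roll, as the
# torch.roll ops with shifts [-4,-4] and [4,4] do in the graph:
#   x = torch.roll(x, shifts=(-4, -4), dims=(1, 2))    # before window_partition
#   x = torch.roll(x, shifts=(4, 4), dims=(1, 2))      # after window_reverse
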
torch.matmul torch.matmul_2381 2 1 16823 v.179 16824 $input=16823 $other=v.179 #16823=(36,6,64,64)f32 #v.179=(36,6,64,32)f32 #16824=(36,6,64,32)f32 pnnx.Expression pnnx_expr_3098 1 1 x1.107 16826 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.107=(36,64,192)f32 torch.transpose torch.transpose_3170 3 1 16824 25672 25673 16825 $input=16824 $dim0=25672 $dim1=25673 #16824=(36,6,64,32)f32 #16825=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_611 2 1 16825 16826 input0.111 $input=16825 $shape=16826 #16825=(36,64,6,32)f32 #input0.111=(36,64,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.4.attn.proj 1 1 input0.111 16828 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.111=(36,64,192)f32 #16828=(36,64,192)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.4.attn.proj_drop 1 1 16828 16829 #16828=(36,64,192)f32 #16829=(36,64,192)f32 pnnx.Expression pnnx_expr_3094 1 1 16714 16830 expr=[-1,8,8,int(size(@0,2))] #16714=(1,2304,192)f32 pnnx.Expression pnnx_expr_3081 1 1 14094 16836 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_3074 0 1 16838 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1964 2 1 16829 16830 windows.179 $input=16829 $shape=16830 #16829=(36,64,192)f32 #windows.179=(36,8,8,192)f32 Tensor.view Tensor.view_1965 2 1 windows.179 16836 x2.107 $input=windows.179 $shape=16836 #windows.179=(36,8,8,192)f32 #x2.107=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_3071 1 1 14094 16841 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2883 2 1 x2.107 16838 16839 $input=x2.107 $dims=16838 #x2.107=(1,6,6,8,8,192)f32 #16839=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_268 1 1 16839 16840 memory_format=torch.contiguous_format $input=16839 #16839=(1,6,8,6,8,192)f32 #16840=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_3068 2 1 16714 14094 16845 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16714=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1966 2 1 16840 16841 x3.107 $input=16840 $shape=16841 #16840=(1,6,8,6,8,192)f32 #x3.107=(1,48,48,192)f32 Tensor.view Tensor.view_1967 2 1 x3.107 16845 x4.107 $input=x3.107 $shape=16845 #x3.107=(1,48,48,192)f32 #x4.107=(1,2304,192)f32 pnnx.Expression pnnx_expr_3066 2 1 16714 x4.107 input.403 expr=add(@0,@1) #16714=(1,2304,192)f32 #x4.107=(1,2304,192)f32 #input.403=(1,2304,192)f32 nn.LayerNorm layers_mmsa.2.residual_group.blocks.4.norm2 1 1 input.403 16849 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.403=(1,2304,192)f32 #16849=(1,2304,192)f32 nn.Linear layers_mmsa.2.residual_group.blocks.4.mlp.fc1 1 1 16849 16854 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #16849=(1,2304,192)f32 #16854=(1,2304,384)f32 nn.GELU layers_mmsa.2.residual_group.blocks.4.mlp.act 1 1 16854 16855 #16854=(1,2304,384)f32 #16855=(1,2304,384)f32 nn.Dropout layers_mmsa.2.residual_group.blocks.4.mlp.drop 1 1 16855 16856 #16855=(1,2304,384)f32 #16856=(1,2304,384)f32 nn.Linear layers_mmsa.2.residual_group.blocks.4.mlp.fc2 1 1 16856 16857 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #16856=(1,2304,384)f32 #16857=(1,2304,192)f32 nn.Dropout pnnx_unique_638 1 1 16857 16858 #16857=(1,2304,192)f32 #16858=(1,2304,192)f32 pnnx.Expression pnnx_expr_3064 2 1 input.403 16858 16860 expr=add(@0,@1) #input.403=(1,2304,192)f32 #16858=(1,2304,192)f32 #16860=(1,2304,192)f32 pnnx.Attribute 
pnnx.Attribute layers_mmsa.2.residual_group.blocks.5 0 1 attn_mask.91 @attn_mask=(36,64,64)f32 #attn_mask.91=(36,64,64)f32
nn.LayerNorm layers_mmsa.2.residual_group.blocks.5.norm1 1 1 16860 16889 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #16860=(1,2304,192)f32 #16889=(1,2304,192)f32
pnnx.Expression pnnx_expr_3041 2 1 16860 14094 16890 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #16860=(1,2304,192)f32 #14094=(1,192,48,48)f32
pnnx.Expression pnnx_expr_3039 0 1 16892 expr=[-4,-4]
pnnx.Expression pnnx_expr_3037 0 1 16893 expr=[1,2]
Tensor.view Tensor.view_1968 2 1 16889 16890 x.181 $input=16889 $shape=16890 #16889=(1,2304,192)f32 #x.181=(1,48,48,192)f32
torch.roll torch.roll_2508 3 1 x.181 16892 16893 x0.109 $input=x.181 $shifts=16892 $dims=16893 #x.181=(1,48,48,192)f32 #x0.109=(1,48,48,192)f32
pnnx.Expression pnnx_expr_3015 1 1 x0.109 16910 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.109=(1,48,48,192)f32
pnnx.Expression pnnx_expr_3010 0 1 16912 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1969 2 1 x0.109 16910 x1.109 $input=x0.109 $shape=16910 #x0.109=(1,48,48,192)f32 #x1.109=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_3007 1 1 x0.109 16915 expr=[-1,8,8,int(size(@0,3))] #x0.109=(1,48,48,192)f32
torch.permute torch.permute_2884 2 1 x1.109 16912 16913 $input=x1.109 $dims=16912 #x1.109=(1,6,8,6,8,192)f32 #16913=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_269 1 1 16913 16914 memory_format=torch.contiguous_format $input=16913 #16913=(1,6,6,8,8,192)f32 #16914=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_3005 1 1 16860 16917 expr=[-1,64,int(size(@0,2))] #16860=(1,2304,192)f32
pnnx.Expression pnnx_expr_2995 0 1 16928 expr=-2
pnnx.Expression pnnx_expr_2994 0 1 16929 expr=-1
pnnx.Attribute layers_mmsa.2.residual_group.blocks.5.attn 0 1 relative_position_bias_table.181 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.181=(225,6)f32
pnnx.Attribute pnnx_unique_639 0 1 relative_position_index.181 @relative_position_index=(64,64)i64 #relative_position_index.181=(64,64)i64
Tensor.view Tensor.view_1970 2 1 16914 16915 x_windows.181 $input=16914 $shape=16915 #16914=(1,6,6,8,8,192)f32 #x_windows.181=(36,8,8,192)f32
Tensor.view Tensor.view_1971 2 1 x_windows.181 16917 x2.109 $input=x_windows.181 $shape=16917 #x_windows.181=(36,8,8,192)f32 #x2.109=(36,64,192)f32
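Block 5 is a shifted block: it carries a precomputed (36,64,64) attn_mask attribute, and the feature map is cyclically rolled by (-4,-4) before being partitioned into 8x8 windows, so each window straddles the previous window borders. A sketch of shift-plus-partition as the graph encodes it (illustrative helper, not model source):

    import torch

    def shift_and_partition(x, win=8, shift=4):
        # x: (B, H, W, C); cyclic shift, then split into win x win windows
        x = torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))
        B, H, W, C = x.shape
        x = x.view(B, H // win, win, W // win, win, C)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
        return x.view(-1, win * win, C)          # (B*nW, 64, C); nW = 36 here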
nn.Linear layers_mmsa.2.residual_group.blocks.5.attn.qkv 1 1 x2.109 16953 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.109=(36,64,192)f32 #16953=(36,64,576)f32
pnnx.Expression pnnx_expr_2975 1 1 x2.109 16956 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.109=(36,64,192)f32
pnnx.Expression pnnx_expr_2970 0 1 16958 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_612 2 1 16953 16956 16957 $input=16953 $shape=16956 #16953=(36,64,576)f32 #16957=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_2969 0 1 25716 expr=0
pnnx.Expression pnnx_expr_2968 0 1 25717 expr=0
pnnx.Expression pnnx_expr_2967 0 1 25718 expr=0
pnnx.Expression pnnx_expr_2966 0 1 25719 expr=1
pnnx.Expression pnnx_expr_2965 0 1 25720 expr=0
pnnx.Expression pnnx_expr_2964 0 1 25721 expr=2
torch.permute torch.permute_2885 2 1 16957 16958 qkv0.109 $input=16957 $dims=16958 #16957=(36,64,3,6,32)f32 #qkv0.109=(3,36,6,64,32)f32
Tensor.select Tensor.select_917 3 1 qkv0.109 25716 25717 q.181 $input=qkv0.109 $dim=25716 $index=25717 #qkv0.109=(3,36,6,64,32)f32 #q.181=(36,6,64,32)f32
pnnx.Expression pnnx_expr_2963 1 1 q.181 q0.109 expr=mul(@0,1.767767e-01) #q.181=(36,6,64,32)f32 #q0.109=(36,6,64,32)f32
Tensor.select Tensor.select_918 3 1 qkv0.109 25718 25719 k.181 $input=qkv0.109 $dim=25718 $index=25719 #qkv0.109=(3,36,6,64,32)f32 #k.181=(36,6,64,32)f32
pnnx.Expression pnnx_expr_2961 0 1 16966 expr=[-1]
Tensor.view Tensor.view_1972 2 1 relative_position_index.181 16966 16967 $input=relative_position_index.181 $shape=16966 #relative_position_index.181=(64,64)i64 #16967=(4096)i64
pnnx.Expression pnnx_expr_2960 1 1 16967 16968 expr=[@0] #16967=(4096)i64
pnnx.Expression pnnx_expr_2957 0 1 16970 expr=[64,64,-1]
Tensor.index Tensor.index_415 2 1 relative_position_bias_table.181 16968 16969 $input=relative_position_bias_table.181 $expr=16968 #relative_position_bias_table.181=(225,6)f32 #16969=(4096,6)f32
pnnx.Expression pnnx_expr_2953 0 1 16972 expr=[2,0,1]
Tensor.view Tensor.view_1973 2 1 16969 16970 relative_position_bias.181 $input=16969 $shape=16970 #16969=(4096,6)f32 #relative_position_bias.181=(64,64,6)f32
pnnx.Expression pnnx_expr_2952 0 1 25729 expr=0
torch.permute torch.permute_2886 2 1 relative_position_bias.181 16972 16973 $input=relative_position_bias.181 $dims=16972 #relative_position_bias.181=(64,64,6)f32 #16973=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_270 1 1 16973 relative_position_bias0.109 memory_format=torch.contiguous_format $input=16973 #16973=(6,64,64)f32 #relative_position_bias0.109=(6,64,64)f32
torch.transpose torch.transpose_3171 3 1 k.181 16928 16929 16964 $input=k.181 $dim0=16928 $dim1=16929 #k.181=(36,6,64,32)f32 #16964=(36,6,32,64)f32
torch.matmul torch.matmul_2382 2 1 q0.109 16964 attn.363 $input=q0.109 $other=16964 #q0.109=(36,6,64,32)f32 #16964=(36,6,32,64)f32 #attn.363=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3395 2 1 relative_position_bias0.109 25729 16975 $input=relative_position_bias0.109 $dim=25729 #relative_position_bias0.109=(6,64,64)f32 #16975=(1,6,64,64)f32
pnnx.Expression pnnx_expr_2950 2 1 attn.363 16975 attn0.55 expr=add(@0,@1) #attn.363=(36,6,64,64)f32 #16975=(1,6,64,64)f32 #attn0.55=(36,6,64,64)f32
pnnx.Expression pnnx_expr_2941 2 1 x2.109 attn_mask.91 16982 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.109=(36,64,192)f32 #attn_mask.91=(36,64,64)f32
pnnx.Expression pnnx_expr_2940 0 1 25734 expr=1
pnnx.Expression pnnx_expr_2939 0 1 25735 expr=0
Tensor.view Tensor.view_1974 2 1 attn0.55 16982 16983 $input=attn0.55 $shape=16982 #attn0.55=(36,6,64,64)f32 #16983=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3396 2 1 attn_mask.91 25734 16984 $input=attn_mask.91 $dim=25734 #attn_mask.91=(36,64,64)f32 #16984=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3397 2 1 16984 25735 16985 $input=16984 $dim=25735 #16984=(36,1,64,64)f32 #16985=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_2937 2 1 16983 16985 attn1.55 expr=add(@0,@1) #16983=(1,36,6,64,64)f32 #16985=(1,36,1,64,64)f32 #attn1.55=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_2934 1 1 x2.109 16987 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.109=(36,64,192)f32
Tensor.view Tensor.view_1975 2 1 attn1.55 16987 input.405 $input=attn1.55 $shape=16987 #attn1.55=(1,36,6,64,64)f32 #input.405=(36,6,64,64)f32
nn.Softmax layers_mmsa.2.residual_group.blocks.5.attn.softmax 1 1 input.405 16989 dim=-1 #input.405=(36,6,64,64)f32 #16989=(36,6,64,64)f32
nn.Dropout layers_mmsa.2.residual_group.blocks.5.attn.attn_drop 1 1 16989 16990 #16989=(36,6,64,64)f32 #16990=(36,6,64,64)f32
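Because of the cyclic shift, the precomputed attn_mask is broadcast-added to the biased scores before the softmax: scores are viewed as (1,36,6,64,64), the mask is unsqueezed to (1,36,1,64,64), and the sum is flattened back to (36,6,64,64). A sketch under the same assumptions (illustrative names):

    import torch

    def masked_softmax(attn, mask, heads=6):
        # attn: (B*nW, heads, N, N) = (36,6,64,64); mask: (nW, N, N) = (36,64,64)
        nW, N = mask.shape[0], mask.shape[1]
        a = attn.view(-1, nW, heads, N, N)       # (1, 36, 6, 64, 64)
        a = a + mask.unsqueeze(1).unsqueeze(0)   # broadcast over batch and heads
        return torch.softmax(a.view(-1, heads, N, N), dim=-1)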
Tensor.select Tensor.select_919 3 1 qkv0.109 25720 25721 v.181 $input=qkv0.109 $dim=25720 $index=25721 #qkv0.109=(3,36,6,64,32)f32 #v.181=(36,6,64,32)f32
pnnx.Expression pnnx_expr_2933 0 1 25739 expr=1
pnnx.Expression pnnx_expr_2932 0 1 25740 expr=2
torch.matmul torch.matmul_2383 2 1 16990 v.181 16991 $input=16990 $other=v.181 #16990=(36,6,64,64)f32 #v.181=(36,6,64,32)f32 #16991=(36,6,64,32)f32
pnnx.Expression pnnx_expr_2931 1 1 x2.109 16993 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.109=(36,64,192)f32
torch.transpose torch.transpose_3172 3 1 16991 25739 25740 16992 $input=16991 $dim0=25739 $dim1=25740 #16991=(36,6,64,32)f32 #16992=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_613 2 1 16992 16993 input0.113 $input=16992 $shape=16993 #16992=(36,64,6,32)f32 #input0.113=(36,64,192)f32
nn.Linear layers_mmsa.2.residual_group.blocks.5.attn.proj 1 1 input0.113 16995 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.113=(36,64,192)f32 #16995=(36,64,192)f32
nn.Dropout layers_mmsa.2.residual_group.blocks.5.attn.proj_drop 1 1 16995 16996 #16995=(36,64,192)f32 #16996=(36,64,192)f32
pnnx.Expression pnnx_expr_2927 1 1 16860 16997 expr=[-1,8,8,int(size(@0,2))] #16860=(1,2304,192)f32
pnnx.Expression pnnx_expr_2914 1 1 14094 17003 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32
pnnx.Expression pnnx_expr_2907 0 1 17005 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1976 2 1 16996 16997 windows.181 $input=16996 $shape=16997 #16996=(36,64,192)f32 #windows.181=(36,8,8,192)f32
Tensor.view Tensor.view_1977 2 1 windows.181 17003 x3.109 $input=windows.181 $shape=17003 #windows.181=(36,8,8,192)f32 #x3.109=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_2904 1 1 14094 17008 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32
torch.permute torch.permute_2887 2 1 x3.109 17005 17006 $input=x3.109 $dims=17005 #x3.109=(1,6,6,8,8,192)f32 #17006=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_271 1 1 17006 17007 memory_format=torch.contiguous_format $input=17006 #17006=(1,6,8,6,8,192)f32 #17007=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_2901 0 1 17010 expr=[4,4]
pnnx.Expression pnnx_expr_2898 0 1 17011 expr=[1,2]
Tensor.view Tensor.view_1978 2 1 17007 17008 shifted_x.91 $input=17007 $shape=17008 #17007=(1,6,8,6,8,192)f32 #shifted_x.91=(1,48,48,192)f32
pnnx.Expression pnnx_expr_2895 2 1 16860 14094 17015 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #16860=(1,2304,192)f32 #14094=(1,192,48,48)f32
torch.roll torch.roll_2509 3 1 shifted_x.91 17010 17011 x4.109 $input=shifted_x.91 $shifts=17010 $dims=17011 #shifted_x.91=(1,48,48,192)f32 #x4.109=(1,48,48,192)f32
Tensor.view Tensor.view_1979 2 1 x4.109 17015 x5.91 $input=x4.109 $shape=17015 #x4.109=(1,48,48,192)f32 #x5.91=(1,2304,192)f32
pnnx.Expression pnnx_expr_2893 2 1 16860 x5.91 input.407 expr=add(@0,@1) #16860=(1,2304,192)f32 #x5.91=(1,2304,192)f32 #input.407=(1,2304,192)f32
nn.LayerNorm layers_mmsa.2.residual_group.blocks.5.norm2 1 1 input.407 17019 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.407=(1,2304,192)f32 #17019=(1,2304,192)f32
nn.Linear layers_mmsa.2.residual_group.blocks.5.mlp.fc1 1 1 17019 17024 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17019=(1,2304,192)f32 #17024=(1,2304,384)f32
nn.GELU layers_mmsa.2.residual_group.blocks.5.mlp.act 1 1 17024 17025 #17024=(1,2304,384)f32 #17025=(1,2304,384)f32
nn.Dropout layers_mmsa.2.residual_group.blocks.5.mlp.drop 1 1 17025 17026 #17025=(1,2304,384)f32 #17026=(1,2304,384)f32
nn.Linear layers_mmsa.2.residual_group.blocks.5.mlp.fc2 1 1 17026 17027 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17026=(1,2304,384)f32 #17027=(1,2304,192)f32
nn.Dropout pnnx_unique_640 1 1 17027 17028 #17027=(1,2304,192)f32 #17028=(1,2304,192)f32
pnnx.Expression pnnx_expr_2891 2 1 input.407 17028 17030 expr=add(@0,@1) #input.407=(1,2304,192)f32 #17028=(1,2304,192)f32 #17030=(1,2304,192)f32
pnnx.Expression pnnx_expr_2889 0 1 17032 expr=1
pnnx.Expression pnnx_expr_2888 0 1 17033 expr=2
pnnx.Expression pnnx_expr_2883 2 1 17030 14094 17039 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #17030=(1,2304,192)f32 #14094=(1,192,48,48)f32
torch.transpose torch.transpose_3173 3 1 17030 17032 17033 17038 $input=17030 $dim0=17032 $dim1=17033 #17030=(1,2304,192)f32 #17038=(1,192,2304)f32
Tensor.view Tensor.view_1980 2 1 17038 17039 input.409 $input=17038 $shape=17039 #17038=(1,192,2304)f32 #input.409=(1,192,48,48)f32
nn.Conv2d layers_mmsa.2.conv 1 1 input.409 17041 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.409=(1,192,48,48)f32 #17041=(1,192,48,48)f32
pnnx.Expression pnnx_expr_2882 0 1 17042 expr=-1
pnnx.Expression pnnx_expr_2881 0 1 17043 expr=2
pnnx.Expression pnnx_expr_2880 0 1 17044 expr=1
pnnx.Expression pnnx_expr_2879 0 1 25767 expr=2
torch.flatten torch.flatten_2200 3 1 17041 17043 17042 17045 $input=17041 $start_dim=17043 $end_dim=17042 #17041=(1,192,48,48)f32 #17045=(1,192,2304)f32
torch.transpose torch.transpose_3174 3 1 17045 17044 25767 17046 $input=17045 $dim0=17044 $dim1=25767 #17045=(1,192,2304)f32 #17046=(1,2304,192)f32
pnnx.Expression pnnx_expr_2878 2 1 17046 16065 17047 expr=add(@0,@1) #17046=(1,2304,192)f32 #16065=(1,2304,192)f32 #17047=(1,2304,192)f32
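This closes residual group layers_mmsa.2: after the shifted block rolls its output back by (+4,+4), the tokens are transposed to NCHW, passed through the group's 3x3 convolution (layers_mmsa.2.conv), flattened back to a (1,2304,192) sequence, and added to the group's input (blob 16065, produced earlier in the document). A sketch of that epilogue with hypothetical names; the group-input blob comes from outside this excerpt:

    def close_residual_group(tokens, conv, group_input, H=48, W=48):
        # tokens: (B, H*W, 192) sequence -> NCHW for the 3x3 conv
        x = tokens.transpose(1, 2).view(tokens.shape[0], -1, H, W)
        x = conv(x)                              # nn.Conv2d(192, 192, 3, padding=1)
        x = x.flatten(2).transpose(1, 2)         # back to (B, H*W, 192)
        return x + group_input                   # residual over the whole group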
nn.LayerNorm layers_mmsa.3.residual_group.blocks.0.norm1 1 1 17047 17091 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #17047=(1,2304,192)f32 #17091=(1,2304,192)f32
pnnx.Expression pnnx_expr_2855 2 1 17047 14094 17092 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17047=(1,2304,192)f32 #14094=(1,192,48,48)f32
Tensor.view Tensor.view_1981 2 1 17091 17092 x.183 $input=17091 $shape=17092 #17091=(1,2304,192)f32 #x.183=(1,48,48,192)f32
pnnx.Expression pnnx_expr_2834 1 1 x.183 17109 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.183=(1,48,48,192)f32
pnnx.Expression pnnx_expr_2829 0 1 17111 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_1982 2 1 x.183 17109 x0.111 $input=x.183 $shape=17109 #x.183=(1,48,48,192)f32 #x0.111=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_2826 1 1 x.183 17114 expr=[-1,8,8,int(size(@0,3))] #x.183=(1,48,48,192)f32
torch.permute torch.permute_2888 2 1 x0.111 17111 17112 $input=x0.111 $dims=17111 #x0.111=(1,6,8,6,8,192)f32 #17112=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_272 1 1 17112 17113 memory_format=torch.contiguous_format $input=17112 #17112=(1,6,6,8,8,192)f32 #17113=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_2824 1 1 17047 17116 expr=[-1,64,int(size(@0,2))] #17047=(1,2304,192)f32
pnnx.Expression pnnx_expr_2814 0 1 17127 expr=-2
pnnx.Expression pnnx_expr_2813 0 1 17128 expr=-1
pnnx.Attribute layers_mmsa.3.residual_group.blocks.0.attn 0 1 relative_position_bias_table.183 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.183=(225,6)f32
pnnx.Attribute pnnx_unique_641 0 1
relative_position_index.183 @relative_position_index=(64,64)i64 #relative_position_index.183=(64,64)i64 Tensor.view Tensor.view_1983 2 1 17113 17114 x_windows.183 $input=17113 $shape=17114 #17113=(1,6,6,8,8,192)f32 #x_windows.183=(36,8,8,192)f32 Tensor.view Tensor.view_1984 2 1 x_windows.183 17116 x1.111 $input=x_windows.183 $shape=17116 #x_windows.183=(36,8,8,192)f32 #x1.111=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.0.attn.qkv 1 1 x1.111 17148 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.111=(36,64,192)f32 #17148=(36,64,576)f32 pnnx.Expression pnnx_expr_2798 1 1 x1.111 17151 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.111=(36,64,192)f32 pnnx.Expression pnnx_expr_2793 0 1 17153 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_614 2 1 17148 17151 17152 $input=17148 $shape=17151 #17148=(36,64,576)f32 #17152=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_2792 0 1 25785 expr=0 pnnx.Expression pnnx_expr_2791 0 1 25786 expr=0 pnnx.Expression pnnx_expr_2790 0 1 25787 expr=0 pnnx.Expression pnnx_expr_2789 0 1 25788 expr=1 pnnx.Expression pnnx_expr_2788 0 1 25789 expr=0 pnnx.Expression pnnx_expr_2787 0 1 25790 expr=2 torch.permute torch.permute_2889 2 1 17152 17153 qkv0.111 $input=17152 $dims=17153 #17152=(36,64,3,6,32)f32 #qkv0.111=(3,36,6,64,32)f32 Tensor.select Tensor.select_920 3 1 qkv0.111 25785 25786 q.183 $input=qkv0.111 $dim=25785 $index=25786 #qkv0.111=(3,36,6,64,32)f32 #q.183=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2786 1 1 q.183 q0.111 expr=mul(@0,1.767767e-01) #q.183=(36,6,64,32)f32 #q0.111=(36,6,64,32)f32 Tensor.select Tensor.select_921 3 1 qkv0.111 25787 25788 k.183 $input=qkv0.111 $dim=25787 $index=25788 #qkv0.111=(3,36,6,64,32)f32 #k.183=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2784 0 1 17161 expr=[-1] Tensor.view Tensor.view_1985 2 1 relative_position_index.183 17161 17162 $input=relative_position_index.183 $shape=17161 #relative_position_index.183=(64,64)i64 #17162=(4096)i64 pnnx.Expression pnnx_expr_2783 1 1 17162 17163 expr=[@0] #17162=(4096)i64 pnnx.Expression pnnx_expr_2780 0 1 17165 expr=[64,64,-1] Tensor.index Tensor.index_416 2 1 relative_position_bias_table.183 17163 17164 $input=relative_position_bias_table.183 $expr=17163 #relative_position_bias_table.183=(225,6)f32 #17164=(4096,6)f32 pnnx.Expression pnnx_expr_2776 0 1 17167 expr=[2,0,1] Tensor.view Tensor.view_1986 2 1 17164 17165 relative_position_bias.183 $input=17164 $shape=17165 #17164=(4096,6)f32 #relative_position_bias.183=(64,64,6)f32 pnnx.Expression pnnx_expr_2775 0 1 25798 expr=0 torch.permute torch.permute_2890 2 1 relative_position_bias.183 17167 17168 $input=relative_position_bias.183 $dims=17167 #relative_position_bias.183=(64,64,6)f32 #17168=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_273 1 1 17168 relative_position_bias0.111 memory_format=torch.contiguous_format $input=17168 #17168=(6,64,64)f32 #relative_position_bias0.111=(6,64,64)f32 torch.transpose torch.transpose_3175 3 1 k.183 17127 17128 17159 $input=k.183 $dim0=17127 $dim1=17128 #k.183=(36,6,64,32)f32 #17159=(36,6,32,64)f32 torch.matmul torch.matmul_2384 2 1 q0.111 17159 attn.367 $input=q0.111 $other=17159 #q0.111=(36,6,64,32)f32 #17159=(36,6,32,64)f32 #attn.367=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3398 2 1 relative_position_bias0.111 25798 17170 $input=relative_position_bias0.111 $dim=25798 #relative_position_bias0.111=(6,64,64)f32 #17170=(1,6,64,64)f32 pnnx.Expression pnnx_expr_2773 2 1 attn.367 17170 input.411 expr=add(@0,@1) 
#attn.367=(36,6,64,64)f32 #17170=(1,6,64,64)f32 #input.411=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.0.attn.softmax 1 1 input.411 17172 dim=-1 #input.411=(36,6,64,64)f32 #17172=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.0.attn.attn_drop 1 1 17172 17173 #17172=(36,6,64,64)f32 #17173=(36,6,64,64)f32 Tensor.select Tensor.select_922 3 1 qkv0.111 25789 25790 v.183 $input=qkv0.111 $dim=25789 $index=25790 #qkv0.111=(3,36,6,64,32)f32 #v.183=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2772 0 1 25800 expr=1 pnnx.Expression pnnx_expr_2771 0 1 25801 expr=2 torch.matmul torch.matmul_2385 2 1 17173 v.183 17174 $input=17173 $other=v.183 #17173=(36,6,64,64)f32 #v.183=(36,6,64,32)f32 #17174=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2770 1 1 x1.111 17176 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.111=(36,64,192)f32 torch.transpose torch.transpose_3176 3 1 17174 25800 25801 17175 $input=17174 $dim0=25800 $dim1=25801 #17174=(36,6,64,32)f32 #17175=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_615 2 1 17175 17176 input0.115 $input=17175 $shape=17176 #17175=(36,64,6,32)f32 #input0.115=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.0.attn.proj 1 1 input0.115 17178 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.115=(36,64,192)f32 #17178=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.0.attn.proj_drop 1 1 17178 17179 #17178=(36,64,192)f32 #17179=(36,64,192)f32 pnnx.Expression pnnx_expr_2766 1 1 17047 17180 expr=[-1,8,8,int(size(@0,2))] #17047=(1,2304,192)f32 pnnx.Expression pnnx_expr_2753 1 1 14094 17186 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2746 0 1 17188 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1987 2 1 17179 17180 windows.183 $input=17179 $shape=17180 #17179=(36,64,192)f32 #windows.183=(36,8,8,192)f32 Tensor.view Tensor.view_1988 2 1 windows.183 17186 x2.111 $input=windows.183 $shape=17186 #windows.183=(36,8,8,192)f32 #x2.111=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2743 1 1 14094 17191 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2891 2 1 x2.111 17188 17189 $input=x2.111 $dims=17188 #x2.111=(1,6,6,8,8,192)f32 #17189=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_274 1 1 17189 17190 memory_format=torch.contiguous_format $input=17189 #17189=(1,6,8,6,8,192)f32 #17190=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2740 2 1 17047 14094 17195 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17047=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_1989 2 1 17190 17191 x3.111 $input=17190 $shape=17191 #17190=(1,6,8,6,8,192)f32 #x3.111=(1,48,48,192)f32 Tensor.view Tensor.view_1990 2 1 x3.111 17195 x4.111 $input=x3.111 $shape=17195 #x3.111=(1,48,48,192)f32 #x4.111=(1,2304,192)f32 pnnx.Expression pnnx_expr_2738 2 1 17047 x4.111 input.413 expr=add(@0,@1) #17047=(1,2304,192)f32 #x4.111=(1,2304,192)f32 #input.413=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.0.norm2 1 1 input.413 17199 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.413=(1,2304,192)f32 #17199=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.0.mlp.fc1 1 1 17199 17204 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17199=(1,2304,192)f32 #17204=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.0.mlp.act 1 1 17204 17205 #17204=(1,2304,384)f32 
#17205=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.0.mlp.drop 1 1 17205 17206 #17205=(1,2304,384)f32 #17206=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.0.mlp.fc2 1 1 17206 17207 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17206=(1,2304,384)f32 #17207=(1,2304,192)f32 nn.Dropout pnnx_unique_642 1 1 17207 17208 #17207=(1,2304,192)f32 #17208=(1,2304,192)f32 pnnx.Expression pnnx_expr_2736 2 1 input.413 17208 17210 expr=add(@0,@1) #input.413=(1,2304,192)f32 #17208=(1,2304,192)f32 #17210=(1,2304,192)f32 pnnx.Attribute layers_mmsa.3.residual_group.blocks.1 0 1 attn_mask.93 @attn_mask=(36,64,64)f32 #attn_mask.93=(36,64,64)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.1.norm1 1 1 17210 17239 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #17210=(1,2304,192)f32 #17239=(1,2304,192)f32 pnnx.Expression pnnx_expr_2713 2 1 17210 14094 17240 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17210=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2711 0 1 17242 expr=[-4,-4] pnnx.Expression pnnx_expr_2709 0 1 17243 expr=[1,2] Tensor.view Tensor.view_1991 2 1 17239 17240 x.185 $input=17239 $shape=17240 #17239=(1,2304,192)f32 #x.185=(1,48,48,192)f32 torch.roll torch.roll_2510 3 1 x.185 17242 17243 x0.113 $input=x.185 $shifts=17242 $dims=17243 #x.185=(1,48,48,192)f32 #x0.113=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2687 1 1 x0.113 17260 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.113=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2682 0 1 17262 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1992 2 1 x0.113 17260 x1.113 $input=x0.113 $shape=17260 #x0.113=(1,48,48,192)f32 #x1.113=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2679 1 1 x0.113 17265 expr=[-1,8,8,int(size(@0,3))] #x0.113=(1,48,48,192)f32 torch.permute torch.permute_2892 2 1 x1.113 17262 17263 $input=x1.113 $dims=17262 #x1.113=(1,6,8,6,8,192)f32 #17263=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_275 1 1 17263 17264 memory_format=torch.contiguous_format $input=17263 #17263=(1,6,6,8,8,192)f32 #17264=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2677 1 1 17210 17267 expr=[-1,64,int(size(@0,2))] #17210=(1,2304,192)f32 pnnx.Expression pnnx_expr_2667 0 1 17278 expr=-2 pnnx.Expression pnnx_expr_2666 0 1 17279 expr=-1 pnnx.Attribute layers_mmsa.3.residual_group.blocks.1.attn 0 1 relative_position_bias_table.185 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.185=(225,6)f32 pnnx.Attribute pnnx_unique_643 0 1 relative_position_index.185 @relative_position_index=(64,64)i64 #relative_position_index.185=(64,64)i64 Tensor.view Tensor.view_1993 2 1 17264 17265 x_windows.185 $input=17264 $shape=17265 #17264=(1,6,6,8,8,192)f32 #x_windows.185=(36,8,8,192)f32 Tensor.view Tensor.view_1994 2 1 x_windows.185 17267 x2.113 $input=x_windows.185 $shape=17267 #x_windows.185=(36,8,8,192)f32 #x2.113=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.1.attn.qkv 1 1 x2.113 17303 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.113=(36,64,192)f32 #17303=(36,64,576)f32 pnnx.Expression pnnx_expr_2647 1 1 x2.113 17306 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.113=(36,64,192)f32 pnnx.Expression pnnx_expr_2642 0 1 17308 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_616 2 1 17303 17306 17307 $input=17303 $shape=17306 #17303=(36,64,576)f32 #17307=(36,64,3,6,32)f32 pnnx.Expression 
pnnx_expr_2641 0 1 25844 expr=0 pnnx.Expression pnnx_expr_2640 0 1 25845 expr=0 pnnx.Expression pnnx_expr_2639 0 1 25846 expr=0 pnnx.Expression pnnx_expr_2638 0 1 25847 expr=1 pnnx.Expression pnnx_expr_2637 0 1 25848 expr=0 pnnx.Expression pnnx_expr_2636 0 1 25849 expr=2 torch.permute torch.permute_2893 2 1 17307 17308 qkv0.113 $input=17307 $dims=17308 #17307=(36,64,3,6,32)f32 #qkv0.113=(3,36,6,64,32)f32 Tensor.select Tensor.select_923 3 1 qkv0.113 25844 25845 q.185 $input=qkv0.113 $dim=25844 $index=25845 #qkv0.113=(3,36,6,64,32)f32 #q.185=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2635 1 1 q.185 q0.113 expr=mul(@0,1.767767e-01) #q.185=(36,6,64,32)f32 #q0.113=(36,6,64,32)f32 Tensor.select Tensor.select_924 3 1 qkv0.113 25846 25847 k.185 $input=qkv0.113 $dim=25846 $index=25847 #qkv0.113=(3,36,6,64,32)f32 #k.185=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2633 0 1 17316 expr=[-1] Tensor.view Tensor.view_1995 2 1 relative_position_index.185 17316 17317 $input=relative_position_index.185 $shape=17316 #relative_position_index.185=(64,64)i64 #17317=(4096)i64 pnnx.Expression pnnx_expr_2632 1 1 17317 17318 expr=[@0] #17317=(4096)i64 pnnx.Expression pnnx_expr_2629 0 1 17320 expr=[64,64,-1] Tensor.index Tensor.index_417 2 1 relative_position_bias_table.185 17318 17319 $input=relative_position_bias_table.185 $expr=17318 #relative_position_bias_table.185=(225,6)f32 #17319=(4096,6)f32 pnnx.Expression pnnx_expr_2625 0 1 17322 expr=[2,0,1] Tensor.view Tensor.view_1996 2 1 17319 17320 relative_position_bias.185 $input=17319 $shape=17320 #17319=(4096,6)f32 #relative_position_bias.185=(64,64,6)f32 pnnx.Expression pnnx_expr_2624 0 1 25857 expr=0 torch.permute torch.permute_2894 2 1 relative_position_bias.185 17322 17323 $input=relative_position_bias.185 $dims=17322 #relative_position_bias.185=(64,64,6)f32 #17323=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_276 1 1 17323 relative_position_bias0.113 memory_format=torch.contiguous_format $input=17323 #17323=(6,64,64)f32 #relative_position_bias0.113=(6,64,64)f32 torch.transpose torch.transpose_3177 3 1 k.185 17278 17279 17314 $input=k.185 $dim0=17278 $dim1=17279 #k.185=(36,6,64,32)f32 #17314=(36,6,32,64)f32 torch.matmul torch.matmul_2386 2 1 q0.113 17314 attn.371 $input=q0.113 $other=17314 #q0.113=(36,6,64,32)f32 #17314=(36,6,32,64)f32 #attn.371=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3399 2 1 relative_position_bias0.113 25857 17325 $input=relative_position_bias0.113 $dim=25857 #relative_position_bias0.113=(6,64,64)f32 #17325=(1,6,64,64)f32 pnnx.Expression pnnx_expr_2622 2 1 attn.371 17325 attn0.57 expr=add(@0,@1) #attn.371=(36,6,64,64)f32 #17325=(1,6,64,64)f32 #attn0.57=(36,6,64,64)f32 pnnx.Expression pnnx_expr_2613 2 1 x2.113 attn_mask.93 17332 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.113=(36,64,192)f32 #attn_mask.93=(36,64,64)f32 pnnx.Expression pnnx_expr_2612 0 1 25862 expr=1 pnnx.Expression pnnx_expr_2611 0 1 25863 expr=0 Tensor.view Tensor.view_1997 2 1 attn0.57 17332 17333 $input=attn0.57 $shape=17332 #attn0.57=(36,6,64,64)f32 #17333=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3400 2 1 attn_mask.93 25862 17334 $input=attn_mask.93 $dim=25862 #attn_mask.93=(36,64,64)f32 #17334=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3401 2 1 17334 25863 17335 $input=17334 $dim=25863 #17334=(36,1,64,64)f32 #17335=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_2609 2 1 17333 17335 attn1.57 expr=add(@0,@1) #17333=(1,36,6,64,64)f32 #17335=(1,36,1,64,64)f32 #attn1.57=(1,36,6,64,64)f32 pnnx.Expression 
pnnx_expr_2606 1 1 x2.113 17337 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.113=(36,64,192)f32 Tensor.view Tensor.view_1998 2 1 attn1.57 17337 input.415 $input=attn1.57 $shape=17337 #attn1.57=(1,36,6,64,64)f32 #input.415=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.1.attn.softmax 1 1 input.415 17339 dim=-1 #input.415=(36,6,64,64)f32 #17339=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.1.attn.attn_drop 1 1 17339 17340 #17339=(36,6,64,64)f32 #17340=(36,6,64,64)f32 Tensor.select Tensor.select_925 3 1 qkv0.113 25848 25849 v.185 $input=qkv0.113 $dim=25848 $index=25849 #qkv0.113=(3,36,6,64,32)f32 #v.185=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2605 0 1 25867 expr=1 pnnx.Expression pnnx_expr_2604 0 1 25868 expr=2 torch.matmul torch.matmul_2387 2 1 17340 v.185 17341 $input=17340 $other=v.185 #17340=(36,6,64,64)f32 #v.185=(36,6,64,32)f32 #17341=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2603 1 1 x2.113 17343 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.113=(36,64,192)f32 torch.transpose torch.transpose_3178 3 1 17341 25867 25868 17342 $input=17341 $dim0=25867 $dim1=25868 #17341=(36,6,64,32)f32 #17342=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_617 2 1 17342 17343 input0.117 $input=17342 $shape=17343 #17342=(36,64,6,32)f32 #input0.117=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.1.attn.proj 1 1 input0.117 17345 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.117=(36,64,192)f32 #17345=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.1.attn.proj_drop 1 1 17345 17346 #17345=(36,64,192)f32 #17346=(36,64,192)f32 pnnx.Expression pnnx_expr_2599 1 1 17210 17347 expr=[-1,8,8,int(size(@0,2))] #17210=(1,2304,192)f32 pnnx.Expression pnnx_expr_2586 1 1 14094 17353 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2579 0 1 17355 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_1999 2 1 17346 17347 windows.185 $input=17346 $shape=17347 #17346=(36,64,192)f32 #windows.185=(36,8,8,192)f32 Tensor.view Tensor.view_2000 2 1 windows.185 17353 x3.113 $input=windows.185 $shape=17353 #windows.185=(36,8,8,192)f32 #x3.113=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2576 1 1 14094 17358 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2895 2 1 x3.113 17355 17356 $input=x3.113 $dims=17355 #x3.113=(1,6,6,8,8,192)f32 #17356=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_277 1 1 17356 17357 memory_format=torch.contiguous_format $input=17356 #17356=(1,6,8,6,8,192)f32 #17357=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2573 0 1 17360 expr=[4,4] pnnx.Expression pnnx_expr_2570 0 1 17361 expr=[1,2] Tensor.view Tensor.view_2001 2 1 17357 17358 shifted_x.93 $input=17357 $shape=17358 #17357=(1,6,8,6,8,192)f32 #shifted_x.93=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2567 2 1 17210 14094 17365 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17210=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2511 3 1 shifted_x.93 17360 17361 x4.113 $input=shifted_x.93 $shifts=17360 $dims=17361 #shifted_x.93=(1,48,48,192)f32 #x4.113=(1,48,48,192)f32 Tensor.view Tensor.view_2002 2 1 x4.113 17365 x5.93 $input=x4.113 $shape=17365 #x4.113=(1,48,48,192)f32 #x5.93=(1,2304,192)f32 pnnx.Expression pnnx_expr_2565 2 1 17210 x5.93 input.417 expr=add(@0,@1) #17210=(1,2304,192)f32 #x5.93=(1,2304,192)f32 #input.417=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.1.norm2 1 1 input.417 
17369 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.417=(1,2304,192)f32 #17369=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.1.mlp.fc1 1 1 17369 17374 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17369=(1,2304,192)f32 #17374=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.1.mlp.act 1 1 17374 17375 #17374=(1,2304,384)f32 #17375=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.1.mlp.drop 1 1 17375 17376 #17375=(1,2304,384)f32 #17376=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.1.mlp.fc2 1 1 17376 17377 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17376=(1,2304,384)f32 #17377=(1,2304,192)f32 nn.Dropout pnnx_unique_644 1 1 17377 17378 #17377=(1,2304,192)f32 #17378=(1,2304,192)f32 pnnx.Expression pnnx_expr_2563 2 1 input.417 17378 17380 expr=add(@0,@1) #input.417=(1,2304,192)f32 #17378=(1,2304,192)f32 #17380=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.2.norm1 1 1 17380 17407 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #17380=(1,2304,192)f32 #17407=(1,2304,192)f32 pnnx.Expression pnnx_expr_2541 2 1 17380 14094 17408 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17380=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2003 2 1 17407 17408 x.187 $input=17407 $shape=17408 #17407=(1,2304,192)f32 #x.187=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2520 1 1 x.187 17425 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.187=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2515 0 1 17427 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2004 2 1 x.187 17425 x0.115 $input=x.187 $shape=17425 #x.187=(1,48,48,192)f32 #x0.115=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2512 1 1 x.187 17430 expr=[-1,8,8,int(size(@0,3))] #x.187=(1,48,48,192)f32 torch.permute torch.permute_2896 2 1 x0.115 17427 17428 $input=x0.115 $dims=17427 #x0.115=(1,6,8,6,8,192)f32 #17428=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_278 1 1 17428 17429 memory_format=torch.contiguous_format $input=17428 #17428=(1,6,6,8,8,192)f32 #17429=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2510 1 1 17380 17432 expr=[-1,64,int(size(@0,2))] #17380=(1,2304,192)f32 pnnx.Expression pnnx_expr_2500 0 1 17443 expr=-2 pnnx.Expression pnnx_expr_2499 0 1 17444 expr=-1 pnnx.Attribute layers_mmsa.3.residual_group.blocks.2.attn 0 1 relative_position_bias_table.187 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.187=(225,6)f32 pnnx.Attribute pnnx_unique_645 0 1 relative_position_index.187 @relative_position_index=(64,64)i64 #relative_position_index.187=(64,64)i64 Tensor.view Tensor.view_2005 2 1 17429 17430 x_windows.187 $input=17429 $shape=17430 #17429=(1,6,6,8,8,192)f32 #x_windows.187=(36,8,8,192)f32 Tensor.view Tensor.view_2006 2 1 x_windows.187 17432 x1.115 $input=x_windows.187 $shape=17432 #x_windows.187=(36,8,8,192)f32 #x1.115=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.2.attn.qkv 1 1 x1.115 17464 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.115=(36,64,192)f32 #17464=(36,64,576)f32 pnnx.Expression pnnx_expr_2484 1 1 x1.115 17467 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.115=(36,64,192)f32 pnnx.Expression pnnx_expr_2479 0 1 17469 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_618 2 1 17464 17467 17468 $input=17464 $shape=17467 
#17464=(36,64,576)f32 #17468=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_2478 0 1 25912 expr=0 pnnx.Expression pnnx_expr_2477 0 1 25913 expr=0 pnnx.Expression pnnx_expr_2476 0 1 25914 expr=0 pnnx.Expression pnnx_expr_2475 0 1 25915 expr=1 pnnx.Expression pnnx_expr_2474 0 1 25916 expr=0 pnnx.Expression pnnx_expr_2473 0 1 25917 expr=2 torch.permute torch.permute_2897 2 1 17468 17469 qkv0.115 $input=17468 $dims=17469 #17468=(36,64,3,6,32)f32 #qkv0.115=(3,36,6,64,32)f32 Tensor.select Tensor.select_926 3 1 qkv0.115 25912 25913 q.187 $input=qkv0.115 $dim=25912 $index=25913 #qkv0.115=(3,36,6,64,32)f32 #q.187=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2472 1 1 q.187 q0.115 expr=mul(@0,1.767767e-01) #q.187=(36,6,64,32)f32 #q0.115=(36,6,64,32)f32 Tensor.select Tensor.select_927 3 1 qkv0.115 25914 25915 k.187 $input=qkv0.115 $dim=25914 $index=25915 #qkv0.115=(3,36,6,64,32)f32 #k.187=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2470 0 1 17477 expr=[-1] Tensor.view Tensor.view_2007 2 1 relative_position_index.187 17477 17478 $input=relative_position_index.187 $shape=17477 #relative_position_index.187=(64,64)i64 #17478=(4096)i64 pnnx.Expression pnnx_expr_2469 1 1 17478 17479 expr=[@0] #17478=(4096)i64 pnnx.Expression pnnx_expr_2466 0 1 17481 expr=[64,64,-1] Tensor.index Tensor.index_418 2 1 relative_position_bias_table.187 17479 17480 $input=relative_position_bias_table.187 $expr=17479 #relative_position_bias_table.187=(225,6)f32 #17480=(4096,6)f32 pnnx.Expression pnnx_expr_2462 0 1 17483 expr=[2,0,1] Tensor.view Tensor.view_2008 2 1 17480 17481 relative_position_bias.187 $input=17480 $shape=17481 #17480=(4096,6)f32 #relative_position_bias.187=(64,64,6)f32 pnnx.Expression pnnx_expr_2461 0 1 25925 expr=0 torch.permute torch.permute_2898 2 1 relative_position_bias.187 17483 17484 $input=relative_position_bias.187 $dims=17483 #relative_position_bias.187=(64,64,6)f32 #17484=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_279 1 1 17484 relative_position_bias0.115 memory_format=torch.contiguous_format $input=17484 #17484=(6,64,64)f32 #relative_position_bias0.115=(6,64,64)f32 torch.transpose torch.transpose_3179 3 1 k.187 17443 17444 17475 $input=k.187 $dim0=17443 $dim1=17444 #k.187=(36,6,64,32)f32 #17475=(36,6,32,64)f32 torch.matmul torch.matmul_2388 2 1 q0.115 17475 attn.375 $input=q0.115 $other=17475 #q0.115=(36,6,64,32)f32 #17475=(36,6,32,64)f32 #attn.375=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3402 2 1 relative_position_bias0.115 25925 17486 $input=relative_position_bias0.115 $dim=25925 #relative_position_bias0.115=(6,64,64)f32 #17486=(1,6,64,64)f32 pnnx.Expression pnnx_expr_2459 2 1 attn.375 17486 input.419 expr=add(@0,@1) #attn.375=(36,6,64,64)f32 #17486=(1,6,64,64)f32 #input.419=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.2.attn.softmax 1 1 input.419 17488 dim=-1 #input.419=(36,6,64,64)f32 #17488=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.2.attn.attn_drop 1 1 17488 17489 #17488=(36,6,64,64)f32 #17489=(36,6,64,64)f32 Tensor.select Tensor.select_928 3 1 qkv0.115 25916 25917 v.187 $input=qkv0.115 $dim=25916 $index=25917 #qkv0.115=(3,36,6,64,32)f32 #v.187=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2458 0 1 25927 expr=1 pnnx.Expression pnnx_expr_2457 0 1 25928 expr=2 torch.matmul torch.matmul_2389 2 1 17489 v.187 17490 $input=17489 $other=v.187 #17489=(36,6,64,64)f32 #v.187=(36,6,64,32)f32 #17490=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2456 1 1 x1.115 17492 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.115=(36,64,192)f32 torch.transpose 
torch.transpose_3180 3 1 17490 25927 25928 17491 $input=17490 $dim0=25927 $dim1=25928 #17490=(36,6,64,32)f32 #17491=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_619 2 1 17491 17492 input0.119 $input=17491 $shape=17492 #17491=(36,64,6,32)f32 #input0.119=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.2.attn.proj 1 1 input0.119 17494 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.119=(36,64,192)f32 #17494=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.2.attn.proj_drop 1 1 17494 17495 #17494=(36,64,192)f32 #17495=(36,64,192)f32 pnnx.Expression pnnx_expr_2452 1 1 17380 17496 expr=[-1,8,8,int(size(@0,2))] #17380=(1,2304,192)f32 pnnx.Expression pnnx_expr_2439 1 1 14094 17502 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2432 0 1 17504 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2009 2 1 17495 17496 windows.187 $input=17495 $shape=17496 #17495=(36,64,192)f32 #windows.187=(36,8,8,192)f32 Tensor.view Tensor.view_2010 2 1 windows.187 17502 x2.115 $input=windows.187 $shape=17502 #windows.187=(36,8,8,192)f32 #x2.115=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2429 1 1 14094 17507 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2899 2 1 x2.115 17504 17505 $input=x2.115 $dims=17504 #x2.115=(1,6,6,8,8,192)f32 #17505=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_280 1 1 17505 17506 memory_format=torch.contiguous_format $input=17505 #17505=(1,6,8,6,8,192)f32 #17506=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2426 2 1 17380 14094 17511 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17380=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2011 2 1 17506 17507 x3.115 $input=17506 $shape=17507 #17506=(1,6,8,6,8,192)f32 #x3.115=(1,48,48,192)f32 Tensor.view Tensor.view_2012 2 1 x3.115 17511 x4.115 $input=x3.115 $shape=17511 #x3.115=(1,48,48,192)f32 #x4.115=(1,2304,192)f32 pnnx.Expression pnnx_expr_2424 2 1 17380 x4.115 input.421 expr=add(@0,@1) #17380=(1,2304,192)f32 #x4.115=(1,2304,192)f32 #input.421=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.2.norm2 1 1 input.421 17515 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.421=(1,2304,192)f32 #17515=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.2.mlp.fc1 1 1 17515 17520 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17515=(1,2304,192)f32 #17520=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.2.mlp.act 1 1 17520 17521 #17520=(1,2304,384)f32 #17521=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.2.mlp.drop 1 1 17521 17522 #17521=(1,2304,384)f32 #17522=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.2.mlp.fc2 1 1 17522 17523 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17522=(1,2304,384)f32 #17523=(1,2304,192)f32 nn.Dropout pnnx_unique_646 1 1 17523 17524 #17523=(1,2304,192)f32 #17524=(1,2304,192)f32 pnnx.Expression pnnx_expr_2422 2 1 input.421 17524 17526 expr=add(@0,@1) #input.421=(1,2304,192)f32 #17524=(1,2304,192)f32 #17526=(1,2304,192)f32 pnnx.Attribute layers_mmsa.3.residual_group.blocks.3 0 1 attn_mask.95 @attn_mask=(36,64,64)f32 #attn_mask.95=(36,64,64)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.3.norm1 1 1 17526 17555 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 
#17526=(1,2304,192)f32 #17555=(1,2304,192)f32 pnnx.Expression pnnx_expr_2399 2 1 17526 14094 17556 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17526=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2397 0 1 17558 expr=[-4,-4] pnnx.Expression pnnx_expr_2395 0 1 17559 expr=[1,2] Tensor.view Tensor.view_2013 2 1 17555 17556 x.189 $input=17555 $shape=17556 #17555=(1,2304,192)f32 #x.189=(1,48,48,192)f32 torch.roll torch.roll_2512 3 1 x.189 17558 17559 x0.117 $input=x.189 $shifts=17558 $dims=17559 #x.189=(1,48,48,192)f32 #x0.117=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2373 1 1 x0.117 17576 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.117=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2368 0 1 17578 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2014 2 1 x0.117 17576 x1.117 $input=x0.117 $shape=17576 #x0.117=(1,48,48,192)f32 #x1.117=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2365 1 1 x0.117 17581 expr=[-1,8,8,int(size(@0,3))] #x0.117=(1,48,48,192)f32 torch.permute torch.permute_2900 2 1 x1.117 17578 17579 $input=x1.117 $dims=17578 #x1.117=(1,6,8,6,8,192)f32 #17579=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_281 1 1 17579 17580 memory_format=torch.contiguous_format $input=17579 #17579=(1,6,6,8,8,192)f32 #17580=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2363 1 1 17526 17583 expr=[-1,64,int(size(@0,2))] #17526=(1,2304,192)f32 pnnx.Expression pnnx_expr_2353 0 1 17594 expr=-2 pnnx.Expression pnnx_expr_2352 0 1 17595 expr=-1 pnnx.Attribute layers_mmsa.3.residual_group.blocks.3.attn 0 1 relative_position_bias_table.189 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.189=(225,6)f32 pnnx.Attribute pnnx_unique_647 0 1 relative_position_index.189 @relative_position_index=(64,64)i64 #relative_position_index.189=(64,64)i64 Tensor.view Tensor.view_2015 2 1 17580 17581 x_windows.189 $input=17580 $shape=17581 #17580=(1,6,6,8,8,192)f32 #x_windows.189=(36,8,8,192)f32 Tensor.view Tensor.view_2016 2 1 x_windows.189 17583 x2.117 $input=x_windows.189 $shape=17583 #x_windows.189=(36,8,8,192)f32 #x2.117=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.3.attn.qkv 1 1 x2.117 17619 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.117=(36,64,192)f32 #17619=(36,64,576)f32 pnnx.Expression pnnx_expr_2333 1 1 x2.117 17622 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.117=(36,64,192)f32 pnnx.Expression pnnx_expr_2328 0 1 17624 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_620 2 1 17619 17622 17623 $input=17619 $shape=17622 #17619=(36,64,576)f32 #17623=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_2327 0 1 25971 expr=0 pnnx.Expression pnnx_expr_2326 0 1 25972 expr=0 pnnx.Expression pnnx_expr_2325 0 1 25973 expr=0 pnnx.Expression pnnx_expr_2324 0 1 25974 expr=1 pnnx.Expression pnnx_expr_2323 0 1 25975 expr=0 pnnx.Expression pnnx_expr_2322 0 1 25976 expr=2 torch.permute torch.permute_2901 2 1 17623 17624 qkv0.117 $input=17623 $dims=17624 #17623=(36,64,3,6,32)f32 #qkv0.117=(3,36,6,64,32)f32 Tensor.select Tensor.select_929 3 1 qkv0.117 25971 25972 q.189 $input=qkv0.117 $dim=25971 $index=25972 #qkv0.117=(3,36,6,64,32)f32 #q.189=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2321 1 1 q.189 q0.117 expr=mul(@0,1.767767e-01) #q.189=(36,6,64,32)f32 #q0.117=(36,6,64,32)f32 Tensor.select Tensor.select_930 3 1 qkv0.117 25973 25974 k.189 $input=qkv0.117 $dim=25973 $index=25974 #qkv0.117=(3,36,6,64,32)f32 #k.189=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2319 
0 1 17632 expr=[-1] Tensor.view Tensor.view_2017 2 1 relative_position_index.189 17632 17633 $input=relative_position_index.189 $shape=17632 #relative_position_index.189=(64,64)i64 #17633=(4096)i64 pnnx.Expression pnnx_expr_2318 1 1 17633 17634 expr=[@0] #17633=(4096)i64 pnnx.Expression pnnx_expr_2315 0 1 17636 expr=[64,64,-1] Tensor.index Tensor.index_419 2 1 relative_position_bias_table.189 17634 17635 $input=relative_position_bias_table.189 $expr=17634 #relative_position_bias_table.189=(225,6)f32 #17635=(4096,6)f32 pnnx.Expression pnnx_expr_2311 0 1 17638 expr=[2,0,1] Tensor.view Tensor.view_2018 2 1 17635 17636 relative_position_bias.189 $input=17635 $shape=17636 #17635=(4096,6)f32 #relative_position_bias.189=(64,64,6)f32 pnnx.Expression pnnx_expr_2310 0 1 25984 expr=0 torch.permute torch.permute_2902 2 1 relative_position_bias.189 17638 17639 $input=relative_position_bias.189 $dims=17638 #relative_position_bias.189=(64,64,6)f32 #17639=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_282 1 1 17639 relative_position_bias0.117 memory_format=torch.contiguous_format $input=17639 #17639=(6,64,64)f32 #relative_position_bias0.117=(6,64,64)f32 torch.transpose torch.transpose_3181 3 1 k.189 17594 17595 17630 $input=k.189 $dim0=17594 $dim1=17595 #k.189=(36,6,64,32)f32 #17630=(36,6,32,64)f32 torch.matmul torch.matmul_2390 2 1 q0.117 17630 attn.379 $input=q0.117 $other=17630 #q0.117=(36,6,64,32)f32 #17630=(36,6,32,64)f32 #attn.379=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3403 2 1 relative_position_bias0.117 25984 17641 $input=relative_position_bias0.117 $dim=25984 #relative_position_bias0.117=(6,64,64)f32 #17641=(1,6,64,64)f32 pnnx.Expression pnnx_expr_2308 2 1 attn.379 17641 attn0.59 expr=add(@0,@1) #attn.379=(36,6,64,64)f32 #17641=(1,6,64,64)f32 #attn0.59=(36,6,64,64)f32 pnnx.Expression pnnx_expr_2299 2 1 x2.117 attn_mask.95 17648 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.117=(36,64,192)f32 #attn_mask.95=(36,64,64)f32 pnnx.Expression pnnx_expr_2298 0 1 25989 expr=1 pnnx.Expression pnnx_expr_2297 0 1 25990 expr=0 Tensor.view Tensor.view_2019 2 1 attn0.59 17648 17649 $input=attn0.59 $shape=17648 #attn0.59=(36,6,64,64)f32 #17649=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3404 2 1 attn_mask.95 25989 17650 $input=attn_mask.95 $dim=25989 #attn_mask.95=(36,64,64)f32 #17650=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3405 2 1 17650 25990 17651 $input=17650 $dim=25990 #17650=(36,1,64,64)f32 #17651=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_2295 2 1 17649 17651 attn1.59 expr=add(@0,@1) #17649=(1,36,6,64,64)f32 #17651=(1,36,1,64,64)f32 #attn1.59=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_2292 1 1 x2.117 17653 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.117=(36,64,192)f32 Tensor.view Tensor.view_2020 2 1 attn1.59 17653 input.423 $input=attn1.59 $shape=17653 #attn1.59=(1,36,6,64,64)f32 #input.423=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.3.attn.softmax 1 1 input.423 17655 dim=-1 #input.423=(36,6,64,64)f32 #17655=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.3.attn.attn_drop 1 1 17655 17656 #17655=(36,6,64,64)f32 #17656=(36,6,64,64)f32 Tensor.select Tensor.select_931 3 1 qkv0.117 25975 25976 v.189 $input=qkv0.117 $dim=25975 $index=25976 #qkv0.117=(3,36,6,64,32)f32 #v.189=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2291 0 1 25994 expr=1 pnnx.Expression pnnx_expr_2290 0 1 25995 expr=2 torch.matmul torch.matmul_2391 2 1 17656 v.189 17657 $input=17656 $other=v.189 #17656=(36,6,64,64)f32 
#v.189=(36,6,64,32)f32 #17657=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2289 1 1 x2.117 17659 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.117=(36,64,192)f32 torch.transpose torch.transpose_3182 3 1 17657 25994 25995 17658 $input=17657 $dim0=25994 $dim1=25995 #17657=(36,6,64,32)f32 #17658=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_621 2 1 17658 17659 input0.121 $input=17658 $shape=17659 #17658=(36,64,6,32)f32 #input0.121=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.3.attn.proj 1 1 input0.121 17661 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.121=(36,64,192)f32 #17661=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.3.attn.proj_drop 1 1 17661 17662 #17661=(36,64,192)f32 #17662=(36,64,192)f32 pnnx.Expression pnnx_expr_2285 1 1 17526 17663 expr=[-1,8,8,int(size(@0,2))] #17526=(1,2304,192)f32 pnnx.Expression pnnx_expr_2272 1 1 14094 17669 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2265 0 1 17671 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2021 2 1 17662 17663 windows.189 $input=17662 $shape=17663 #17662=(36,64,192)f32 #windows.189=(36,8,8,192)f32 Tensor.view Tensor.view_2022 2 1 windows.189 17669 x3.117 $input=windows.189 $shape=17669 #windows.189=(36,8,8,192)f32 #x3.117=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2262 1 1 14094 17674 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2903 2 1 x3.117 17671 17672 $input=x3.117 $dims=17671 #x3.117=(1,6,6,8,8,192)f32 #17672=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_283 1 1 17672 17673 memory_format=torch.contiguous_format $input=17672 #17672=(1,6,8,6,8,192)f32 #17673=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2259 0 1 17676 expr=[4,4] pnnx.Expression pnnx_expr_2256 0 1 17677 expr=[1,2] Tensor.view Tensor.view_2023 2 1 17673 17674 shifted_x.95 $input=17673 $shape=17674 #17673=(1,6,8,6,8,192)f32 #shifted_x.95=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2253 2 1 17526 14094 17681 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17526=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2513 3 1 shifted_x.95 17676 17677 x4.117 $input=shifted_x.95 $shifts=17676 $dims=17677 #shifted_x.95=(1,48,48,192)f32 #x4.117=(1,48,48,192)f32 Tensor.view Tensor.view_2024 2 1 x4.117 17681 x5.95 $input=x4.117 $shape=17681 #x4.117=(1,48,48,192)f32 #x5.95=(1,2304,192)f32 pnnx.Expression pnnx_expr_2251 2 1 17526 x5.95 input.425 expr=add(@0,@1) #17526=(1,2304,192)f32 #x5.95=(1,2304,192)f32 #input.425=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.3.norm2 1 1 input.425 17685 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.425=(1,2304,192)f32 #17685=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.3.mlp.fc1 1 1 17685 17690 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17685=(1,2304,192)f32 #17690=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.3.mlp.act 1 1 17690 17691 #17690=(1,2304,384)f32 #17691=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.3.mlp.drop 1 1 17691 17692 #17691=(1,2304,384)f32 #17692=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.3.mlp.fc2 1 1 17692 17693 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17692=(1,2304,384)f32 #17693=(1,2304,192)f32 nn.Dropout pnnx_unique_648 1 1 17693 17694 #17693=(1,2304,192)f32 
#17694=(1,2304,192)f32 pnnx.Expression pnnx_expr_2249 2 1 input.425 17694 17696 expr=add(@0,@1) #input.425=(1,2304,192)f32 #17694=(1,2304,192)f32 #17696=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.4.norm1 1 1 17696 17723 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #17696=(1,2304,192)f32 #17723=(1,2304,192)f32 pnnx.Expression pnnx_expr_2227 2 1 17696 14094 17724 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17696=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2025 2 1 17723 17724 x.191 $input=17723 $shape=17724 #17723=(1,2304,192)f32 #x.191=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2206 1 1 x.191 17741 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.191=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2201 0 1 17743 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2026 2 1 x.191 17741 x0.119 $input=x.191 $shape=17741 #x.191=(1,48,48,192)f32 #x0.119=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2198 1 1 x.191 17746 expr=[-1,8,8,int(size(@0,3))] #x.191=(1,48,48,192)f32 torch.permute torch.permute_2904 2 1 x0.119 17743 17744 $input=x0.119 $dims=17743 #x0.119=(1,6,8,6,8,192)f32 #17744=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_284 1 1 17744 17745 memory_format=torch.contiguous_format $input=17744 #17744=(1,6,6,8,8,192)f32 #17745=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2196 1 1 17696 17748 expr=[-1,64,int(size(@0,2))] #17696=(1,2304,192)f32 pnnx.Expression pnnx_expr_2186 0 1 17759 expr=-2 pnnx.Expression pnnx_expr_2185 0 1 17760 expr=-1 pnnx.Attribute layers_mmsa.3.residual_group.blocks.4.attn 0 1 relative_position_bias_table.191 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.191=(225,6)f32 pnnx.Attribute pnnx_unique_649 0 1 relative_position_index.191 @relative_position_index=(64,64)i64 #relative_position_index.191=(64,64)i64 Tensor.view Tensor.view_2027 2 1 17745 17746 x_windows.191 $input=17745 $shape=17746 #17745=(1,6,6,8,8,192)f32 #x_windows.191=(36,8,8,192)f32 Tensor.view Tensor.view_2028 2 1 x_windows.191 17748 x1.119 $input=x_windows.191 $shape=17748 #x_windows.191=(36,8,8,192)f32 #x1.119=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.4.attn.qkv 1 1 x1.119 17780 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.119=(36,64,192)f32 #17780=(36,64,576)f32 pnnx.Expression pnnx_expr_2170 1 1 x1.119 17783 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.119=(36,64,192)f32 pnnx.Expression pnnx_expr_2165 0 1 17785 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_622 2 1 17780 17783 17784 $input=17780 $shape=17783 #17780=(36,64,576)f32 #17784=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_2164 0 1 26039 expr=0 pnnx.Expression pnnx_expr_2163 0 1 26040 expr=0 pnnx.Expression pnnx_expr_2162 0 1 26041 expr=0 pnnx.Expression pnnx_expr_2161 0 1 26042 expr=1 pnnx.Expression pnnx_expr_2160 0 1 26043 expr=0 pnnx.Expression pnnx_expr_2159 0 1 26044 expr=2 torch.permute torch.permute_2905 2 1 17784 17785 qkv0.119 $input=17784 $dims=17785 #17784=(36,64,3,6,32)f32 #qkv0.119=(3,36,6,64,32)f32 Tensor.select Tensor.select_932 3 1 qkv0.119 26039 26040 q.191 $input=qkv0.119 $dim=26039 $index=26040 #qkv0.119=(3,36,6,64,32)f32 #q.191=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2158 1 1 q.191 q0.119 expr=mul(@0,1.767767e-01) #q.191=(36,6,64,32)f32 #q0.119=(36,6,64,32)f32 Tensor.select Tensor.select_933 3 1 qkv0.119 26041 26042 k.191 $input=qkv0.119 $dim=26041 $index=26042 
#qkv0.119=(3,36,6,64,32)f32 #k.191=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2156 0 1 17793 expr=[-1] Tensor.view Tensor.view_2029 2 1 relative_position_index.191 17793 17794 $input=relative_position_index.191 $shape=17793 #relative_position_index.191=(64,64)i64 #17794=(4096)i64 pnnx.Expression pnnx_expr_2155 1 1 17794 17795 expr=[@0] #17794=(4096)i64 pnnx.Expression pnnx_expr_2152 0 1 17797 expr=[64,64,-1] Tensor.index Tensor.index_420 2 1 relative_position_bias_table.191 17795 17796 $input=relative_position_bias_table.191 $expr=17795 #relative_position_bias_table.191=(225,6)f32 #17796=(4096,6)f32 pnnx.Expression pnnx_expr_2148 0 1 17799 expr=[2,0,1] Tensor.view Tensor.view_2030 2 1 17796 17797 relative_position_bias.191 $input=17796 $shape=17797 #17796=(4096,6)f32 #relative_position_bias.191=(64,64,6)f32 pnnx.Expression pnnx_expr_2147 0 1 26052 expr=0 torch.permute torch.permute_2906 2 1 relative_position_bias.191 17799 17800 $input=relative_position_bias.191 $dims=17799 #relative_position_bias.191=(64,64,6)f32 #17800=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_285 1 1 17800 relative_position_bias0.119 memory_format=torch.contiguous_format $input=17800 #17800=(6,64,64)f32 #relative_position_bias0.119=(6,64,64)f32 torch.transpose torch.transpose_3183 3 1 k.191 17759 17760 17791 $input=k.191 $dim0=17759 $dim1=17760 #k.191=(36,6,64,32)f32 #17791=(36,6,32,64)f32 torch.matmul torch.matmul_2392 2 1 q0.119 17791 attn.383 $input=q0.119 $other=17791 #q0.119=(36,6,64,32)f32 #17791=(36,6,32,64)f32 #attn.383=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3406 2 1 relative_position_bias0.119 26052 17802 $input=relative_position_bias0.119 $dim=26052 #relative_position_bias0.119=(6,64,64)f32 #17802=(1,6,64,64)f32 pnnx.Expression pnnx_expr_2145 2 1 attn.383 17802 input.427 expr=add(@0,@1) #attn.383=(36,6,64,64)f32 #17802=(1,6,64,64)f32 #input.427=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.4.attn.softmax 1 1 input.427 17804 dim=-1 #input.427=(36,6,64,64)f32 #17804=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.4.attn.attn_drop 1 1 17804 17805 #17804=(36,6,64,64)f32 #17805=(36,6,64,64)f32 Tensor.select Tensor.select_934 3 1 qkv0.119 26043 26044 v.191 $input=qkv0.119 $dim=26043 $index=26044 #qkv0.119=(3,36,6,64,32)f32 #v.191=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2144 0 1 26054 expr=1 pnnx.Expression pnnx_expr_2143 0 1 26055 expr=2 torch.matmul torch.matmul_2393 2 1 17805 v.191 17806 $input=17805 $other=v.191 #17805=(36,6,64,64)f32 #v.191=(36,6,64,32)f32 #17806=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2142 1 1 x1.119 17808 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.119=(36,64,192)f32 torch.transpose torch.transpose_3184 3 1 17806 26054 26055 17807 $input=17806 $dim0=26054 $dim1=26055 #17806=(36,6,64,32)f32 #17807=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_623 2 1 17807 17808 input0.123 $input=17807 $shape=17808 #17807=(36,64,6,32)f32 #input0.123=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.4.attn.proj 1 1 input0.123 17810 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.123=(36,64,192)f32 #17810=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.4.attn.proj_drop 1 1 17810 17811 #17810=(36,64,192)f32 #17811=(36,64,192)f32 pnnx.Expression pnnx_expr_2138 1 1 17696 17812 expr=[-1,8,8,int(size(@0,2))] #17696=(1,2304,192)f32 pnnx.Expression pnnx_expr_2125 1 1 14094 17818 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression 
pnnx_expr_2118 0 1 17820 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2031 2 1 17811 17812 windows.191 $input=17811 $shape=17812 #17811=(36,64,192)f32 #windows.191=(36,8,8,192)f32 Tensor.view Tensor.view_2032 2 1 windows.191 17818 x2.119 $input=windows.191 $shape=17818 #windows.191=(36,8,8,192)f32 #x2.119=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2115 1 1 14094 17823 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2907 2 1 x2.119 17820 17821 $input=x2.119 $dims=17820 #x2.119=(1,6,6,8,8,192)f32 #17821=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_286 1 1 17821 17822 memory_format=torch.contiguous_format $input=17821 #17821=(1,6,8,6,8,192)f32 #17822=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2112 2 1 17696 14094 17827 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17696=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2033 2 1 17822 17823 x3.119 $input=17822 $shape=17823 #17822=(1,6,8,6,8,192)f32 #x3.119=(1,48,48,192)f32 Tensor.view Tensor.view_2034 2 1 x3.119 17827 x4.119 $input=x3.119 $shape=17827 #x3.119=(1,48,48,192)f32 #x4.119=(1,2304,192)f32 pnnx.Expression pnnx_expr_2110 2 1 17696 x4.119 input.429 expr=add(@0,@1) #17696=(1,2304,192)f32 #x4.119=(1,2304,192)f32 #input.429=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.4.norm2 1 1 input.429 17831 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.429=(1,2304,192)f32 #17831=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.4.mlp.fc1 1 1 17831 17836 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #17831=(1,2304,192)f32 #17836=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.4.mlp.act 1 1 17836 17837 #17836=(1,2304,384)f32 #17837=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.4.mlp.drop 1 1 17837 17838 #17837=(1,2304,384)f32 #17838=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.4.mlp.fc2 1 1 17838 17839 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #17838=(1,2304,384)f32 #17839=(1,2304,192)f32 nn.Dropout pnnx_unique_650 1 1 17839 17840 #17839=(1,2304,192)f32 #17840=(1,2304,192)f32 pnnx.Expression pnnx_expr_2108 2 1 input.429 17840 17842 expr=add(@0,@1) #input.429=(1,2304,192)f32 #17840=(1,2304,192)f32 #17842=(1,2304,192)f32 pnnx.Attribute layers_mmsa.3.residual_group.blocks.5 0 1 attn_mask.97 @attn_mask=(36,64,64)f32 #attn_mask.97=(36,64,64)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.5.norm1 1 1 17842 17871 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #17842=(1,2304,192)f32 #17871=(1,2304,192)f32 pnnx.Expression pnnx_expr_2085 2 1 17842 14094 17872 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #17842=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_2083 0 1 17874 expr=[-4,-4] pnnx.Expression pnnx_expr_2081 0 1 17875 expr=[1,2] Tensor.view Tensor.view_2035 2 1 17871 17872 x.193 $input=17871 $shape=17872 #17871=(1,2304,192)f32 #x.193=(1,48,48,192)f32 torch.roll torch.roll_2514 3 1 x.193 17874 17875 x0.121 $input=x.193 $shifts=17874 $dims=17875 #x.193=(1,48,48,192)f32 #x0.121=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2059 1 1 x0.121 17892 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.121=(1,48,48,192)f32 pnnx.Expression pnnx_expr_2054 0 1 17894 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2036 2 1 x0.121 17892 
x1.121 $input=x0.121 $shape=17892 #x0.121=(1,48,48,192)f32 #x1.121=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_2051 1 1 x0.121 17897 expr=[-1,8,8,int(size(@0,3))] #x0.121=(1,48,48,192)f32 torch.permute torch.permute_2908 2 1 x1.121 17894 17895 $input=x1.121 $dims=17894 #x1.121=(1,6,8,6,8,192)f32 #17895=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_287 1 1 17895 17896 memory_format=torch.contiguous_format $input=17895 #17895=(1,6,6,8,8,192)f32 #17896=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_2049 1 1 17842 17899 expr=[-1,64,int(size(@0,2))] #17842=(1,2304,192)f32 pnnx.Expression pnnx_expr_2039 0 1 17910 expr=-2 pnnx.Expression pnnx_expr_2038 0 1 17911 expr=-1 pnnx.Attribute layers_mmsa.3.residual_group.blocks.5.attn 0 1 relative_position_bias_table.193 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.193=(225,6)f32 pnnx.Attribute pnnx_unique_651 0 1 relative_position_index.193 @relative_position_index=(64,64)i64 #relative_position_index.193=(64,64)i64 Tensor.view Tensor.view_2037 2 1 17896 17897 x_windows.193 $input=17896 $shape=17897 #17896=(1,6,6,8,8,192)f32 #x_windows.193=(36,8,8,192)f32 Tensor.view Tensor.view_2038 2 1 x_windows.193 17899 x2.121 $input=x_windows.193 $shape=17899 #x_windows.193=(36,8,8,192)f32 #x2.121=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.5.attn.qkv 1 1 x2.121 17935 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.121=(36,64,192)f32 #17935=(36,64,576)f32 pnnx.Expression pnnx_expr_2019 1 1 x2.121 17938 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.121=(36,64,192)f32 pnnx.Expression pnnx_expr_2014 0 1 17940 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_624 2 1 17935 17938 17939 $input=17935 $shape=17938 #17935=(36,64,576)f32 #17939=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_2013 0 1 26098 expr=0 pnnx.Expression pnnx_expr_2012 0 1 26099 expr=0 pnnx.Expression pnnx_expr_2011 0 1 26100 expr=0 pnnx.Expression pnnx_expr_2010 0 1 26101 expr=1 pnnx.Expression pnnx_expr_2009 0 1 26102 expr=0 pnnx.Expression pnnx_expr_2008 0 1 26103 expr=2 torch.permute torch.permute_2909 2 1 17939 17940 qkv0.121 $input=17939 $dims=17940 #17939=(36,64,3,6,32)f32 #qkv0.121=(3,36,6,64,32)f32 Tensor.select Tensor.select_935 3 1 qkv0.121 26098 26099 q.193 $input=qkv0.121 $dim=26098 $index=26099 #qkv0.121=(3,36,6,64,32)f32 #q.193=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2007 1 1 q.193 q0.121 expr=mul(@0,1.767767e-01) #q.193=(36,6,64,32)f32 #q0.121=(36,6,64,32)f32 Tensor.select Tensor.select_936 3 1 qkv0.121 26100 26101 k.193 $input=qkv0.121 $dim=26100 $index=26101 #qkv0.121=(3,36,6,64,32)f32 #k.193=(36,6,64,32)f32 pnnx.Expression pnnx_expr_2005 0 1 17948 expr=[-1] Tensor.view Tensor.view_2039 2 1 relative_position_index.193 17948 17949 $input=relative_position_index.193 $shape=17948 #relative_position_index.193=(64,64)i64 #17949=(4096)i64 pnnx.Expression pnnx_expr_2004 1 1 17949 17950 expr=[@0] #17949=(4096)i64 pnnx.Expression pnnx_expr_2001 0 1 17952 expr=[64,64,-1] Tensor.index Tensor.index_421 2 1 relative_position_bias_table.193 17950 17951 $input=relative_position_bias_table.193 $expr=17950 #relative_position_bias_table.193=(225,6)f32 #17951=(4096,6)f32 pnnx.Expression pnnx_expr_1997 0 1 17954 expr=[2,0,1] Tensor.view Tensor.view_2040 2 1 17951 17952 relative_position_bias.193 $input=17951 $shape=17952 #17951=(4096,6)f32 #relative_position_bias.193=(64,64,6)f32 pnnx.Expression pnnx_expr_1996 0 1 26111 expr=0 torch.permute torch.permute_2910 2 1 relative_position_bias.193 17954 
17955 $input=relative_position_bias.193 $dims=17954 #relative_position_bias.193=(64,64,6)f32 #17955=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_288 1 1 17955 relative_position_bias0.121 memory_format=torch.contiguous_format $input=17955 #17955=(6,64,64)f32 #relative_position_bias0.121=(6,64,64)f32 torch.transpose torch.transpose_3185 3 1 k.193 17910 17911 17946 $input=k.193 $dim0=17910 $dim1=17911 #k.193=(36,6,64,32)f32 #17946=(36,6,32,64)f32 torch.matmul torch.matmul_2394 2 1 q0.121 17946 attn.387 $input=q0.121 $other=17946 #q0.121=(36,6,64,32)f32 #17946=(36,6,32,64)f32 #attn.387=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3407 2 1 relative_position_bias0.121 26111 17957 $input=relative_position_bias0.121 $dim=26111 #relative_position_bias0.121=(6,64,64)f32 #17957=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1994 2 1 attn.387 17957 attn0.61 expr=add(@0,@1) #attn.387=(36,6,64,64)f32 #17957=(1,6,64,64)f32 #attn0.61=(36,6,64,64)f32 pnnx.Expression pnnx_expr_1985 2 1 x2.121 attn_mask.97 17964 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.121=(36,64,192)f32 #attn_mask.97=(36,64,64)f32 pnnx.Expression pnnx_expr_1984 0 1 26116 expr=1 pnnx.Expression pnnx_expr_1983 0 1 26117 expr=0 Tensor.view Tensor.view_2041 2 1 attn0.61 17964 17965 $input=attn0.61 $shape=17964 #attn0.61=(36,6,64,64)f32 #17965=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3408 2 1 attn_mask.97 26116 17966 $input=attn_mask.97 $dim=26116 #attn_mask.97=(36,64,64)f32 #17966=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3409 2 1 17966 26117 17967 $input=17966 $dim=26117 #17966=(36,1,64,64)f32 #17967=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_1981 2 1 17965 17967 attn1.61 expr=add(@0,@1) #17965=(1,36,6,64,64)f32 #17967=(1,36,1,64,64)f32 #attn1.61=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_1978 1 1 x2.121 17969 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.121=(36,64,192)f32 Tensor.view Tensor.view_2042 2 1 attn1.61 17969 input.431 $input=attn1.61 $shape=17969 #attn1.61=(1,36,6,64,64)f32 #input.431=(36,6,64,64)f32 nn.Softmax layers_mmsa.3.residual_group.blocks.5.attn.softmax 1 1 input.431 17971 dim=-1 #input.431=(36,6,64,64)f32 #17971=(36,6,64,64)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.5.attn.attn_drop 1 1 17971 17972 #17971=(36,6,64,64)f32 #17972=(36,6,64,64)f32 Tensor.select Tensor.select_937 3 1 qkv0.121 26102 26103 v.193 $input=qkv0.121 $dim=26102 $index=26103 #qkv0.121=(3,36,6,64,32)f32 #v.193=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1977 0 1 26121 expr=1 pnnx.Expression pnnx_expr_1976 0 1 26122 expr=2 torch.matmul torch.matmul_2395 2 1 17972 v.193 17973 $input=17972 $other=v.193 #17972=(36,6,64,64)f32 #v.193=(36,6,64,32)f32 #17973=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1975 1 1 x2.121 17975 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.121=(36,64,192)f32 torch.transpose torch.transpose_3186 3 1 17973 26121 26122 17974 $input=17973 $dim0=26121 $dim1=26122 #17973=(36,6,64,32)f32 #17974=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_625 2 1 17974 17975 input0.125 $input=17974 $shape=17975 #17974=(36,64,6,32)f32 #input0.125=(36,64,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.5.attn.proj 1 1 input0.125 17977 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.125=(36,64,192)f32 #17977=(36,64,192)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.5.attn.proj_drop 1 1 17977 17978 #17977=(36,64,192)f32 #17978=(36,64,192)f32 pnnx.Expression pnnx_expr_1971 1 1 17842 17979 expr=[-1,8,8,int(size(@0,2))] 
#17842=(1,2304,192)f32 pnnx.Expression pnnx_expr_1958 1 1 14094 17985 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1951 0 1 17987 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2043 2 1 17978 17979 windows.193 $input=17978 $shape=17979 #17978=(36,64,192)f32 #windows.193=(36,8,8,192)f32 Tensor.view Tensor.view_2044 2 1 windows.193 17985 x3.121 $input=windows.193 $shape=17985 #windows.193=(36,8,8,192)f32 #x3.121=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1948 1 1 14094 17990 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2911 2 1 x3.121 17987 17988 $input=x3.121 $dims=17987 #x3.121=(1,6,6,8,8,192)f32 #17988=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_289 1 1 17988 17989 memory_format=torch.contiguous_format $input=17988 #17988=(1,6,8,6,8,192)f32 #17989=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1945 0 1 17992 expr=[4,4] pnnx.Expression pnnx_expr_1942 0 1 17993 expr=[1,2] Tensor.view Tensor.view_2045 2 1 17989 17990 shifted_x.97 $input=17989 $shape=17990 #17989=(1,6,8,6,8,192)f32 #shifted_x.97=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1939 2 1 17842 14094 17997 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #17842=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2515 3 1 shifted_x.97 17992 17993 x4.121 $input=shifted_x.97 $shifts=17992 $dims=17993 #shifted_x.97=(1,48,48,192)f32 #x4.121=(1,48,48,192)f32 Tensor.view Tensor.view_2046 2 1 x4.121 17997 x5.97 $input=x4.121 $shape=17997 #x4.121=(1,48,48,192)f32 #x5.97=(1,2304,192)f32 pnnx.Expression pnnx_expr_1937 2 1 17842 x5.97 input.433 expr=add(@0,@1) #17842=(1,2304,192)f32 #x5.97=(1,2304,192)f32 #input.433=(1,2304,192)f32 nn.LayerNorm layers_mmsa.3.residual_group.blocks.5.norm2 1 1 input.433 18001 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.433=(1,2304,192)f32 #18001=(1,2304,192)f32 nn.Linear layers_mmsa.3.residual_group.blocks.5.mlp.fc1 1 1 18001 18006 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18001=(1,2304,192)f32 #18006=(1,2304,384)f32 nn.GELU layers_mmsa.3.residual_group.blocks.5.mlp.act 1 1 18006 18007 #18006=(1,2304,384)f32 #18007=(1,2304,384)f32 nn.Dropout layers_mmsa.3.residual_group.blocks.5.mlp.drop 1 1 18007 18008 #18007=(1,2304,384)f32 #18008=(1,2304,384)f32 nn.Linear layers_mmsa.3.residual_group.blocks.5.mlp.fc2 1 1 18008 18009 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18008=(1,2304,384)f32 #18009=(1,2304,192)f32 nn.Dropout pnnx_unique_652 1 1 18009 18010 #18009=(1,2304,192)f32 #18010=(1,2304,192)f32 pnnx.Expression pnnx_expr_1935 2 1 input.433 18010 18012 expr=add(@0,@1) #input.433=(1,2304,192)f32 #18010=(1,2304,192)f32 #18012=(1,2304,192)f32 pnnx.Expression pnnx_expr_1933 0 1 18014 expr=1 pnnx.Expression pnnx_expr_1932 0 1 18015 expr=2 pnnx.Expression pnnx_expr_1927 2 1 18012 14094 18021 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #18012=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.transpose torch.transpose_3187 3 1 18012 18014 18015 18020 $input=18012 $dim0=18014 $dim1=18015 #18012=(1,2304,192)f32 #18020=(1,192,2304)f32 Tensor.view Tensor.view_2047 2 1 18020 18021 input.435 $input=18020 $shape=18021 #18020=(1,192,2304)f32 #input.435=(1,192,48,48)f32 nn.Conv2d layers_mmsa.3.conv 1 1 input.435 18023 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros 
stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.435=(1,192,48,48)f32 #18023=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1926 0 1 18024 expr=-1 pnnx.Expression pnnx_expr_1925 0 1 18025 expr=2 pnnx.Expression pnnx_expr_1924 0 1 18026 expr=1 pnnx.Expression pnnx_expr_1923 0 1 26149 expr=2 torch.flatten torch.flatten_2201 3 1 18023 18025 18024 18027 $input=18023 $start_dim=18025 $end_dim=18024 #18023=(1,192,48,48)f32 #18027=(1,192,2304)f32 torch.transpose torch.transpose_3188 3 1 18027 18026 26149 18028 $input=18027 $dim0=18026 $dim1=26149 #18027=(1,192,2304)f32 #18028=(1,2304,192)f32 pnnx.Expression pnnx_expr_1922 2 1 18028 17047 18029 expr=add(@0,@1) #18028=(1,2304,192)f32 #17047=(1,2304,192)f32 #18029=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.0.norm1 1 1 18029 18073 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18029=(1,2304,192)f32 #18073=(1,2304,192)f32 pnnx.Expression pnnx_expr_1899 2 1 18029 14094 18074 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18029=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2048 2 1 18073 18074 x.195 $input=18073 $shape=18074 #18073=(1,2304,192)f32 #x.195=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1878 1 1 x.195 18091 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.195=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1873 0 1 18093 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2049 2 1 x.195 18091 x0.123 $input=x.195 $shape=18091 #x.195=(1,48,48,192)f32 #x0.123=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1870 1 1 x.195 18096 expr=[-1,8,8,int(size(@0,3))] #x.195=(1,48,48,192)f32 torch.permute torch.permute_2912 2 1 x0.123 18093 18094 $input=x0.123 $dims=18093 #x0.123=(1,6,8,6,8,192)f32 #18094=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_290 1 1 18094 18095 memory_format=torch.contiguous_format $input=18094 #18094=(1,6,6,8,8,192)f32 #18095=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1868 1 1 18029 18098 expr=[-1,64,int(size(@0,2))] #18029=(1,2304,192)f32 pnnx.Expression pnnx_expr_1858 0 1 18109 expr=-2 pnnx.Expression pnnx_expr_1857 0 1 18110 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.0.attn 0 1 relative_position_bias_table.195 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.195=(225,6)f32 pnnx.Attribute pnnx_unique_653 0 1 relative_position_index.195 @relative_position_index=(64,64)i64 #relative_position_index.195=(64,64)i64 Tensor.view Tensor.view_2050 2 1 18095 18096 x_windows.195 $input=18095 $shape=18096 #18095=(1,6,6,8,8,192)f32 #x_windows.195=(36,8,8,192)f32 Tensor.view Tensor.view_2051 2 1 x_windows.195 18098 x1.123 $input=x_windows.195 $shape=18098 #x_windows.195=(36,8,8,192)f32 #x1.123=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.0.attn.qkv 1 1 x1.123 18130 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.123=(36,64,192)f32 #18130=(36,64,576)f32 pnnx.Expression pnnx_expr_1842 1 1 x1.123 18133 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.123=(36,64,192)f32 pnnx.Expression pnnx_expr_1837 0 1 18135 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_626 2 1 18130 18133 18134 $input=18130 $shape=18133 #18130=(36,64,576)f32 #18134=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1836 0 1 26167 expr=0 pnnx.Expression pnnx_expr_1835 0 1 26168 expr=0 pnnx.Expression pnnx_expr_1834 0 1 26169 expr=0 pnnx.Expression pnnx_expr_1833 0 1 26170 expr=1 pnnx.Expression pnnx_expr_1832 0 1 26171 
expr=0 pnnx.Expression pnnx_expr_1831 0 1 26172 expr=2 torch.permute torch.permute_2913 2 1 18134 18135 qkv0.123 $input=18134 $dims=18135 #18134=(36,64,3,6,32)f32 #qkv0.123=(3,36,6,64,32)f32 Tensor.select Tensor.select_938 3 1 qkv0.123 26167 26168 q.195 $input=qkv0.123 $dim=26167 $index=26168 #qkv0.123=(3,36,6,64,32)f32 #q.195=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1830 1 1 q.195 q0.123 expr=mul(@0,1.767767e-01) #q.195=(36,6,64,32)f32 #q0.123=(36,6,64,32)f32 Tensor.select Tensor.select_939 3 1 qkv0.123 26169 26170 k.195 $input=qkv0.123 $dim=26169 $index=26170 #qkv0.123=(3,36,6,64,32)f32 #k.195=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1828 0 1 18143 expr=[-1] Tensor.view Tensor.view_2052 2 1 relative_position_index.195 18143 18144 $input=relative_position_index.195 $shape=18143 #relative_position_index.195=(64,64)i64 #18144=(4096)i64 pnnx.Expression pnnx_expr_1827 1 1 18144 18145 expr=[@0] #18144=(4096)i64 pnnx.Expression pnnx_expr_1824 0 1 18147 expr=[64,64,-1] Tensor.index Tensor.index_422 2 1 relative_position_bias_table.195 18145 18146 $input=relative_position_bias_table.195 $expr=18145 #relative_position_bias_table.195=(225,6)f32 #18146=(4096,6)f32 pnnx.Expression pnnx_expr_1820 0 1 18149 expr=[2,0,1] Tensor.view Tensor.view_2053 2 1 18146 18147 relative_position_bias.195 $input=18146 $shape=18147 #18146=(4096,6)f32 #relative_position_bias.195=(64,64,6)f32 pnnx.Expression pnnx_expr_1819 0 1 26180 expr=0 torch.permute torch.permute_2914 2 1 relative_position_bias.195 18149 18150 $input=relative_position_bias.195 $dims=18149 #relative_position_bias.195=(64,64,6)f32 #18150=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_291 1 1 18150 relative_position_bias0.123 memory_format=torch.contiguous_format $input=18150 #18150=(6,64,64)f32 #relative_position_bias0.123=(6,64,64)f32 torch.transpose torch.transpose_3189 3 1 k.195 18109 18110 18141 $input=k.195 $dim0=18109 $dim1=18110 #k.195=(36,6,64,32)f32 #18141=(36,6,32,64)f32 torch.matmul torch.matmul_2396 2 1 q0.123 18141 attn.391 $input=q0.123 $other=18141 #q0.123=(36,6,64,32)f32 #18141=(36,6,32,64)f32 #attn.391=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3410 2 1 relative_position_bias0.123 26180 18152 $input=relative_position_bias0.123 $dim=26180 #relative_position_bias0.123=(6,64,64)f32 #18152=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1817 2 1 attn.391 18152 input.437 expr=add(@0,@1) #attn.391=(36,6,64,64)f32 #18152=(1,6,64,64)f32 #input.437=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.0.attn.softmax 1 1 input.437 18154 dim=-1 #input.437=(36,6,64,64)f32 #18154=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.0.attn.attn_drop 1 1 18154 18155 #18154=(36,6,64,64)f32 #18155=(36,6,64,64)f32 Tensor.select Tensor.select_940 3 1 qkv0.123 26171 26172 v.195 $input=qkv0.123 $dim=26171 $index=26172 #qkv0.123=(3,36,6,64,32)f32 #v.195=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1816 0 1 26182 expr=1 pnnx.Expression pnnx_expr_1815 0 1 26183 expr=2 torch.matmul torch.matmul_2397 2 1 18155 v.195 18156 $input=18155 $other=v.195 #18155=(36,6,64,64)f32 #v.195=(36,6,64,32)f32 #18156=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1814 1 1 x1.123 18158 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.123=(36,64,192)f32 torch.transpose torch.transpose_3190 3 1 18156 26182 26183 18157 $input=18156 $dim0=26182 $dim1=26183 #18156=(36,6,64,32)f32 #18157=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_627 2 1 18157 18158 input0.127 $input=18157 $shape=18158 #18157=(36,64,6,32)f32 #input0.127=(36,64,192)f32 nn.Linear 
layers_mmsa.4.residual_group.blocks.0.attn.proj 1 1 input0.127 18160 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.127=(36,64,192)f32 #18160=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.0.attn.proj_drop 1 1 18160 18161 #18160=(36,64,192)f32 #18161=(36,64,192)f32 pnnx.Expression pnnx_expr_1810 1 1 18029 18162 expr=[-1,8,8,int(size(@0,2))] #18029=(1,2304,192)f32 pnnx.Expression pnnx_expr_1797 1 1 14094 18168 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1790 0 1 18170 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2054 2 1 18161 18162 windows.195 $input=18161 $shape=18162 #18161=(36,64,192)f32 #windows.195=(36,8,8,192)f32 Tensor.view Tensor.view_2055 2 1 windows.195 18168 x2.123 $input=windows.195 $shape=18168 #windows.195=(36,8,8,192)f32 #x2.123=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1787 1 1 14094 18173 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2915 2 1 x2.123 18170 18171 $input=x2.123 $dims=18170 #x2.123=(1,6,6,8,8,192)f32 #18171=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_292 1 1 18171 18172 memory_format=torch.contiguous_format $input=18171 #18171=(1,6,8,6,8,192)f32 #18172=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1784 2 1 18029 14094 18177 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18029=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2056 2 1 18172 18173 x3.123 $input=18172 $shape=18173 #18172=(1,6,8,6,8,192)f32 #x3.123=(1,48,48,192)f32 Tensor.view Tensor.view_2057 2 1 x3.123 18177 x4.123 $input=x3.123 $shape=18177 #x3.123=(1,48,48,192)f32 #x4.123=(1,2304,192)f32 pnnx.Expression pnnx_expr_1782 2 1 18029 x4.123 input.439 expr=add(@0,@1) #18029=(1,2304,192)f32 #x4.123=(1,2304,192)f32 #input.439=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.0.norm2 1 1 input.439 18181 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.439=(1,2304,192)f32 #18181=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.0.mlp.fc1 1 1 18181 18186 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18181=(1,2304,192)f32 #18186=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.0.mlp.act 1 1 18186 18187 #18186=(1,2304,384)f32 #18187=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.0.mlp.drop 1 1 18187 18188 #18187=(1,2304,384)f32 #18188=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.0.mlp.fc2 1 1 18188 18189 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18188=(1,2304,384)f32 #18189=(1,2304,192)f32 nn.Dropout pnnx_unique_654 1 1 18189 18190 #18189=(1,2304,192)f32 #18190=(1,2304,192)f32 pnnx.Expression pnnx_expr_1780 2 1 input.439 18190 18192 expr=add(@0,@1) #input.439=(1,2304,192)f32 #18190=(1,2304,192)f32 #18192=(1,2304,192)f32 pnnx.Attribute layers_mmsa.4.residual_group.blocks.1 0 1 attn_mask.99 @attn_mask=(36,64,64)f32 #attn_mask.99=(36,64,64)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.1.norm1 1 1 18192 18221 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18192=(1,2304,192)f32 #18221=(1,2304,192)f32 pnnx.Expression pnnx_expr_1757 2 1 18192 14094 18222 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18192=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1755 0 1 18224 expr=[-4,-4] pnnx.Expression 
pnnx_expr_1753 0 1 18225 expr=[1,2] Tensor.view Tensor.view_2058 2 1 18221 18222 x.197 $input=18221 $shape=18222 #18221=(1,2304,192)f32 #x.197=(1,48,48,192)f32 torch.roll torch.roll_2516 3 1 x.197 18224 18225 x0.125 $input=x.197 $shifts=18224 $dims=18225 #x.197=(1,48,48,192)f32 #x0.125=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1731 1 1 x0.125 18242 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.125=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1726 0 1 18244 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2059 2 1 x0.125 18242 x1.125 $input=x0.125 $shape=18242 #x0.125=(1,48,48,192)f32 #x1.125=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1723 1 1 x0.125 18247 expr=[-1,8,8,int(size(@0,3))] #x0.125=(1,48,48,192)f32 torch.permute torch.permute_2916 2 1 x1.125 18244 18245 $input=x1.125 $dims=18244 #x1.125=(1,6,8,6,8,192)f32 #18245=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_293 1 1 18245 18246 memory_format=torch.contiguous_format $input=18245 #18245=(1,6,6,8,8,192)f32 #18246=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1721 1 1 18192 18249 expr=[-1,64,int(size(@0,2))] #18192=(1,2304,192)f32 pnnx.Expression pnnx_expr_1711 0 1 18260 expr=-2 pnnx.Expression pnnx_expr_1710 0 1 18261 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.1.attn 0 1 relative_position_bias_table.197 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.197=(225,6)f32 pnnx.Attribute pnnx_unique_655 0 1 relative_position_index.197 @relative_position_index=(64,64)i64 #relative_position_index.197=(64,64)i64 Tensor.view Tensor.view_2060 2 1 18246 18247 x_windows.197 $input=18246 $shape=18247 #18246=(1,6,6,8,8,192)f32 #x_windows.197=(36,8,8,192)f32 Tensor.view Tensor.view_2061 2 1 x_windows.197 18249 x2.125 $input=x_windows.197 $shape=18249 #x_windows.197=(36,8,8,192)f32 #x2.125=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.1.attn.qkv 1 1 x2.125 18285 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.125=(36,64,192)f32 #18285=(36,64,576)f32 pnnx.Expression pnnx_expr_1691 1 1 x2.125 18288 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.125=(36,64,192)f32 pnnx.Expression pnnx_expr_1686 0 1 18290 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_628 2 1 18285 18288 18289 $input=18285 $shape=18288 #18285=(36,64,576)f32 #18289=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1685 0 1 26226 expr=0 pnnx.Expression pnnx_expr_1684 0 1 26227 expr=0 pnnx.Expression pnnx_expr_1683 0 1 26228 expr=0 pnnx.Expression pnnx_expr_1682 0 1 26229 expr=1 pnnx.Expression pnnx_expr_1681 0 1 26230 expr=0 pnnx.Expression pnnx_expr_1680 0 1 26231 expr=2 torch.permute torch.permute_2917 2 1 18289 18290 qkv0.125 $input=18289 $dims=18290 #18289=(36,64,3,6,32)f32 #qkv0.125=(3,36,6,64,32)f32 Tensor.select Tensor.select_941 3 1 qkv0.125 26226 26227 q.197 $input=qkv0.125 $dim=26226 $index=26227 #qkv0.125=(3,36,6,64,32)f32 #q.197=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1679 1 1 q.197 q0.125 expr=mul(@0,1.767767e-01) #q.197=(36,6,64,32)f32 #q0.125=(36,6,64,32)f32 Tensor.select Tensor.select_942 3 1 qkv0.125 26228 26229 k.197 $input=qkv0.125 $dim=26228 $index=26229 #qkv0.125=(3,36,6,64,32)f32 #k.197=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1677 0 1 18298 expr=[-1] Tensor.view Tensor.view_2062 2 1 relative_position_index.197 18298 18299 $input=relative_position_index.197 $shape=18298 #relative_position_index.197=(64,64)i64 #18299=(4096)i64 pnnx.Expression pnnx_expr_1676 1 1 18299 18300 expr=[@0] #18299=(4096)i64 
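The torch.transpose_3187 / Tensor.view_2047 / nn.Conv2d layers_mmsa.3.conv / torch.flatten_2201 / torch.transpose_3188 run a little above is the bridge at the end of each residual group: the token sequence is reshaped back into a feature map, passed through a 3x3 convolution, flattened again, and added to the group's input (the long skip to 17047). A minimal sketch of that pattern, assuming B=1, C=192, H=W=48 as in the shape comments; variable names are illustrative only:

import torch
import torch.nn as nn

B, C, H, W = 1, 192, 48, 48
conv = nn.Conv2d(C, C, kernel_size=3, padding=1)  # stands in for layers_mmsa.3.conv

tokens = torch.randn(B, H * W, C)  # block-5 output, e.g. 18012 in the dump
skip = torch.randn(B, H * W, C)    # residual-group input, e.g. 17047 in the dump

fmap = tokens.transpose(1, 2).view(B, C, H, W)      # (1,2304,192) -> (1,192,48,48)
out = conv(fmap).flatten(2).transpose(1, 2) + skip  # back to (1,2304,192), long skip add

The padding=(1,1) on the 3x3 conv is what keeps the map at 48x48, so the flatten/transpose lands back on the (1,2304,192) token layout that the next residual group's norm1 expects.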
pnnx.Expression pnnx_expr_1673 0 1 18302 expr=[64,64,-1] Tensor.index Tensor.index_423 2 1 relative_position_bias_table.197 18300 18301 $input=relative_position_bias_table.197 $expr=18300 #relative_position_bias_table.197=(225,6)f32 #18301=(4096,6)f32 pnnx.Expression pnnx_expr_1669 0 1 18304 expr=[2,0,1] Tensor.view Tensor.view_2063 2 1 18301 18302 relative_position_bias.197 $input=18301 $shape=18302 #18301=(4096,6)f32 #relative_position_bias.197=(64,64,6)f32 pnnx.Expression pnnx_expr_1668 0 1 26239 expr=0 torch.permute torch.permute_2918 2 1 relative_position_bias.197 18304 18305 $input=relative_position_bias.197 $dims=18304 #relative_position_bias.197=(64,64,6)f32 #18305=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_294 1 1 18305 relative_position_bias0.125 memory_format=torch.contiguous_format $input=18305 #18305=(6,64,64)f32 #relative_position_bias0.125=(6,64,64)f32 torch.transpose torch.transpose_3191 3 1 k.197 18260 18261 18296 $input=k.197 $dim0=18260 $dim1=18261 #k.197=(36,6,64,32)f32 #18296=(36,6,32,64)f32 torch.matmul torch.matmul_2398 2 1 q0.125 18296 attn.395 $input=q0.125 $other=18296 #q0.125=(36,6,64,32)f32 #18296=(36,6,32,64)f32 #attn.395=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3411 2 1 relative_position_bias0.125 26239 18307 $input=relative_position_bias0.125 $dim=26239 #relative_position_bias0.125=(6,64,64)f32 #18307=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1666 2 1 attn.395 18307 attn0.63 expr=add(@0,@1) #attn.395=(36,6,64,64)f32 #18307=(1,6,64,64)f32 #attn0.63=(36,6,64,64)f32 pnnx.Expression pnnx_expr_1657 2 1 x2.125 attn_mask.99 18314 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.125=(36,64,192)f32 #attn_mask.99=(36,64,64)f32 pnnx.Expression pnnx_expr_1656 0 1 26244 expr=1 pnnx.Expression pnnx_expr_1655 0 1 26245 expr=0 Tensor.view Tensor.view_2064 2 1 attn0.63 18314 18315 $input=attn0.63 $shape=18314 #attn0.63=(36,6,64,64)f32 #18315=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3412 2 1 attn_mask.99 26244 18316 $input=attn_mask.99 $dim=26244 #attn_mask.99=(36,64,64)f32 #18316=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3413 2 1 18316 26245 18317 $input=18316 $dim=26245 #18316=(36,1,64,64)f32 #18317=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_1653 2 1 18315 18317 attn1.63 expr=add(@0,@1) #18315=(1,36,6,64,64)f32 #18317=(1,36,1,64,64)f32 #attn1.63=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_1650 1 1 x2.125 18319 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.125=(36,64,192)f32 Tensor.view Tensor.view_2065 2 1 attn1.63 18319 input.441 $input=attn1.63 $shape=18319 #attn1.63=(1,36,6,64,64)f32 #input.441=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.1.attn.softmax 1 1 input.441 18321 dim=-1 #input.441=(36,6,64,64)f32 #18321=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.1.attn.attn_drop 1 1 18321 18322 #18321=(36,6,64,64)f32 #18322=(36,6,64,64)f32 Tensor.select Tensor.select_943 3 1 qkv0.125 26230 26231 v.197 $input=qkv0.125 $dim=26230 $index=26231 #qkv0.125=(3,36,6,64,32)f32 #v.197=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1649 0 1 26249 expr=1 pnnx.Expression pnnx_expr_1648 0 1 26250 expr=2 torch.matmul torch.matmul_2399 2 1 18322 v.197 18323 $input=18322 $other=v.197 #18322=(36,6,64,64)f32 #v.197=(36,6,64,32)f32 #18323=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1647 1 1 x2.125 18325 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.125=(36,64,192)f32 torch.transpose torch.transpose_3192 3 1 18323 26249 26250 18324 $input=18323 $dim0=26249 $dim1=26250 
#18323=(36,6,64,32)f32 #18324=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_629 2 1 18324 18325 input0.129 $input=18324 $shape=18325 #18324=(36,64,6,32)f32 #input0.129=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.1.attn.proj 1 1 input0.129 18327 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.129=(36,64,192)f32 #18327=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.1.attn.proj_drop 1 1 18327 18328 #18327=(36,64,192)f32 #18328=(36,64,192)f32 pnnx.Expression pnnx_expr_1643 1 1 18192 18329 expr=[-1,8,8,int(size(@0,2))] #18192=(1,2304,192)f32 pnnx.Expression pnnx_expr_1630 1 1 14094 18335 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1623 0 1 18337 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2066 2 1 18328 18329 windows.197 $input=18328 $shape=18329 #18328=(36,64,192)f32 #windows.197=(36,8,8,192)f32 Tensor.view Tensor.view_2067 2 1 windows.197 18335 x3.125 $input=windows.197 $shape=18335 #windows.197=(36,8,8,192)f32 #x3.125=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1620 1 1 14094 18340 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2919 2 1 x3.125 18337 18338 $input=x3.125 $dims=18337 #x3.125=(1,6,6,8,8,192)f32 #18338=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_295 1 1 18338 18339 memory_format=torch.contiguous_format $input=18338 #18338=(1,6,8,6,8,192)f32 #18339=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1617 0 1 18342 expr=[4,4] pnnx.Expression pnnx_expr_1614 0 1 18343 expr=[1,2] Tensor.view Tensor.view_2068 2 1 18339 18340 shifted_x.99 $input=18339 $shape=18340 #18339=(1,6,8,6,8,192)f32 #shifted_x.99=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1611 2 1 18192 14094 18347 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18192=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2517 3 1 shifted_x.99 18342 18343 x4.125 $input=shifted_x.99 $shifts=18342 $dims=18343 #shifted_x.99=(1,48,48,192)f32 #x4.125=(1,48,48,192)f32 Tensor.view Tensor.view_2069 2 1 x4.125 18347 x5.99 $input=x4.125 $shape=18347 #x4.125=(1,48,48,192)f32 #x5.99=(1,2304,192)f32 pnnx.Expression pnnx_expr_1609 2 1 18192 x5.99 input.443 expr=add(@0,@1) #18192=(1,2304,192)f32 #x5.99=(1,2304,192)f32 #input.443=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.1.norm2 1 1 input.443 18351 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.443=(1,2304,192)f32 #18351=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.1.mlp.fc1 1 1 18351 18356 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18351=(1,2304,192)f32 #18356=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.1.mlp.act 1 1 18356 18357 #18356=(1,2304,384)f32 #18357=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.1.mlp.drop 1 1 18357 18358 #18357=(1,2304,384)f32 #18358=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.1.mlp.fc2 1 1 18358 18359 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18358=(1,2304,384)f32 #18359=(1,2304,192)f32 nn.Dropout pnnx_unique_656 1 1 18359 18360 #18359=(1,2304,192)f32 #18360=(1,2304,192)f32 pnnx.Expression pnnx_expr_1607 2 1 input.443 18360 18362 expr=add(@0,@1) #input.443=(1,2304,192)f32 #18360=(1,2304,192)f32 #18362=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.2.norm1 1 1 18362 18389 elementwise_affine=True eps=1.000000e-05 
normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18362=(1,2304,192)f32 #18389=(1,2304,192)f32 pnnx.Expression pnnx_expr_1585 2 1 18362 14094 18390 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18362=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2070 2 1 18389 18390 x.199 $input=18389 $shape=18390 #18389=(1,2304,192)f32 #x.199=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1564 1 1 x.199 18407 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.199=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1559 0 1 18409 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2071 2 1 x.199 18407 x0.127 $input=x.199 $shape=18407 #x.199=(1,48,48,192)f32 #x0.127=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1556 1 1 x.199 18412 expr=[-1,8,8,int(size(@0,3))] #x.199=(1,48,48,192)f32 torch.permute torch.permute_2920 2 1 x0.127 18409 18410 $input=x0.127 $dims=18409 #x0.127=(1,6,8,6,8,192)f32 #18410=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_296 1 1 18410 18411 memory_format=torch.contiguous_format $input=18410 #18410=(1,6,6,8,8,192)f32 #18411=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1554 1 1 18362 18414 expr=[-1,64,int(size(@0,2))] #18362=(1,2304,192)f32 pnnx.Expression pnnx_expr_1544 0 1 18425 expr=-2 pnnx.Expression pnnx_expr_1543 0 1 18426 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.2.attn 0 1 relative_position_bias_table.199 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.199=(225,6)f32 pnnx.Attribute pnnx_unique_657 0 1 relative_position_index.199 @relative_position_index=(64,64)i64 #relative_position_index.199=(64,64)i64 Tensor.view Tensor.view_2072 2 1 18411 18412 x_windows.199 $input=18411 $shape=18412 #18411=(1,6,6,8,8,192)f32 #x_windows.199=(36,8,8,192)f32 Tensor.view Tensor.view_2073 2 1 x_windows.199 18414 x1.127 $input=x_windows.199 $shape=18414 #x_windows.199=(36,8,8,192)f32 #x1.127=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.2.attn.qkv 1 1 x1.127 18446 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.127=(36,64,192)f32 #18446=(36,64,576)f32 pnnx.Expression pnnx_expr_1528 1 1 x1.127 18449 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.127=(36,64,192)f32 pnnx.Expression pnnx_expr_1523 0 1 18451 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_630 2 1 18446 18449 18450 $input=18446 $shape=18449 #18446=(36,64,576)f32 #18450=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1522 0 1 26294 expr=0 pnnx.Expression pnnx_expr_1521 0 1 26295 expr=0 pnnx.Expression pnnx_expr_1520 0 1 26296 expr=0 pnnx.Expression pnnx_expr_1519 0 1 26297 expr=1 pnnx.Expression pnnx_expr_1518 0 1 26298 expr=0 pnnx.Expression pnnx_expr_1517 0 1 26299 expr=2 torch.permute torch.permute_2921 2 1 18450 18451 qkv0.127 $input=18450 $dims=18451 #18450=(36,64,3,6,32)f32 #qkv0.127=(3,36,6,64,32)f32 Tensor.select Tensor.select_944 3 1 qkv0.127 26294 26295 q.199 $input=qkv0.127 $dim=26294 $index=26295 #qkv0.127=(3,36,6,64,32)f32 #q.199=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1516 1 1 q.199 q0.127 expr=mul(@0,1.767767e-01) #q.199=(36,6,64,32)f32 #q0.127=(36,6,64,32)f32 Tensor.select Tensor.select_945 3 1 qkv0.127 26296 26297 k.199 $input=qkv0.127 $dim=26296 $index=26297 #qkv0.127=(3,36,6,64,32)f32 #k.199=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1514 0 1 18459 expr=[-1] Tensor.view Tensor.view_2074 2 1 relative_position_index.199 18459 18460 $input=relative_position_index.199 $shape=18459 #relative_position_index.199=(64,64)i64 #18460=(4096)i64 
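For readers tracing the attention records around here (Tensor.reshape_630, torch.permute_2921, the three Tensor.select records, the mul(@0,1.767767e-01) scaling, torch.transpose_3193, torch.matmul_2400, and the relative-position-bias chain): this is the standard windowed multi-head attention score computation that every block in the dump repeats. A rough PyTorch equivalent, with dimensions read off the shape comments (36 windows, 64 tokens per window, 6 heads of 32 channels); the constant 1.767767e-01 is 32**-0.5:

import torch
import torch.nn as nn

nW, N, C, heads = 36, 64, 192, 6  # windows, tokens per window, channels, heads
head_dim = C // heads             # 32

qkv_proj = nn.Linear(C, 3 * C)    # attn.qkv in the dump (192 -> 576)
table = nn.Parameter(torch.zeros((2 * 8 - 1) ** 2, heads))  # (225, 6) learned bias table
index = torch.randint(0, 225, (N, N))  # stand-in for the precomputed (64, 64) index buffer

x = torch.randn(nW, N, C)         # e.g. x1.127 in the dump
qkv = qkv_proj(x).reshape(nW, N, 3, heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]  # each (36, 6, 64, 32), the three Tensor.select records

attn = (q * head_dim ** -0.5) @ k.transpose(-2, -1)  # (36, 6, 64, 64) scores
bias = table[index.view(-1)].view(N, N, -1).permute(2, 0, 1).contiguous()  # (6, 64, 64)
attn = (attn + bias.unsqueeze(0)).softmax(dim=-1)    # broadcast bias over the 36 windows

The records that follow are the matching tail: attn_drop, attn @ v, a transpose/reshape back to (36, 64, 192), then the proj linear and proj_drop.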
pnnx.Expression pnnx_expr_1513 1 1 18460 18461 expr=[@0] #18460=(4096)i64 pnnx.Expression pnnx_expr_1510 0 1 18463 expr=[64,64,-1] Tensor.index Tensor.index_424 2 1 relative_position_bias_table.199 18461 18462 $input=relative_position_bias_table.199 $expr=18461 #relative_position_bias_table.199=(225,6)f32 #18462=(4096,6)f32 pnnx.Expression pnnx_expr_1506 0 1 18465 expr=[2,0,1] Tensor.view Tensor.view_2075 2 1 18462 18463 relative_position_bias.199 $input=18462 $shape=18463 #18462=(4096,6)f32 #relative_position_bias.199=(64,64,6)f32 pnnx.Expression pnnx_expr_1505 0 1 26307 expr=0 torch.permute torch.permute_2922 2 1 relative_position_bias.199 18465 18466 $input=relative_position_bias.199 $dims=18465 #relative_position_bias.199=(64,64,6)f32 #18466=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_297 1 1 18466 relative_position_bias0.127 memory_format=torch.contiguous_format $input=18466 #18466=(6,64,64)f32 #relative_position_bias0.127=(6,64,64)f32 torch.transpose torch.transpose_3193 3 1 k.199 18425 18426 18457 $input=k.199 $dim0=18425 $dim1=18426 #k.199=(36,6,64,32)f32 #18457=(36,6,32,64)f32 torch.matmul torch.matmul_2400 2 1 q0.127 18457 attn.399 $input=q0.127 $other=18457 #q0.127=(36,6,64,32)f32 #18457=(36,6,32,64)f32 #attn.399=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3414 2 1 relative_position_bias0.127 26307 18468 $input=relative_position_bias0.127 $dim=26307 #relative_position_bias0.127=(6,64,64)f32 #18468=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1503 2 1 attn.399 18468 input.445 expr=add(@0,@1) #attn.399=(36,6,64,64)f32 #18468=(1,6,64,64)f32 #input.445=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.2.attn.softmax 1 1 input.445 18470 dim=-1 #input.445=(36,6,64,64)f32 #18470=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.2.attn.attn_drop 1 1 18470 18471 #18470=(36,6,64,64)f32 #18471=(36,6,64,64)f32 Tensor.select Tensor.select_946 3 1 qkv0.127 26298 26299 v.199 $input=qkv0.127 $dim=26298 $index=26299 #qkv0.127=(3,36,6,64,32)f32 #v.199=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1502 0 1 26309 expr=1 pnnx.Expression pnnx_expr_1501 0 1 26310 expr=2 torch.matmul torch.matmul_2401 2 1 18471 v.199 18472 $input=18471 $other=v.199 #18471=(36,6,64,64)f32 #v.199=(36,6,64,32)f32 #18472=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1500 1 1 x1.127 18474 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.127=(36,64,192)f32 torch.transpose torch.transpose_3194 3 1 18472 26309 26310 18473 $input=18472 $dim0=26309 $dim1=26310 #18472=(36,6,64,32)f32 #18473=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_631 2 1 18473 18474 input0.131 $input=18473 $shape=18474 #18473=(36,64,6,32)f32 #input0.131=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.2.attn.proj 1 1 input0.131 18476 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.131=(36,64,192)f32 #18476=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.2.attn.proj_drop 1 1 18476 18477 #18476=(36,64,192)f32 #18477=(36,64,192)f32 pnnx.Expression pnnx_expr_1496 1 1 18362 18478 expr=[-1,8,8,int(size(@0,2))] #18362=(1,2304,192)f32 pnnx.Expression pnnx_expr_1483 1 1 14094 18484 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1476 0 1 18486 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2076 2 1 18477 18478 windows.199 $input=18477 $shape=18478 #18477=(36,64,192)f32 #windows.199=(36,8,8,192)f32 Tensor.view Tensor.view_2077 2 1 windows.199 18484 x2.127 $input=windows.199 $shape=18484 
#windows.199=(36,8,8,192)f32 #x2.127=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1473 1 1 14094 18489 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2923 2 1 x2.127 18486 18487 $input=x2.127 $dims=18486 #x2.127=(1,6,6,8,8,192)f32 #18487=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_298 1 1 18487 18488 memory_format=torch.contiguous_format $input=18487 #18487=(1,6,8,6,8,192)f32 #18488=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1470 2 1 18362 14094 18493 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18362=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2078 2 1 18488 18489 x3.127 $input=18488 $shape=18489 #18488=(1,6,8,6,8,192)f32 #x3.127=(1,48,48,192)f32 Tensor.view Tensor.view_2079 2 1 x3.127 18493 x4.127 $input=x3.127 $shape=18493 #x3.127=(1,48,48,192)f32 #x4.127=(1,2304,192)f32 pnnx.Expression pnnx_expr_1468 2 1 18362 x4.127 input.447 expr=add(@0,@1) #18362=(1,2304,192)f32 #x4.127=(1,2304,192)f32 #input.447=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.2.norm2 1 1 input.447 18497 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.447=(1,2304,192)f32 #18497=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.2.mlp.fc1 1 1 18497 18502 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18497=(1,2304,192)f32 #18502=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.2.mlp.act 1 1 18502 18503 #18502=(1,2304,384)f32 #18503=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.2.mlp.drop 1 1 18503 18504 #18503=(1,2304,384)f32 #18504=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.2.mlp.fc2 1 1 18504 18505 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18504=(1,2304,384)f32 #18505=(1,2304,192)f32 nn.Dropout pnnx_unique_658 1 1 18505 18506 #18505=(1,2304,192)f32 #18506=(1,2304,192)f32 pnnx.Expression pnnx_expr_1466 2 1 input.447 18506 18508 expr=add(@0,@1) #input.447=(1,2304,192)f32 #18506=(1,2304,192)f32 #18508=(1,2304,192)f32 pnnx.Attribute layers_mmsa.4.residual_group.blocks.3 0 1 attn_mask.101 @attn_mask=(36,64,64)f32 #attn_mask.101=(36,64,64)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.3.norm1 1 1 18508 18537 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18508=(1,2304,192)f32 #18537=(1,2304,192)f32 pnnx.Expression pnnx_expr_1443 2 1 18508 14094 18538 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18508=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1441 0 1 18540 expr=[-4,-4] pnnx.Expression pnnx_expr_1439 0 1 18541 expr=[1,2] Tensor.view Tensor.view_2080 2 1 18537 18538 x.201 $input=18537 $shape=18538 #18537=(1,2304,192)f32 #x.201=(1,48,48,192)f32 torch.roll torch.roll_2518 3 1 x.201 18540 18541 x0.129 $input=x.201 $shifts=18540 $dims=18541 #x.201=(1,48,48,192)f32 #x0.129=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1417 1 1 x0.129 18558 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.129=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1412 0 1 18560 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2081 2 1 x0.129 18558 x1.129 $input=x0.129 $shape=18558 #x0.129=(1,48,48,192)f32 #x1.129=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1409 1 1 x0.129 18563 expr=[-1,8,8,int(size(@0,3))] #x0.129=(1,48,48,192)f32 torch.permute torch.permute_2924 2 1 x1.129 18560 18561 $input=x1.129 $dims=18560 
#x1.129=(1,6,8,6,8,192)f32 #18561=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_299 1 1 18561 18562 memory_format=torch.contiguous_format $input=18561 #18561=(1,6,6,8,8,192)f32 #18562=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1407 1 1 18508 18565 expr=[-1,64,int(size(@0,2))] #18508=(1,2304,192)f32 pnnx.Expression pnnx_expr_1397 0 1 18576 expr=-2 pnnx.Expression pnnx_expr_1396 0 1 18577 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.3.attn 0 1 relative_position_bias_table.201 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.201=(225,6)f32 pnnx.Attribute pnnx_unique_659 0 1 relative_position_index.201 @relative_position_index=(64,64)i64 #relative_position_index.201=(64,64)i64 Tensor.view Tensor.view_2082 2 1 18562 18563 x_windows.201 $input=18562 $shape=18563 #18562=(1,6,6,8,8,192)f32 #x_windows.201=(36,8,8,192)f32 Tensor.view Tensor.view_2083 2 1 x_windows.201 18565 x2.129 $input=x_windows.201 $shape=18565 #x_windows.201=(36,8,8,192)f32 #x2.129=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.3.attn.qkv 1 1 x2.129 18601 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.129=(36,64,192)f32 #18601=(36,64,576)f32 pnnx.Expression pnnx_expr_1377 1 1 x2.129 18604 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.129=(36,64,192)f32 pnnx.Expression pnnx_expr_1372 0 1 18606 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_632 2 1 18601 18604 18605 $input=18601 $shape=18604 #18601=(36,64,576)f32 #18605=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1371 0 1 26353 expr=0 pnnx.Expression pnnx_expr_1370 0 1 26354 expr=0 pnnx.Expression pnnx_expr_1369 0 1 26355 expr=0 pnnx.Expression pnnx_expr_1368 0 1 26356 expr=1 pnnx.Expression pnnx_expr_1367 0 1 26357 expr=0 pnnx.Expression pnnx_expr_1366 0 1 26358 expr=2 torch.permute torch.permute_2925 2 1 18605 18606 qkv0.129 $input=18605 $dims=18606 #18605=(36,64,3,6,32)f32 #qkv0.129=(3,36,6,64,32)f32 Tensor.select Tensor.select_947 3 1 qkv0.129 26353 26354 q.201 $input=qkv0.129 $dim=26353 $index=26354 #qkv0.129=(3,36,6,64,32)f32 #q.201=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1365 1 1 q.201 q0.129 expr=mul(@0,1.767767e-01) #q.201=(36,6,64,32)f32 #q0.129=(36,6,64,32)f32 Tensor.select Tensor.select_948 3 1 qkv0.129 26355 26356 k.201 $input=qkv0.129 $dim=26355 $index=26356 #qkv0.129=(3,36,6,64,32)f32 #k.201=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1363 0 1 18614 expr=[-1] Tensor.view Tensor.view_2084 2 1 relative_position_index.201 18614 18615 $input=relative_position_index.201 $shape=18614 #relative_position_index.201=(64,64)i64 #18615=(4096)i64 pnnx.Expression pnnx_expr_1362 1 1 18615 18616 expr=[@0] #18615=(4096)i64 pnnx.Expression pnnx_expr_1359 0 1 18618 expr=[64,64,-1] Tensor.index Tensor.index_425 2 1 relative_position_bias_table.201 18616 18617 $input=relative_position_bias_table.201 $expr=18616 #relative_position_bias_table.201=(225,6)f32 #18617=(4096,6)f32 pnnx.Expression pnnx_expr_1355 0 1 18620 expr=[2,0,1] Tensor.view Tensor.view_2085 2 1 18617 18618 relative_position_bias.201 $input=18617 $shape=18618 #18617=(4096,6)f32 #relative_position_bias.201=(64,64,6)f32 pnnx.Expression pnnx_expr_1354 0 1 26366 expr=0 torch.permute torch.permute_2926 2 1 relative_position_bias.201 18620 18621 $input=relative_position_bias.201 $dims=18620 #relative_position_bias.201=(64,64,6)f32 #18621=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_300 1 1 18621 relative_position_bias0.129 memory_format=torch.contiguous_format $input=18621 #18621=(6,64,64)f32 
#relative_position_bias0.129=(6,64,64)f32 torch.transpose torch.transpose_3195 3 1 k.201 18576 18577 18612 $input=k.201 $dim0=18576 $dim1=18577 #k.201=(36,6,64,32)f32 #18612=(36,6,32,64)f32 torch.matmul torch.matmul_2402 2 1 q0.129 18612 attn.403 $input=q0.129 $other=18612 #q0.129=(36,6,64,32)f32 #18612=(36,6,32,64)f32 #attn.403=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3415 2 1 relative_position_bias0.129 26366 18623 $input=relative_position_bias0.129 $dim=26366 #relative_position_bias0.129=(6,64,64)f32 #18623=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1352 2 1 attn.403 18623 attn0.65 expr=add(@0,@1) #attn.403=(36,6,64,64)f32 #18623=(1,6,64,64)f32 #attn0.65=(36,6,64,64)f32 pnnx.Expression pnnx_expr_1343 2 1 x2.129 attn_mask.101 18630 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.129=(36,64,192)f32 #attn_mask.101=(36,64,64)f32 pnnx.Expression pnnx_expr_1342 0 1 26371 expr=1 pnnx.Expression pnnx_expr_1341 0 1 26372 expr=0 Tensor.view Tensor.view_2086 2 1 attn0.65 18630 18631 $input=attn0.65 $shape=18630 #attn0.65=(36,6,64,64)f32 #18631=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3416 2 1 attn_mask.101 26371 18632 $input=attn_mask.101 $dim=26371 #attn_mask.101=(36,64,64)f32 #18632=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3417 2 1 18632 26372 18633 $input=18632 $dim=26372 #18632=(36,1,64,64)f32 #18633=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_1339 2 1 18631 18633 attn1.65 expr=add(@0,@1) #18631=(1,36,6,64,64)f32 #18633=(1,36,1,64,64)f32 #attn1.65=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_1336 1 1 x2.129 18635 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.129=(36,64,192)f32 Tensor.view Tensor.view_2087 2 1 attn1.65 18635 input.449 $input=attn1.65 $shape=18635 #attn1.65=(1,36,6,64,64)f32 #input.449=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.3.attn.softmax 1 1 input.449 18637 dim=-1 #input.449=(36,6,64,64)f32 #18637=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.3.attn.attn_drop 1 1 18637 18638 #18637=(36,6,64,64)f32 #18638=(36,6,64,64)f32 Tensor.select Tensor.select_949 3 1 qkv0.129 26357 26358 v.201 $input=qkv0.129 $dim=26357 $index=26358 #qkv0.129=(3,36,6,64,32)f32 #v.201=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1335 0 1 26376 expr=1 pnnx.Expression pnnx_expr_1334 0 1 26377 expr=2 torch.matmul torch.matmul_2403 2 1 18638 v.201 18639 $input=18638 $other=v.201 #18638=(36,6,64,64)f32 #v.201=(36,6,64,32)f32 #18639=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1333 1 1 x2.129 18641 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.129=(36,64,192)f32 torch.transpose torch.transpose_3196 3 1 18639 26376 26377 18640 $input=18639 $dim0=26376 $dim1=26377 #18639=(36,6,64,32)f32 #18640=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_633 2 1 18640 18641 input0.133 $input=18640 $shape=18641 #18640=(36,64,6,32)f32 #input0.133=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.3.attn.proj 1 1 input0.133 18643 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.133=(36,64,192)f32 #18643=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.3.attn.proj_drop 1 1 18643 18644 #18643=(36,64,192)f32 #18644=(36,64,192)f32 pnnx.Expression pnnx_expr_1329 1 1 18508 18645 expr=[-1,8,8,int(size(@0,2))] #18508=(1,2304,192)f32 pnnx.Expression pnnx_expr_1316 1 1 14094 18651 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1309 0 1 18653 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2088 2 1 18644 18645 
windows.201 $input=18644 $shape=18645 #18644=(36,64,192)f32 #windows.201=(36,8,8,192)f32 Tensor.view Tensor.view_2089 2 1 windows.201 18651 x3.129 $input=windows.201 $shape=18651 #windows.201=(36,8,8,192)f32 #x3.129=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1306 1 1 14094 18656 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2927 2 1 x3.129 18653 18654 $input=x3.129 $dims=18653 #x3.129=(1,6,6,8,8,192)f32 #18654=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_301 1 1 18654 18655 memory_format=torch.contiguous_format $input=18654 #18654=(1,6,8,6,8,192)f32 #18655=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1303 0 1 18658 expr=[4,4] pnnx.Expression pnnx_expr_1300 0 1 18659 expr=[1,2] Tensor.view Tensor.view_2090 2 1 18655 18656 shifted_x.101 $input=18655 $shape=18656 #18655=(1,6,8,6,8,192)f32 #shifted_x.101=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1297 2 1 18508 14094 18663 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18508=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2519 3 1 shifted_x.101 18658 18659 x4.129 $input=shifted_x.101 $shifts=18658 $dims=18659 #shifted_x.101=(1,48,48,192)f32 #x4.129=(1,48,48,192)f32 Tensor.view Tensor.view_2091 2 1 x4.129 18663 x5.101 $input=x4.129 $shape=18663 #x4.129=(1,48,48,192)f32 #x5.101=(1,2304,192)f32 pnnx.Expression pnnx_expr_1295 2 1 18508 x5.101 input.451 expr=add(@0,@1) #18508=(1,2304,192)f32 #x5.101=(1,2304,192)f32 #input.451=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.3.norm2 1 1 input.451 18667 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.451=(1,2304,192)f32 #18667=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.3.mlp.fc1 1 1 18667 18672 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18667=(1,2304,192)f32 #18672=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.3.mlp.act 1 1 18672 18673 #18672=(1,2304,384)f32 #18673=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.3.mlp.drop 1 1 18673 18674 #18673=(1,2304,384)f32 #18674=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.3.mlp.fc2 1 1 18674 18675 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18674=(1,2304,384)f32 #18675=(1,2304,192)f32 nn.Dropout pnnx_unique_660 1 1 18675 18676 #18675=(1,2304,192)f32 #18676=(1,2304,192)f32 pnnx.Expression pnnx_expr_1293 2 1 input.451 18676 18678 expr=add(@0,@1) #input.451=(1,2304,192)f32 #18676=(1,2304,192)f32 #18678=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.4.norm1 1 1 18678 18705 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18678=(1,2304,192)f32 #18705=(1,2304,192)f32 pnnx.Expression pnnx_expr_1271 2 1 18678 14094 18706 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18678=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2092 2 1 18705 18706 x.203 $input=18705 $shape=18706 #18705=(1,2304,192)f32 #x.203=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1250 1 1 x.203 18723 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.203=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1245 0 1 18725 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2093 2 1 x.203 18723 x0.131 $input=x.203 $shape=18723 #x.203=(1,48,48,192)f32 #x0.131=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1242 1 1 x.203 18728 expr=[-1,8,8,int(size(@0,3))] #x.203=(1,48,48,192)f32 
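The view_2093 / permute_2928 / Tensor.contiguous / view_2094 / view_2095 chain starting here, and the mirrored chain ending in view_2100/view_2101 just above it, are the usual 8x8 window partition and its inverse. A self-contained sketch under the (1, 48, 48, 192) layout used throughout; the graph's two trailing views ((-1,8,8,C) then (-1,64,C)) are collapsed into one here:

import torch

def window_partition(x, ws=8):
    # view -> permute(0,1,3,2,4,5) -> contiguous -> view, as in the dump
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(-1, ws * ws, C)  # (B * num_windows, 64, C)

def window_reverse(windows, ws, H, W):
    # the mirrored view/permute/view chain that rebuilds (B, H, W, C)
    B = windows.shape[0] // ((H // ws) * (W // ws))
    x = windows.view(B, H // ws, W // ws, ws, ws, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)

x = torch.randn(1, 48, 48, 192)
w = window_partition(x)  # (36, 64, 192), matching the x_windows shapes above
assert torch.equal(window_reverse(w, 8, 48, 48), x)  # exact round trip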
torch.permute torch.permute_2928 2 1 x0.131 18725 18726 $input=x0.131 $dims=18725 #x0.131=(1,6,8,6,8,192)f32 #18726=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_302 1 1 18726 18727 memory_format=torch.contiguous_format $input=18726 #18726=(1,6,6,8,8,192)f32 #18727=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1240 1 1 18678 18730 expr=[-1,64,int(size(@0,2))] #18678=(1,2304,192)f32 pnnx.Expression pnnx_expr_1230 0 1 18741 expr=-2 pnnx.Expression pnnx_expr_1229 0 1 18742 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.4.attn 0 1 relative_position_bias_table.203 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.203=(225,6)f32 pnnx.Attribute pnnx_unique_661 0 1 relative_position_index.203 @relative_position_index=(64,64)i64 #relative_position_index.203=(64,64)i64 Tensor.view Tensor.view_2094 2 1 18727 18728 x_windows.203 $input=18727 $shape=18728 #18727=(1,6,6,8,8,192)f32 #x_windows.203=(36,8,8,192)f32 Tensor.view Tensor.view_2095 2 1 x_windows.203 18730 x1.131 $input=x_windows.203 $shape=18730 #x_windows.203=(36,8,8,192)f32 #x1.131=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.4.attn.qkv 1 1 x1.131 18762 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.131=(36,64,192)f32 #18762=(36,64,576)f32 pnnx.Expression pnnx_expr_1214 1 1 x1.131 18765 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.131=(36,64,192)f32 pnnx.Expression pnnx_expr_1209 0 1 18767 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_634 2 1 18762 18765 18766 $input=18762 $shape=18765 #18762=(36,64,576)f32 #18766=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1208 0 1 26421 expr=0 pnnx.Expression pnnx_expr_1207 0 1 26422 expr=0 pnnx.Expression pnnx_expr_1206 0 1 26423 expr=0 pnnx.Expression pnnx_expr_1205 0 1 26424 expr=1 pnnx.Expression pnnx_expr_1204 0 1 26425 expr=0 pnnx.Expression pnnx_expr_1203 0 1 26426 expr=2 torch.permute torch.permute_2929 2 1 18766 18767 qkv0.131 $input=18766 $dims=18767 #18766=(36,64,3,6,32)f32 #qkv0.131=(3,36,6,64,32)f32 Tensor.select Tensor.select_950 3 1 qkv0.131 26421 26422 q.203 $input=qkv0.131 $dim=26421 $index=26422 #qkv0.131=(3,36,6,64,32)f32 #q.203=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1202 1 1 q.203 q0.131 expr=mul(@0,1.767767e-01) #q.203=(36,6,64,32)f32 #q0.131=(36,6,64,32)f32 Tensor.select Tensor.select_951 3 1 qkv0.131 26423 26424 k.203 $input=qkv0.131 $dim=26423 $index=26424 #qkv0.131=(3,36,6,64,32)f32 #k.203=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1200 0 1 18775 expr=[-1] Tensor.view Tensor.view_2096 2 1 relative_position_index.203 18775 18776 $input=relative_position_index.203 $shape=18775 #relative_position_index.203=(64,64)i64 #18776=(4096)i64 pnnx.Expression pnnx_expr_1199 1 1 18776 18777 expr=[@0] #18776=(4096)i64 pnnx.Expression pnnx_expr_1196 0 1 18779 expr=[64,64,-1] Tensor.index Tensor.index_426 2 1 relative_position_bias_table.203 18777 18778 $input=relative_position_bias_table.203 $expr=18777 #relative_position_bias_table.203=(225,6)f32 #18778=(4096,6)f32 pnnx.Expression pnnx_expr_1192 0 1 18781 expr=[2,0,1] Tensor.view Tensor.view_2097 2 1 18778 18779 relative_position_bias.203 $input=18778 $shape=18779 #18778=(4096,6)f32 #relative_position_bias.203=(64,64,6)f32 pnnx.Expression pnnx_expr_1191 0 1 26434 expr=0 torch.permute torch.permute_2930 2 1 relative_position_bias.203 18781 18782 $input=relative_position_bias.203 $dims=18781 #relative_position_bias.203=(64,64,6)f32 #18782=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_303 1 1 18782 relative_position_bias0.131 
memory_format=torch.contiguous_format $input=18782 #18782=(6,64,64)f32 #relative_position_bias0.131=(6,64,64)f32 torch.transpose torch.transpose_3197 3 1 k.203 18741 18742 18773 $input=k.203 $dim0=18741 $dim1=18742 #k.203=(36,6,64,32)f32 #18773=(36,6,32,64)f32 torch.matmul torch.matmul_2404 2 1 q0.131 18773 attn.407 $input=q0.131 $other=18773 #q0.131=(36,6,64,32)f32 #18773=(36,6,32,64)f32 #attn.407=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3418 2 1 relative_position_bias0.131 26434 18784 $input=relative_position_bias0.131 $dim=26434 #relative_position_bias0.131=(6,64,64)f32 #18784=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1189 2 1 attn.407 18784 input.453 expr=add(@0,@1) #attn.407=(36,6,64,64)f32 #18784=(1,6,64,64)f32 #input.453=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.4.attn.softmax 1 1 input.453 18786 dim=-1 #input.453=(36,6,64,64)f32 #18786=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.4.attn.attn_drop 1 1 18786 18787 #18786=(36,6,64,64)f32 #18787=(36,6,64,64)f32 Tensor.select Tensor.select_952 3 1 qkv0.131 26425 26426 v.203 $input=qkv0.131 $dim=26425 $index=26426 #qkv0.131=(3,36,6,64,32)f32 #v.203=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1188 0 1 26436 expr=1 pnnx.Expression pnnx_expr_1187 0 1 26437 expr=2 torch.matmul torch.matmul_2405 2 1 18787 v.203 18788 $input=18787 $other=v.203 #18787=(36,6,64,64)f32 #v.203=(36,6,64,32)f32 #18788=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1186 1 1 x1.131 18790 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.131=(36,64,192)f32 torch.transpose torch.transpose_3198 3 1 18788 26436 26437 18789 $input=18788 $dim0=26436 $dim1=26437 #18788=(36,6,64,32)f32 #18789=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_635 2 1 18789 18790 input0.135 $input=18789 $shape=18790 #18789=(36,64,6,32)f32 #input0.135=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.4.attn.proj 1 1 input0.135 18792 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.135=(36,64,192)f32 #18792=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.4.attn.proj_drop 1 1 18792 18793 #18792=(36,64,192)f32 #18793=(36,64,192)f32 pnnx.Expression pnnx_expr_1182 1 1 18678 18794 expr=[-1,8,8,int(size(@0,2))] #18678=(1,2304,192)f32 pnnx.Expression pnnx_expr_1169 1 1 14094 18800 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1162 0 1 18802 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2098 2 1 18793 18794 windows.203 $input=18793 $shape=18794 #18793=(36,64,192)f32 #windows.203=(36,8,8,192)f32 Tensor.view Tensor.view_2099 2 1 windows.203 18800 x2.131 $input=windows.203 $shape=18800 #windows.203=(36,8,8,192)f32 #x2.131=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1159 1 1 14094 18805 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2931 2 1 x2.131 18802 18803 $input=x2.131 $dims=18802 #x2.131=(1,6,6,8,8,192)f32 #18803=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_304 1 1 18803 18804 memory_format=torch.contiguous_format $input=18803 #18803=(1,6,8,6,8,192)f32 #18804=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1156 2 1 18678 14094 18809 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18678=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2100 2 1 18804 18805 x3.131 $input=18804 $shape=18805 #18804=(1,6,8,6,8,192)f32 #x3.131=(1,48,48,192)f32 Tensor.view Tensor.view_2101 2 1 x3.131 18809 x4.131 $input=x3.131 $shape=18809 #x3.131=(1,48,48,192)f32 
#x4.131=(1,2304,192)f32 pnnx.Expression pnnx_expr_1154 2 1 18678 x4.131 input.455 expr=add(@0,@1) #18678=(1,2304,192)f32 #x4.131=(1,2304,192)f32 #input.455=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.4.norm2 1 1 input.455 18813 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.455=(1,2304,192)f32 #18813=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.4.mlp.fc1 1 1 18813 18818 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18813=(1,2304,192)f32 #18818=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.4.mlp.act 1 1 18818 18819 #18818=(1,2304,384)f32 #18819=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.4.mlp.drop 1 1 18819 18820 #18819=(1,2304,384)f32 #18820=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.4.mlp.fc2 1 1 18820 18821 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18820=(1,2304,384)f32 #18821=(1,2304,192)f32 nn.Dropout pnnx_unique_662 1 1 18821 18822 #18821=(1,2304,192)f32 #18822=(1,2304,192)f32 pnnx.Expression pnnx_expr_1152 2 1 input.455 18822 18824 expr=add(@0,@1) #input.455=(1,2304,192)f32 #18822=(1,2304,192)f32 #18824=(1,2304,192)f32 pnnx.Attribute layers_mmsa.4.residual_group.blocks.5 0 1 attn_mask.103 @attn_mask=(36,64,64)f32 #attn_mask.103=(36,64,64)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.5.norm1 1 1 18824 18853 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #18824=(1,2304,192)f32 #18853=(1,2304,192)f32 pnnx.Expression pnnx_expr_1129 2 1 18824 14094 18854 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #18824=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_1127 0 1 18856 expr=[-4,-4] pnnx.Expression pnnx_expr_1125 0 1 18857 expr=[1,2] Tensor.view Tensor.view_2102 2 1 18853 18854 x.205 $input=18853 $shape=18854 #18853=(1,2304,192)f32 #x.205=(1,48,48,192)f32 torch.roll torch.roll_2520 3 1 x.205 18856 18857 x0.133 $input=x.205 $shifts=18856 $dims=18857 #x.205=(1,48,48,192)f32 #x0.133=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1103 1 1 x0.133 18874 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.133=(1,48,48,192)f32 pnnx.Expression pnnx_expr_1098 0 1 18876 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2103 2 1 x0.133 18874 x1.133 $input=x0.133 $shape=18874 #x0.133=(1,48,48,192)f32 #x1.133=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_1095 1 1 x0.133 18879 expr=[-1,8,8,int(size(@0,3))] #x0.133=(1,48,48,192)f32 torch.permute torch.permute_2932 2 1 x1.133 18876 18877 $input=x1.133 $dims=18876 #x1.133=(1,6,8,6,8,192)f32 #18877=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_305 1 1 18877 18878 memory_format=torch.contiguous_format $input=18877 #18877=(1,6,6,8,8,192)f32 #18878=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_1093 1 1 18824 18881 expr=[-1,64,int(size(@0,2))] #18824=(1,2304,192)f32 pnnx.Expression pnnx_expr_1083 0 1 18892 expr=-2 pnnx.Expression pnnx_expr_1082 0 1 18893 expr=-1 pnnx.Attribute layers_mmsa.4.residual_group.blocks.5.attn 0 1 relative_position_bias_table.205 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.205=(225,6)f32 pnnx.Attribute pnnx_unique_663 0 1 relative_position_index.205 @relative_position_index=(64,64)i64 #relative_position_index.205=(64,64)i64 Tensor.view Tensor.view_2104 2 1 18878 18879 x_windows.205 $input=18878 $shape=18879 #18878=(1,6,6,8,8,192)f32 #x_windows.205=(36,8,8,192)f32 
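Note: the pnnx.Attribute pair above (relative_position_bias_table (225,6) and relative_position_index (64,64)) feeds the usual relative-position-bias lookup: flatten the index, gather rows from the table, reshape to (N, N, heads), move heads first, and add the result to the scaled q·kT scores. A minimal sketch under the shapes printed here (6 heads, 8x8 = 64 tokens per window, (2*8-1)^2 = 225 table rows); the constant 1.767767e-01 in pnnx_expr_1202 is 1/sqrt(32), the per-head dimension:

    import torch

    num_heads, N = 6, 64                               # 8x8 tokens per window
    table = torch.randn((2 * 8 - 1) ** 2, num_heads)   # (225, 6), learned
    index = torch.randint(0, 225, (N, N))              # placeholder; the real index is
                                                       # precomputed from relative coords

    bias = table[index.view(-1)]                       # Tensor.index -> (4096, 6)
    bias = bias.view(N, N, -1).permute(2, 0, 1)        # (6, 64, 64)
    bias = bias.contiguous().unsqueeze(0)              # (1, 6, 64, 64)

    q = torch.randn(36, num_heads, N, 32) * 32 ** -0.5 # mul(@0, 1.767767e-01)
    k = torch.randn(36, num_heads, N, 32)
    attn = q @ k.transpose(-2, -1) + bias              # (36, 6, 64, 64)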
Tensor.view Tensor.view_2105 2 1 x_windows.205 18881 x2.133 $input=x_windows.205 $shape=18881 #x_windows.205=(36,8,8,192)f32 #x2.133=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.5.attn.qkv 1 1 x2.133 18917 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.133=(36,64,192)f32 #18917=(36,64,576)f32 pnnx.Expression pnnx_expr_1063 1 1 x2.133 18920 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.133=(36,64,192)f32 pnnx.Expression pnnx_expr_1058 0 1 18922 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_636 2 1 18917 18920 18921 $input=18917 $shape=18920 #18917=(36,64,576)f32 #18921=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_1057 0 1 26480 expr=0 pnnx.Expression pnnx_expr_1056 0 1 26481 expr=0 pnnx.Expression pnnx_expr_1055 0 1 26482 expr=0 pnnx.Expression pnnx_expr_1054 0 1 26483 expr=1 pnnx.Expression pnnx_expr_1053 0 1 26484 expr=0 pnnx.Expression pnnx_expr_1052 0 1 26485 expr=2 torch.permute torch.permute_2933 2 1 18921 18922 qkv0.133 $input=18921 $dims=18922 #18921=(36,64,3,6,32)f32 #qkv0.133=(3,36,6,64,32)f32 Tensor.select Tensor.select_953 3 1 qkv0.133 26480 26481 q.205 $input=qkv0.133 $dim=26480 $index=26481 #qkv0.133=(3,36,6,64,32)f32 #q.205=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1051 1 1 q.205 q0.133 expr=mul(@0,1.767767e-01) #q.205=(36,6,64,32)f32 #q0.133=(36,6,64,32)f32 Tensor.select Tensor.select_954 3 1 qkv0.133 26482 26483 k.205 $input=qkv0.133 $dim=26482 $index=26483 #qkv0.133=(3,36,6,64,32)f32 #k.205=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1049 0 1 18930 expr=[-1] Tensor.view Tensor.view_2106 2 1 relative_position_index.205 18930 18931 $input=relative_position_index.205 $shape=18930 #relative_position_index.205=(64,64)i64 #18931=(4096)i64 pnnx.Expression pnnx_expr_1048 1 1 18931 18932 expr=[@0] #18931=(4096)i64 pnnx.Expression pnnx_expr_1045 0 1 18934 expr=[64,64,-1] Tensor.index Tensor.index_427 2 1 relative_position_bias_table.205 18932 18933 $input=relative_position_bias_table.205 $expr=18932 #relative_position_bias_table.205=(225,6)f32 #18933=(4096,6)f32 pnnx.Expression pnnx_expr_1041 0 1 18936 expr=[2,0,1] Tensor.view Tensor.view_2107 2 1 18933 18934 relative_position_bias.205 $input=18933 $shape=18934 #18933=(4096,6)f32 #relative_position_bias.205=(64,64,6)f32 pnnx.Expression pnnx_expr_1040 0 1 26493 expr=0 torch.permute torch.permute_2934 2 1 relative_position_bias.205 18936 18937 $input=relative_position_bias.205 $dims=18936 #relative_position_bias.205=(64,64,6)f32 #18937=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_306 1 1 18937 relative_position_bias0.133 memory_format=torch.contiguous_format $input=18937 #18937=(6,64,64)f32 #relative_position_bias0.133=(6,64,64)f32 torch.transpose torch.transpose_3199 3 1 k.205 18892 18893 18928 $input=k.205 $dim0=18892 $dim1=18893 #k.205=(36,6,64,32)f32 #18928=(36,6,32,64)f32 torch.matmul torch.matmul_2406 2 1 q0.133 18928 attn.411 $input=q0.133 $other=18928 #q0.133=(36,6,64,32)f32 #18928=(36,6,32,64)f32 #attn.411=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3419 2 1 relative_position_bias0.133 26493 18939 $input=relative_position_bias0.133 $dim=26493 #relative_position_bias0.133=(6,64,64)f32 #18939=(1,6,64,64)f32 pnnx.Expression pnnx_expr_1038 2 1 attn.411 18939 attn0.67 expr=add(@0,@1) #attn.411=(36,6,64,64)f32 #18939=(1,6,64,64)f32 #attn0.67=(36,6,64,64)f32 pnnx.Expression pnnx_expr_1029 2 1 x2.133 attn_mask.103 18946 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.133=(36,64,192)f32 
#attn_mask.103=(36,64,64)f32 pnnx.Expression pnnx_expr_1028 0 1 26498 expr=1 pnnx.Expression pnnx_expr_1027 0 1 26499 expr=0 Tensor.view Tensor.view_2108 2 1 attn0.67 18946 18947 $input=attn0.67 $shape=18946 #attn0.67=(36,6,64,64)f32 #18947=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3420 2 1 attn_mask.103 26498 18948 $input=attn_mask.103 $dim=26498 #attn_mask.103=(36,64,64)f32 #18948=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3421 2 1 18948 26499 18949 $input=18948 $dim=26499 #18948=(36,1,64,64)f32 #18949=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_1025 2 1 18947 18949 attn1.67 expr=add(@0,@1) #18947=(1,36,6,64,64)f32 #18949=(1,36,1,64,64)f32 #attn1.67=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_1022 1 1 x2.133 18951 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.133=(36,64,192)f32 Tensor.view Tensor.view_2109 2 1 attn1.67 18951 input.457 $input=attn1.67 $shape=18951 #attn1.67=(1,36,6,64,64)f32 #input.457=(36,6,64,64)f32 nn.Softmax layers_mmsa.4.residual_group.blocks.5.attn.softmax 1 1 input.457 18953 dim=-1 #input.457=(36,6,64,64)f32 #18953=(36,6,64,64)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.5.attn.attn_drop 1 1 18953 18954 #18953=(36,6,64,64)f32 #18954=(36,6,64,64)f32 Tensor.select Tensor.select_955 3 1 qkv0.133 26484 26485 v.205 $input=qkv0.133 $dim=26484 $index=26485 #qkv0.133=(3,36,6,64,32)f32 #v.205=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1021 0 1 26503 expr=1 pnnx.Expression pnnx_expr_1020 0 1 26504 expr=2 torch.matmul torch.matmul_2407 2 1 18954 v.205 18955 $input=18954 $other=v.205 #18954=(36,6,64,64)f32 #v.205=(36,6,64,32)f32 #18955=(36,6,64,32)f32 pnnx.Expression pnnx_expr_1019 1 1 x2.133 18957 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.133=(36,64,192)f32 torch.transpose torch.transpose_3200 3 1 18955 26503 26504 18956 $input=18955 $dim0=26503 $dim1=26504 #18955=(36,6,64,32)f32 #18956=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_637 2 1 18956 18957 input0.137 $input=18956 $shape=18957 #18956=(36,64,6,32)f32 #input0.137=(36,64,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.5.attn.proj 1 1 input0.137 18959 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.137=(36,64,192)f32 #18959=(36,64,192)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.5.attn.proj_drop 1 1 18959 18960 #18959=(36,64,192)f32 #18960=(36,64,192)f32 pnnx.Expression pnnx_expr_1015 1 1 18824 18961 expr=[-1,8,8,int(size(@0,2))] #18824=(1,2304,192)f32 pnnx.Expression pnnx_expr_1002 1 1 14094 18967 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_995 0 1 18969 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2110 2 1 18960 18961 windows.205 $input=18960 $shape=18961 #18960=(36,64,192)f32 #windows.205=(36,8,8,192)f32 Tensor.view Tensor.view_2111 2 1 windows.205 18967 x3.133 $input=windows.205 $shape=18967 #windows.205=(36,8,8,192)f32 #x3.133=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_992 1 1 14094 18972 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2935 2 1 x3.133 18969 18970 $input=x3.133 $dims=18969 #x3.133=(1,6,6,8,8,192)f32 #18970=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_307 1 1 18970 18971 memory_format=torch.contiguous_format $input=18970 #18970=(1,6,8,6,8,192)f32 #18971=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_989 0 1 18974 expr=[4,4] pnnx.Expression pnnx_expr_986 0 1 18975 expr=[1,2] Tensor.view Tensor.view_2112 2 1 18971 18972 shifted_x.103 $input=18971 $shape=18972 #18971=(1,6,8,6,8,192)f32 
#shifted_x.103=(1,48,48,192)f32 pnnx.Expression pnnx_expr_983 2 1 18824 14094 18979 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #18824=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2521 3 1 shifted_x.103 18974 18975 x4.133 $input=shifted_x.103 $shifts=18974 $dims=18975 #shifted_x.103=(1,48,48,192)f32 #x4.133=(1,48,48,192)f32 Tensor.view Tensor.view_2113 2 1 x4.133 18979 x5.103 $input=x4.133 $shape=18979 #x4.133=(1,48,48,192)f32 #x5.103=(1,2304,192)f32 pnnx.Expression pnnx_expr_981 2 1 18824 x5.103 input.459 expr=add(@0,@1) #18824=(1,2304,192)f32 #x5.103=(1,2304,192)f32 #input.459=(1,2304,192)f32 nn.LayerNorm layers_mmsa.4.residual_group.blocks.5.norm2 1 1 input.459 18983 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.459=(1,2304,192)f32 #18983=(1,2304,192)f32 nn.Linear layers_mmsa.4.residual_group.blocks.5.mlp.fc1 1 1 18983 18988 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #18983=(1,2304,192)f32 #18988=(1,2304,384)f32 nn.GELU layers_mmsa.4.residual_group.blocks.5.mlp.act 1 1 18988 18989 #18988=(1,2304,384)f32 #18989=(1,2304,384)f32 nn.Dropout layers_mmsa.4.residual_group.blocks.5.mlp.drop 1 1 18989 18990 #18989=(1,2304,384)f32 #18990=(1,2304,384)f32 nn.Linear layers_mmsa.4.residual_group.blocks.5.mlp.fc2 1 1 18990 18991 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #18990=(1,2304,384)f32 #18991=(1,2304,192)f32 nn.Dropout pnnx_unique_664 1 1 18991 18992 #18991=(1,2304,192)f32 #18992=(1,2304,192)f32 pnnx.Expression pnnx_expr_979 2 1 input.459 18992 18994 expr=add(@0,@1) #input.459=(1,2304,192)f32 #18992=(1,2304,192)f32 #18994=(1,2304,192)f32 pnnx.Expression pnnx_expr_977 0 1 18996 expr=1 pnnx.Expression pnnx_expr_976 0 1 18997 expr=2 pnnx.Expression pnnx_expr_971 2 1 18994 14094 19003 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #18994=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.transpose torch.transpose_3201 3 1 18994 18996 18997 19002 $input=18994 $dim0=18996 $dim1=18997 #18994=(1,2304,192)f32 #19002=(1,192,2304)f32 Tensor.view Tensor.view_2114 2 1 19002 19003 input.461 $input=19002 $shape=19003 #19002=(1,192,2304)f32 #input.461=(1,192,48,48)f32 nn.Conv2d layers_mmsa.4.conv 1 1 input.461 19005 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.461=(1,192,48,48)f32 #19005=(1,192,48,48)f32 pnnx.Expression pnnx_expr_970 0 1 19006 expr=-1 pnnx.Expression pnnx_expr_969 0 1 19007 expr=2 pnnx.Expression pnnx_expr_968 0 1 19008 expr=1 pnnx.Expression pnnx_expr_967 0 1 26531 expr=2 torch.flatten torch.flatten_2202 3 1 19005 19007 19006 19009 $input=19005 $start_dim=19007 $end_dim=19006 #19005=(1,192,48,48)f32 #19009=(1,192,2304)f32 torch.transpose torch.transpose_3202 3 1 19009 19008 26531 19010 $input=19009 $dim0=19008 $dim1=26531 #19009=(1,192,2304)f32 #19010=(1,2304,192)f32 pnnx.Expression pnnx_expr_966 2 1 19010 18029 19011 expr=add(@0,@1) #19010=(1,2304,192)f32 #18029=(1,2304,192)f32 #19011=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.0.norm1 1 1 19011 19055 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19011=(1,2304,192)f32 #19055=(1,2304,192)f32 pnnx.Expression pnnx_expr_943 2 1 19011 14094 19056 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19011=(1,2304,192)f32 #14094=(1,192,48,48)f32 
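Note: layers_mmsa.4.conv above closes residual group 4: the token sequence is transposed and viewed back to a (1,192,48,48) map, run through a 3x3 conv, flattened back to (1,2304,192), and added to the tensor the group started from (operand 18029). A minimal sketch of that tail; the function name is hypothetical:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(192, 192, kernel_size=3, padding=1)   # layers_mmsa.4.conv

    def residual_group_tail(tokens, group_input, H=48, W=48):
        # tokens, group_input: (B, H*W, C) = (1, 2304, 192)
        B, L, C = tokens.shape
        x = tokens.transpose(1, 2).view(B, C, H, W)         # Tensor.view_2114
        x = conv(x)                                         # (1, 192, 48, 48)
        x = x.flatten(2).transpose(1, 2)                    # torch.flatten_2202 + transpose
        return x + group_input                              # pnnx_expr_966

    out = residual_group_tail(torch.randn(1, 2304, 192), torch.randn(1, 2304, 192))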
Tensor.view Tensor.view_2115 2 1 19055 19056 x.2 $input=19055 $shape=19056 #19055=(1,2304,192)f32 #x.2=(1,48,48,192)f32 pnnx.Expression pnnx_expr_922 1 1 x.2 19073 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.2=(1,48,48,192)f32 pnnx.Expression pnnx_expr_917 0 1 19075 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2116 2 1 x.2 19073 x0.2 $input=x.2 $shape=19073 #x.2=(1,48,48,192)f32 #x0.2=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_914 1 1 x.2 19078 expr=[-1,8,8,int(size(@0,3))] #x.2=(1,48,48,192)f32 torch.permute torch.permute_2936 2 1 x0.2 19075 19076 $input=x0.2 $dims=19075 #x0.2=(1,6,8,6,8,192)f32 #19076=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_308 1 1 19076 19077 memory_format=torch.contiguous_format $input=19076 #19076=(1,6,6,8,8,192)f32 #19077=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_912 1 1 19011 19080 expr=[-1,64,int(size(@0,2))] #19011=(1,2304,192)f32 pnnx.Expression pnnx_expr_902 0 1 19091 expr=-2 pnnx.Expression pnnx_expr_901 0 1 19092 expr=-1 pnnx.Attribute layers_mmsa.5.residual_group.blocks.0.attn 0 1 relative_position_bias_table.2 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.2=(225,6)f32 pnnx.Attribute pnnx_unique_665 0 1 relative_position_index.2 @relative_position_index=(64,64)i64 #relative_position_index.2=(64,64)i64 Tensor.view Tensor.view_2117 2 1 19077 19078 x_windows.2 $input=19077 $shape=19078 #19077=(1,6,6,8,8,192)f32 #x_windows.2=(36,8,8,192)f32 Tensor.view Tensor.view_2118 2 1 x_windows.2 19080 x1.2 $input=x_windows.2 $shape=19080 #x_windows.2=(36,8,8,192)f32 #x1.2=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.0.attn.qkv 1 1 x1.2 19112 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.2=(36,64,192)f32 #19112=(36,64,576)f32 pnnx.Expression pnnx_expr_886 1 1 x1.2 19115 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.2=(36,64,192)f32 pnnx.Expression pnnx_expr_881 0 1 19117 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_638 2 1 19112 19115 19116 $input=19112 $shape=19115 #19112=(36,64,576)f32 #19116=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_880 0 1 26549 expr=0 pnnx.Expression pnnx_expr_879 0 1 26550 expr=0 pnnx.Expression pnnx_expr_878 0 1 26551 expr=0 pnnx.Expression pnnx_expr_877 0 1 26552 expr=1 pnnx.Expression pnnx_expr_876 0 1 26553 expr=0 pnnx.Expression pnnx_expr_875 0 1 26554 expr=2 torch.permute torch.permute_2937 2 1 19116 19117 qkv0.2 $input=19116 $dims=19117 #19116=(36,64,3,6,32)f32 #qkv0.2=(3,36,6,64,32)f32 Tensor.select Tensor.select_956 3 1 qkv0.2 26549 26550 q.2 $input=qkv0.2 $dim=26549 $index=26550 #qkv0.2=(3,36,6,64,32)f32 #q.2=(36,6,64,32)f32 pnnx.Expression pnnx_expr_874 1 1 q.2 q0.2 expr=mul(@0,1.767767e-01) #q.2=(36,6,64,32)f32 #q0.2=(36,6,64,32)f32 Tensor.select Tensor.select_957 3 1 qkv0.2 26551 26552 k.2 $input=qkv0.2 $dim=26551 $index=26552 #qkv0.2=(3,36,6,64,32)f32 #k.2=(36,6,64,32)f32 pnnx.Expression pnnx_expr_872 0 1 19125 expr=[-1] Tensor.view Tensor.view_2119 2 1 relative_position_index.2 19125 19126 $input=relative_position_index.2 $shape=19125 #relative_position_index.2=(64,64)i64 #19126=(4096)i64 pnnx.Expression pnnx_expr_871 1 1 19126 19127 expr=[@0] #19126=(4096)i64 pnnx.Expression pnnx_expr_868 0 1 19129 expr=[64,64,-1] Tensor.index Tensor.index_428 2 1 relative_position_bias_table.2 19127 19128 $input=relative_position_bias_table.2 $expr=19127 #relative_position_bias_table.2=(225,6)f32 #19128=(4096,6)f32 pnnx.Expression pnnx_expr_864 0 1 19131 expr=[2,0,1] Tensor.view 
Tensor.view_2120 2 1 19128 19129 relative_position_bias.2 $input=19128 $shape=19129 #19128=(4096,6)f32 #relative_position_bias.2=(64,64,6)f32 pnnx.Expression pnnx_expr_863 0 1 26562 expr=0 torch.permute torch.permute_2938 2 1 relative_position_bias.2 19131 19132 $input=relative_position_bias.2 $dims=19131 #relative_position_bias.2=(64,64,6)f32 #19132=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_309 1 1 19132 relative_position_bias0.2 memory_format=torch.contiguous_format $input=19132 #19132=(6,64,64)f32 #relative_position_bias0.2=(6,64,64)f32 torch.transpose torch.transpose_3203 3 1 k.2 19091 19092 19123 $input=k.2 $dim0=19091 $dim1=19092 #k.2=(36,6,64,32)f32 #19123=(36,6,32,64)f32 torch.matmul torch.matmul_2408 2 1 q0.2 19123 attn.6 $input=q0.2 $other=19123 #q0.2=(36,6,64,32)f32 #19123=(36,6,32,64)f32 #attn.6=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3422 2 1 relative_position_bias0.2 26562 19134 $input=relative_position_bias0.2 $dim=26562 #relative_position_bias0.2=(6,64,64)f32 #19134=(1,6,64,64)f32 pnnx.Expression pnnx_expr_861 2 1 attn.6 19134 input.2 expr=add(@0,@1) #attn.6=(36,6,64,64)f32 #19134=(1,6,64,64)f32 #input.2=(36,6,64,64)f32 nn.Softmax layers_mmsa.5.residual_group.blocks.0.attn.softmax 1 1 input.2 19136 dim=-1 #input.2=(36,6,64,64)f32 #19136=(36,6,64,64)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.0.attn.attn_drop 1 1 19136 19137 #19136=(36,6,64,64)f32 #19137=(36,6,64,64)f32 Tensor.select Tensor.select_958 3 1 qkv0.2 26553 26554 v.2 $input=qkv0.2 $dim=26553 $index=26554 #qkv0.2=(3,36,6,64,32)f32 #v.2=(36,6,64,32)f32 pnnx.Expression pnnx_expr_860 0 1 26564 expr=1 pnnx.Expression pnnx_expr_859 0 1 26565 expr=2 torch.matmul torch.matmul_2409 2 1 19137 v.2 19138 $input=19137 $other=v.2 #19137=(36,6,64,64)f32 #v.2=(36,6,64,32)f32 #19138=(36,6,64,32)f32 pnnx.Expression pnnx_expr_858 1 1 x1.2 19140 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.2=(36,64,192)f32 torch.transpose torch.transpose_3204 3 1 19138 26564 26565 19139 $input=19138 $dim0=26564 $dim1=26565 #19138=(36,6,64,32)f32 #19139=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_639 2 1 19139 19140 input0.2 $input=19139 $shape=19140 #19139=(36,64,6,32)f32 #input0.2=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.0.attn.proj 1 1 input0.2 19142 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.2=(36,64,192)f32 #19142=(36,64,192)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.0.attn.proj_drop 1 1 19142 19143 #19142=(36,64,192)f32 #19143=(36,64,192)f32 pnnx.Expression pnnx_expr_854 1 1 19011 19144 expr=[-1,8,8,int(size(@0,2))] #19011=(1,2304,192)f32 pnnx.Expression pnnx_expr_841 1 1 14094 19150 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_834 0 1 19152 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2121 2 1 19143 19144 windows.2 $input=19143 $shape=19144 #19143=(36,64,192)f32 #windows.2=(36,8,8,192)f32 Tensor.view Tensor.view_2122 2 1 windows.2 19150 x2.2 $input=windows.2 $shape=19150 #windows.2=(36,8,8,192)f32 #x2.2=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_831 1 1 14094 19155 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2939 2 1 x2.2 19152 19153 $input=x2.2 $dims=19152 #x2.2=(1,6,6,8,8,192)f32 #19153=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_310 1 1 19153 19154 memory_format=torch.contiguous_format $input=19153 #19153=(1,6,8,6,8,192)f32 #19154=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_828 2 1 19011 14094 19159 
expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19011=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2123 2 1 19154 19155 x3.2 $input=19154 $shape=19155 #19154=(1,6,8,6,8,192)f32 #x3.2=(1,48,48,192)f32 Tensor.view Tensor.view_2124 2 1 x3.2 19159 x4.2 $input=x3.2 $shape=19159 #x3.2=(1,48,48,192)f32 #x4.2=(1,2304,192)f32 pnnx.Expression pnnx_expr_826 2 1 19011 x4.2 input.6 expr=add(@0,@1) #19011=(1,2304,192)f32 #x4.2=(1,2304,192)f32 #input.6=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.0.norm2 1 1 input.6 19163 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.6=(1,2304,192)f32 #19163=(1,2304,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.0.mlp.fc1 1 1 19163 19168 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19163=(1,2304,192)f32 #19168=(1,2304,384)f32 nn.GELU layers_mmsa.5.residual_group.blocks.0.mlp.act 1 1 19168 19169 #19168=(1,2304,384)f32 #19169=(1,2304,384)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.0.mlp.drop 1 1 19169 19170 #19169=(1,2304,384)f32 #19170=(1,2304,384)f32 nn.Linear layers_mmsa.5.residual_group.blocks.0.mlp.fc2 1 1 19170 19171 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19170=(1,2304,384)f32 #19171=(1,2304,192)f32 nn.Dropout pnnx_unique_666 1 1 19171 19172 #19171=(1,2304,192)f32 #19172=(1,2304,192)f32 pnnx.Expression pnnx_expr_824 2 1 input.6 19172 19174 expr=add(@0,@1) #input.6=(1,2304,192)f32 #19172=(1,2304,192)f32 #19174=(1,2304,192)f32 pnnx.Attribute layers_mmsa.5.residual_group.blocks.1 0 1 attn_mask.2 @attn_mask=(36,64,64)f32 #attn_mask.2=(36,64,64)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.1.norm1 1 1 19174 19203 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19174=(1,2304,192)f32 #19203=(1,2304,192)f32 pnnx.Expression pnnx_expr_801 2 1 19174 14094 19204 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19174=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_799 0 1 19206 expr=[-4,-4] pnnx.Expression pnnx_expr_797 0 1 19207 expr=[1,2] Tensor.view Tensor.view_2125 2 1 19203 19204 x.4 $input=19203 $shape=19204 #19203=(1,2304,192)f32 #x.4=(1,48,48,192)f32 torch.roll torch.roll_2522 3 1 x.4 19206 19207 x0.4 $input=x.4 $shifts=19206 $dims=19207 #x.4=(1,48,48,192)f32 #x0.4=(1,48,48,192)f32 pnnx.Expression pnnx_expr_775 1 1 x0.4 19224 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.4=(1,48,48,192)f32 pnnx.Expression pnnx_expr_770 0 1 19226 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2126 2 1 x0.4 19224 x1.4 $input=x0.4 $shape=19224 #x0.4=(1,48,48,192)f32 #x1.4=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_767 1 1 x0.4 19229 expr=[-1,8,8,int(size(@0,3))] #x0.4=(1,48,48,192)f32 torch.permute torch.permute_2940 2 1 x1.4 19226 19227 $input=x1.4 $dims=19226 #x1.4=(1,6,8,6,8,192)f32 #19227=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_311 1 1 19227 19228 memory_format=torch.contiguous_format $input=19227 #19227=(1,6,6,8,8,192)f32 #19228=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_765 1 1 19174 19231 expr=[-1,64,int(size(@0,2))] #19174=(1,2304,192)f32 pnnx.Expression pnnx_expr_755 0 1 19242 expr=-2 pnnx.Expression pnnx_expr_754 0 1 19243 expr=-1 pnnx.Attribute layers_mmsa.5.residual_group.blocks.1.attn 0 1 relative_position_bias_table.4 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.4=(225,6)f32 pnnx.Attribute 
pnnx_unique_667 0 1 relative_position_index.4 @relative_position_index=(64,64)i64 #relative_position_index.4=(64,64)i64 Tensor.view Tensor.view_2127 2 1 19228 19229 x_windows.4 $input=19228 $shape=19229 #19228=(1,6,6,8,8,192)f32 #x_windows.4=(36,8,8,192)f32 Tensor.view Tensor.view_2128 2 1 x_windows.4 19231 x2.4 $input=x_windows.4 $shape=19231 #x_windows.4=(36,8,8,192)f32 #x2.4=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.1.attn.qkv 1 1 x2.4 19267 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.4=(36,64,192)f32 #19267=(36,64,576)f32 pnnx.Expression pnnx_expr_735 1 1 x2.4 19270 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.4=(36,64,192)f32 pnnx.Expression pnnx_expr_730 0 1 19272 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_640 2 1 19267 19270 19271 $input=19267 $shape=19270 #19267=(36,64,576)f32 #19271=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_729 0 1 26608 expr=0 pnnx.Expression pnnx_expr_728 0 1 26609 expr=0 pnnx.Expression pnnx_expr_727 0 1 26610 expr=0 pnnx.Expression pnnx_expr_726 0 1 26611 expr=1 pnnx.Expression pnnx_expr_725 0 1 26612 expr=0 pnnx.Expression pnnx_expr_724 0 1 26613 expr=2 torch.permute torch.permute_2941 2 1 19271 19272 qkv0.4 $input=19271 $dims=19272 #19271=(36,64,3,6,32)f32 #qkv0.4=(3,36,6,64,32)f32 Tensor.select Tensor.select_959 3 1 qkv0.4 26608 26609 q.4 $input=qkv0.4 $dim=26608 $index=26609 #qkv0.4=(3,36,6,64,32)f32 #q.4=(36,6,64,32)f32 pnnx.Expression pnnx_expr_723 1 1 q.4 q0.4 expr=mul(@0,1.767767e-01) #q.4=(36,6,64,32)f32 #q0.4=(36,6,64,32)f32 Tensor.select Tensor.select_960 3 1 qkv0.4 26610 26611 k.4 $input=qkv0.4 $dim=26610 $index=26611 #qkv0.4=(3,36,6,64,32)f32 #k.4=(36,6,64,32)f32 pnnx.Expression pnnx_expr_721 0 1 19280 expr=[-1] Tensor.view Tensor.view_2129 2 1 relative_position_index.4 19280 19281 $input=relative_position_index.4 $shape=19280 #relative_position_index.4=(64,64)i64 #19281=(4096)i64 pnnx.Expression pnnx_expr_720 1 1 19281 19282 expr=[@0] #19281=(4096)i64 pnnx.Expression pnnx_expr_717 0 1 19284 expr=[64,64,-1] Tensor.index Tensor.index_429 2 1 relative_position_bias_table.4 19282 19283 $input=relative_position_bias_table.4 $expr=19282 #relative_position_bias_table.4=(225,6)f32 #19283=(4096,6)f32 pnnx.Expression pnnx_expr_713 0 1 19286 expr=[2,0,1] Tensor.view Tensor.view_2130 2 1 19283 19284 relative_position_bias.4 $input=19283 $shape=19284 #19283=(4096,6)f32 #relative_position_bias.4=(64,64,6)f32 pnnx.Expression pnnx_expr_712 0 1 26621 expr=0 torch.permute torch.permute_2942 2 1 relative_position_bias.4 19286 19287 $input=relative_position_bias.4 $dims=19286 #relative_position_bias.4=(64,64,6)f32 #19287=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_312 1 1 19287 relative_position_bias0.4 memory_format=torch.contiguous_format $input=19287 #19287=(6,64,64)f32 #relative_position_bias0.4=(6,64,64)f32 torch.transpose torch.transpose_3205 3 1 k.4 19242 19243 19278 $input=k.4 $dim0=19242 $dim1=19243 #k.4=(36,6,64,32)f32 #19278=(36,6,32,64)f32 torch.matmul torch.matmul_2410 2 1 q0.4 19278 attn.10 $input=q0.4 $other=19278 #q0.4=(36,6,64,32)f32 #19278=(36,6,32,64)f32 #attn.10=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3423 2 1 relative_position_bias0.4 26621 19289 $input=relative_position_bias0.4 $dim=26621 #relative_position_bias0.4=(6,64,64)f32 #19289=(1,6,64,64)f32 pnnx.Expression pnnx_expr_710 2 1 attn.10 19289 attn0.2 expr=add(@0,@1) #attn.10=(36,6,64,64)f32 #19289=(1,6,64,64)f32 #attn0.2=(36,6,64,64)f32 pnnx.Expression pnnx_expr_701 2 1 x2.4 attn_mask.2 
19296 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.4=(36,64,192)f32 #attn_mask.2=(36,64,64)f32 pnnx.Expression pnnx_expr_700 0 1 26626 expr=1 pnnx.Expression pnnx_expr_699 0 1 26627 expr=0 Tensor.view Tensor.view_2131 2 1 attn0.2 19296 19297 $input=attn0.2 $shape=19296 #attn0.2=(36,6,64,64)f32 #19297=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3424 2 1 attn_mask.2 26626 19298 $input=attn_mask.2 $dim=26626 #attn_mask.2=(36,64,64)f32 #19298=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3425 2 1 19298 26627 19299 $input=19298 $dim=26627 #19298=(36,1,64,64)f32 #19299=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_697 2 1 19297 19299 attn1.2 expr=add(@0,@1) #19297=(1,36,6,64,64)f32 #19299=(1,36,1,64,64)f32 #attn1.2=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_694 1 1 x2.4 19301 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.4=(36,64,192)f32 Tensor.view Tensor.view_2132 2 1 attn1.2 19301 input.8 $input=attn1.2 $shape=19301 #attn1.2=(1,36,6,64,64)f32 #input.8=(36,6,64,64)f32 nn.Softmax layers_mmsa.5.residual_group.blocks.1.attn.softmax 1 1 input.8 19303 dim=-1 #input.8=(36,6,64,64)f32 #19303=(36,6,64,64)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.1.attn.attn_drop 1 1 19303 19304 #19303=(36,6,64,64)f32 #19304=(36,6,64,64)f32 Tensor.select Tensor.select_961 3 1 qkv0.4 26612 26613 v.4 $input=qkv0.4 $dim=26612 $index=26613 #qkv0.4=(3,36,6,64,32)f32 #v.4=(36,6,64,32)f32 pnnx.Expression pnnx_expr_693 0 1 26631 expr=1 pnnx.Expression pnnx_expr_692 0 1 26632 expr=2 torch.matmul torch.matmul_2411 2 1 19304 v.4 19305 $input=19304 $other=v.4 #19304=(36,6,64,64)f32 #v.4=(36,6,64,32)f32 #19305=(36,6,64,32)f32 pnnx.Expression pnnx_expr_691 1 1 x2.4 19307 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.4=(36,64,192)f32 torch.transpose torch.transpose_3206 3 1 19305 26631 26632 19306 $input=19305 $dim0=26631 $dim1=26632 #19305=(36,6,64,32)f32 #19306=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_641 2 1 19306 19307 input0.4 $input=19306 $shape=19307 #19306=(36,64,6,32)f32 #input0.4=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.1.attn.proj 1 1 input0.4 19309 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.4=(36,64,192)f32 #19309=(36,64,192)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.1.attn.proj_drop 1 1 19309 19310 #19309=(36,64,192)f32 #19310=(36,64,192)f32 pnnx.Expression pnnx_expr_687 1 1 19174 19311 expr=[-1,8,8,int(size(@0,2))] #19174=(1,2304,192)f32 pnnx.Expression pnnx_expr_674 1 1 14094 19317 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_667 0 1 19319 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2133 2 1 19310 19311 windows.4 $input=19310 $shape=19311 #19310=(36,64,192)f32 #windows.4=(36,8,8,192)f32 Tensor.view Tensor.view_2134 2 1 windows.4 19317 x3.4 $input=windows.4 $shape=19317 #windows.4=(36,8,8,192)f32 #x3.4=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_664 1 1 14094 19322 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2943 2 1 x3.4 19319 19320 $input=x3.4 $dims=19319 #x3.4=(1,6,6,8,8,192)f32 #19320=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_313 1 1 19320 19321 memory_format=torch.contiguous_format $input=19320 #19320=(1,6,8,6,8,192)f32 #19321=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_661 0 1 19324 expr=[4,4] pnnx.Expression pnnx_expr_658 0 1 19325 expr=[1,2] Tensor.view Tensor.view_2135 2 1 19321 19322 shifted_x.2 $input=19321 $shape=19322 
#19321=(1,6,8,6,8,192)f32 #shifted_x.2=(1,48,48,192)f32 pnnx.Expression pnnx_expr_655 2 1 19174 14094 19329 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19174=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2523 3 1 shifted_x.2 19324 19325 x4.4 $input=shifted_x.2 $shifts=19324 $dims=19325 #shifted_x.2=(1,48,48,192)f32 #x4.4=(1,48,48,192)f32 Tensor.view Tensor.view_2136 2 1 x4.4 19329 x5.2 $input=x4.4 $shape=19329 #x4.4=(1,48,48,192)f32 #x5.2=(1,2304,192)f32 pnnx.Expression pnnx_expr_653 2 1 19174 x5.2 input.10 expr=add(@0,@1) #19174=(1,2304,192)f32 #x5.2=(1,2304,192)f32 #input.10=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.1.norm2 1 1 input.10 19333 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.10=(1,2304,192)f32 #19333=(1,2304,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.1.mlp.fc1 1 1 19333 19338 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19333=(1,2304,192)f32 #19338=(1,2304,384)f32 nn.GELU layers_mmsa.5.residual_group.blocks.1.mlp.act 1 1 19338 19339 #19338=(1,2304,384)f32 #19339=(1,2304,384)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.1.mlp.drop 1 1 19339 19340 #19339=(1,2304,384)f32 #19340=(1,2304,384)f32 nn.Linear layers_mmsa.5.residual_group.blocks.1.mlp.fc2 1 1 19340 19341 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19340=(1,2304,384)f32 #19341=(1,2304,192)f32 nn.Dropout pnnx_unique_668 1 1 19341 19342 #19341=(1,2304,192)f32 #19342=(1,2304,192)f32 pnnx.Expression pnnx_expr_651 2 1 input.10 19342 19344 expr=add(@0,@1) #input.10=(1,2304,192)f32 #19342=(1,2304,192)f32 #19344=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.2.norm1 1 1 19344 19371 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19344=(1,2304,192)f32 #19371=(1,2304,192)f32 pnnx.Expression pnnx_expr_629 2 1 19344 14094 19372 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19344=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2137 2 1 19371 19372 x.6 $input=19371 $shape=19372 #19371=(1,2304,192)f32 #x.6=(1,48,48,192)f32 pnnx.Expression pnnx_expr_608 1 1 x.6 19389 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.6=(1,48,48,192)f32 pnnx.Expression pnnx_expr_603 0 1 19391 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2138 2 1 x.6 19389 x0.6 $input=x.6 $shape=19389 #x.6=(1,48,48,192)f32 #x0.6=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_600 1 1 x.6 19394 expr=[-1,8,8,int(size(@0,3))] #x.6=(1,48,48,192)f32 torch.permute torch.permute_2944 2 1 x0.6 19391 19392 $input=x0.6 $dims=19391 #x0.6=(1,6,8,6,8,192)f32 #19392=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_314 1 1 19392 19393 memory_format=torch.contiguous_format $input=19392 #19392=(1,6,6,8,8,192)f32 #19393=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_598 1 1 19344 19396 expr=[-1,64,int(size(@0,2))] #19344=(1,2304,192)f32 pnnx.Expression pnnx_expr_588 0 1 19407 expr=-2 pnnx.Expression pnnx_expr_587 0 1 19408 expr=-1 pnnx.Attribute layers_mmsa.5.residual_group.blocks.2.attn 0 1 relative_position_bias_table.6 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.6=(225,6)f32 pnnx.Attribute pnnx_unique_669 0 1 relative_position_index.6 @relative_position_index=(64,64)i64 #relative_position_index.6=(64,64)i64 Tensor.view Tensor.view_2139 2 1 19393 19394 x_windows.6 $input=19393 $shape=19394 
#19393=(1,6,6,8,8,192)f32 #x_windows.6=(36,8,8,192)f32 Tensor.view Tensor.view_2140 2 1 x_windows.6 19396 x1.6 $input=x_windows.6 $shape=19396 #x_windows.6=(36,8,8,192)f32 #x1.6=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.2.attn.qkv 1 1 x1.6 19428 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.6=(36,64,192)f32 #19428=(36,64,576)f32 pnnx.Expression pnnx_expr_572 1 1 x1.6 19431 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.6=(36,64,192)f32 pnnx.Expression pnnx_expr_567 0 1 19433 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_642 2 1 19428 19431 19432 $input=19428 $shape=19431 #19428=(36,64,576)f32 #19432=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_566 0 1 26676 expr=0 pnnx.Expression pnnx_expr_565 0 1 26677 expr=0 pnnx.Expression pnnx_expr_564 0 1 26678 expr=0 pnnx.Expression pnnx_expr_563 0 1 26679 expr=1 pnnx.Expression pnnx_expr_562 0 1 26680 expr=0 pnnx.Expression pnnx_expr_561 0 1 26681 expr=2 torch.permute torch.permute_2945 2 1 19432 19433 qkv0.6 $input=19432 $dims=19433 #19432=(36,64,3,6,32)f32 #qkv0.6=(3,36,6,64,32)f32 Tensor.select Tensor.select_962 3 1 qkv0.6 26676 26677 q.6 $input=qkv0.6 $dim=26676 $index=26677 #qkv0.6=(3,36,6,64,32)f32 #q.6=(36,6,64,32)f32 pnnx.Expression pnnx_expr_560 1 1 q.6 q0.6 expr=mul(@0,1.767767e-01) #q.6=(36,6,64,32)f32 #q0.6=(36,6,64,32)f32 Tensor.select Tensor.select_963 3 1 qkv0.6 26678 26679 k.6 $input=qkv0.6 $dim=26678 $index=26679 #qkv0.6=(3,36,6,64,32)f32 #k.6=(36,6,64,32)f32 pnnx.Expression pnnx_expr_558 0 1 19441 expr=[-1] Tensor.view Tensor.view_2141 2 1 relative_position_index.6 19441 19442 $input=relative_position_index.6 $shape=19441 #relative_position_index.6=(64,64)i64 #19442=(4096)i64 pnnx.Expression pnnx_expr_557 1 1 19442 19443 expr=[@0] #19442=(4096)i64 pnnx.Expression pnnx_expr_554 0 1 19445 expr=[64,64,-1] Tensor.index Tensor.index_430 2 1 relative_position_bias_table.6 19443 19444 $input=relative_position_bias_table.6 $expr=19443 #relative_position_bias_table.6=(225,6)f32 #19444=(4096,6)f32 pnnx.Expression pnnx_expr_550 0 1 19447 expr=[2,0,1] Tensor.view Tensor.view_2142 2 1 19444 19445 relative_position_bias.6 $input=19444 $shape=19445 #19444=(4096,6)f32 #relative_position_bias.6=(64,64,6)f32 pnnx.Expression pnnx_expr_549 0 1 26689 expr=0 torch.permute torch.permute_2946 2 1 relative_position_bias.6 19447 19448 $input=relative_position_bias.6 $dims=19447 #relative_position_bias.6=(64,64,6)f32 #19448=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_315 1 1 19448 relative_position_bias0.6 memory_format=torch.contiguous_format $input=19448 #19448=(6,64,64)f32 #relative_position_bias0.6=(6,64,64)f32 torch.transpose torch.transpose_3207 3 1 k.6 19407 19408 19439 $input=k.6 $dim0=19407 $dim1=19408 #k.6=(36,6,64,32)f32 #19439=(36,6,32,64)f32 torch.matmul torch.matmul_2412 2 1 q0.6 19439 attn.14 $input=q0.6 $other=19439 #q0.6=(36,6,64,32)f32 #19439=(36,6,32,64)f32 #attn.14=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3426 2 1 relative_position_bias0.6 26689 19450 $input=relative_position_bias0.6 $dim=26689 #relative_position_bias0.6=(6,64,64)f32 #19450=(1,6,64,64)f32 pnnx.Expression pnnx_expr_547 2 1 attn.14 19450 input.12 expr=add(@0,@1) #attn.14=(36,6,64,64)f32 #19450=(1,6,64,64)f32 #input.12=(36,6,64,64)f32 nn.Softmax layers_mmsa.5.residual_group.blocks.2.attn.softmax 1 1 input.12 19452 dim=-1 #input.12=(36,6,64,64)f32 #19452=(36,6,64,64)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.2.attn.attn_drop 1 1 19452 19453 #19452=(36,6,64,64)f32 
#19453=(36,6,64,64)f32 Tensor.select Tensor.select_964 3 1 qkv0.6 26680 26681 v.6 $input=qkv0.6 $dim=26680 $index=26681 #qkv0.6=(3,36,6,64,32)f32 #v.6=(36,6,64,32)f32 pnnx.Expression pnnx_expr_546 0 1 26691 expr=1 pnnx.Expression pnnx_expr_545 0 1 26692 expr=2 torch.matmul torch.matmul_2413 2 1 19453 v.6 19454 $input=19453 $other=v.6 #19453=(36,6,64,64)f32 #v.6=(36,6,64,32)f32 #19454=(36,6,64,32)f32 pnnx.Expression pnnx_expr_544 1 1 x1.6 19456 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.6=(36,64,192)f32 torch.transpose torch.transpose_3208 3 1 19454 26691 26692 19455 $input=19454 $dim0=26691 $dim1=26692 #19454=(36,6,64,32)f32 #19455=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_643 2 1 19455 19456 input0.6 $input=19455 $shape=19456 #19455=(36,64,6,32)f32 #input0.6=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.2.attn.proj 1 1 input0.6 19458 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.6=(36,64,192)f32 #19458=(36,64,192)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.2.attn.proj_drop 1 1 19458 19459 #19458=(36,64,192)f32 #19459=(36,64,192)f32 pnnx.Expression pnnx_expr_540 1 1 19344 19460 expr=[-1,8,8,int(size(@0,2))] #19344=(1,2304,192)f32 pnnx.Expression pnnx_expr_527 1 1 14094 19466 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_520 0 1 19468 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2143 2 1 19459 19460 windows.6 $input=19459 $shape=19460 #19459=(36,64,192)f32 #windows.6=(36,8,8,192)f32 Tensor.view Tensor.view_2144 2 1 windows.6 19466 x2.6 $input=windows.6 $shape=19466 #windows.6=(36,8,8,192)f32 #x2.6=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_517 1 1 14094 19471 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2947 2 1 x2.6 19468 19469 $input=x2.6 $dims=19468 #x2.6=(1,6,6,8,8,192)f32 #19469=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_316 1 1 19469 19470 memory_format=torch.contiguous_format $input=19469 #19469=(1,6,8,6,8,192)f32 #19470=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_514 2 1 19344 14094 19475 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19344=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2145 2 1 19470 19471 x3.6 $input=19470 $shape=19471 #19470=(1,6,8,6,8,192)f32 #x3.6=(1,48,48,192)f32 Tensor.view Tensor.view_2146 2 1 x3.6 19475 x4.6 $input=x3.6 $shape=19475 #x3.6=(1,48,48,192)f32 #x4.6=(1,2304,192)f32 pnnx.Expression pnnx_expr_512 2 1 19344 x4.6 input.14 expr=add(@0,@1) #19344=(1,2304,192)f32 #x4.6=(1,2304,192)f32 #input.14=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.2.norm2 1 1 input.14 19479 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.14=(1,2304,192)f32 #19479=(1,2304,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.2.mlp.fc1 1 1 19479 19484 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19479=(1,2304,192)f32 #19484=(1,2304,384)f32 nn.GELU layers_mmsa.5.residual_group.blocks.2.mlp.act 1 1 19484 19485 #19484=(1,2304,384)f32 #19485=(1,2304,384)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.2.mlp.drop 1 1 19485 19486 #19485=(1,2304,384)f32 #19486=(1,2304,384)f32 nn.Linear layers_mmsa.5.residual_group.blocks.2.mlp.fc2 1 1 19486 19487 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19486=(1,2304,384)f32 #19487=(1,2304,192)f32 nn.Dropout pnnx_unique_670 1 1 19487 19488 
#19487=(1,2304,192)f32 #19488=(1,2304,192)f32 pnnx.Expression pnnx_expr_510 2 1 input.14 19488 19490 expr=add(@0,@1) #input.14=(1,2304,192)f32 #19488=(1,2304,192)f32 #19490=(1,2304,192)f32 pnnx.Attribute layers_mmsa.5.residual_group.blocks.3 0 1 attn_mask.4 @attn_mask=(36,64,64)f32 #attn_mask.4=(36,64,64)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.3.norm1 1 1 19490 19519 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19490=(1,2304,192)f32 #19519=(1,2304,192)f32 pnnx.Expression pnnx_expr_487 2 1 19490 14094 19520 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19490=(1,2304,192)f32 #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_485 0 1 19522 expr=[-4,-4] pnnx.Expression pnnx_expr_483 0 1 19523 expr=[1,2] Tensor.view Tensor.view_2147 2 1 19519 19520 x.8 $input=19519 $shape=19520 #19519=(1,2304,192)f32 #x.8=(1,48,48,192)f32 torch.roll torch.roll_2524 3 1 x.8 19522 19523 x0.8 $input=x.8 $shifts=19522 $dims=19523 #x.8=(1,48,48,192)f32 #x0.8=(1,48,48,192)f32 pnnx.Expression pnnx_expr_461 1 1 x0.8 19540 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.8=(1,48,48,192)f32 pnnx.Expression pnnx_expr_456 0 1 19542 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2148 2 1 x0.8 19540 x1.8 $input=x0.8 $shape=19540 #x0.8=(1,48,48,192)f32 #x1.8=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_453 1 1 x0.8 19545 expr=[-1,8,8,int(size(@0,3))] #x0.8=(1,48,48,192)f32 torch.permute torch.permute_2948 2 1 x1.8 19542 19543 $input=x1.8 $dims=19542 #x1.8=(1,6,8,6,8,192)f32 #19543=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_317 1 1 19543 19544 memory_format=torch.contiguous_format $input=19543 #19543=(1,6,6,8,8,192)f32 #19544=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_451 1 1 19490 19547 expr=[-1,64,int(size(@0,2))] #19490=(1,2304,192)f32 pnnx.Expression pnnx_expr_441 0 1 19558 expr=-2 pnnx.Expression pnnx_expr_440 0 1 19559 expr=-1 pnnx.Attribute layers_mmsa.5.residual_group.blocks.3.attn 0 1 relative_position_bias_table.8 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.8=(225,6)f32 pnnx.Attribute pnnx_unique_671 0 1 relative_position_index.8 @relative_position_index=(64,64)i64 #relative_position_index.8=(64,64)i64 Tensor.view Tensor.view_2149 2 1 19544 19545 x_windows.8 $input=19544 $shape=19545 #19544=(1,6,6,8,8,192)f32 #x_windows.8=(36,8,8,192)f32 Tensor.view Tensor.view_2150 2 1 x_windows.8 19547 x2.8 $input=x_windows.8 $shape=19547 #x_windows.8=(36,8,8,192)f32 #x2.8=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.3.attn.qkv 1 1 x2.8 19583 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.8=(36,64,192)f32 #19583=(36,64,576)f32 pnnx.Expression pnnx_expr_421 1 1 x2.8 19586 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.8=(36,64,192)f32 pnnx.Expression pnnx_expr_416 0 1 19588 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_644 2 1 19583 19586 19587 $input=19583 $shape=19586 #19583=(36,64,576)f32 #19587=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_415 0 1 26735 expr=0 pnnx.Expression pnnx_expr_414 0 1 26736 expr=0 pnnx.Expression pnnx_expr_413 0 1 26737 expr=0 pnnx.Expression pnnx_expr_412 0 1 26738 expr=1 pnnx.Expression pnnx_expr_411 0 1 26739 expr=0 pnnx.Expression pnnx_expr_410 0 1 26740 expr=2 torch.permute torch.permute_2949 2 1 19587 19588 qkv0.8 $input=19587 $dims=19588 #19587=(36,64,3,6,32)f32 #qkv0.8=(3,36,6,64,32)f32 Tensor.select Tensor.select_965 3 1 qkv0.8 26735 26736 q.8 
$input=qkv0.8 $dim=26735 $index=26736 #qkv0.8=(3,36,6,64,32)f32 #q.8=(36,6,64,32)f32 pnnx.Expression pnnx_expr_409 1 1 q.8 q0.8 expr=mul(@0,1.767767e-01) #q.8=(36,6,64,32)f32 #q0.8=(36,6,64,32)f32 Tensor.select Tensor.select_966 3 1 qkv0.8 26737 26738 k.8 $input=qkv0.8 $dim=26737 $index=26738 #qkv0.8=(3,36,6,64,32)f32 #k.8=(36,6,64,32)f32 pnnx.Expression pnnx_expr_407 0 1 19596 expr=[-1] Tensor.view Tensor.view_2151 2 1 relative_position_index.8 19596 19597 $input=relative_position_index.8 $shape=19596 #relative_position_index.8=(64,64)i64 #19597=(4096)i64 pnnx.Expression pnnx_expr_406 1 1 19597 19598 expr=[@0] #19597=(4096)i64 pnnx.Expression pnnx_expr_403 0 1 19600 expr=[64,64,-1] Tensor.index Tensor.index_431 2 1 relative_position_bias_table.8 19598 19599 $input=relative_position_bias_table.8 $expr=19598 #relative_position_bias_table.8=(225,6)f32 #19599=(4096,6)f32 pnnx.Expression pnnx_expr_399 0 1 19602 expr=[2,0,1] Tensor.view Tensor.view_2152 2 1 19599 19600 relative_position_bias.8 $input=19599 $shape=19600 #19599=(4096,6)f32 #relative_position_bias.8=(64,64,6)f32 pnnx.Expression pnnx_expr_398 0 1 26748 expr=0 torch.permute torch.permute_2950 2 1 relative_position_bias.8 19602 19603 $input=relative_position_bias.8 $dims=19602 #relative_position_bias.8=(64,64,6)f32 #19603=(6,64,64)f32 Tensor.contiguous Tensor.contiguous_318 1 1 19603 relative_position_bias0.8 memory_format=torch.contiguous_format $input=19603 #19603=(6,64,64)f32 #relative_position_bias0.8=(6,64,64)f32 torch.transpose torch.transpose_3209 3 1 k.8 19558 19559 19594 $input=k.8 $dim0=19558 $dim1=19559 #k.8=(36,6,64,32)f32 #19594=(36,6,32,64)f32 torch.matmul torch.matmul_2414 2 1 q0.8 19594 attn.18 $input=q0.8 $other=19594 #q0.8=(36,6,64,32)f32 #19594=(36,6,32,64)f32 #attn.18=(36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3427 2 1 relative_position_bias0.8 26748 19605 $input=relative_position_bias0.8 $dim=26748 #relative_position_bias0.8=(6,64,64)f32 #19605=(1,6,64,64)f32 pnnx.Expression pnnx_expr_396 2 1 attn.18 19605 attn0.4 expr=add(@0,@1) #attn.18=(36,6,64,64)f32 #19605=(1,6,64,64)f32 #attn0.4=(36,6,64,64)f32 pnnx.Expression pnnx_expr_387 2 1 x2.8 attn_mask.4 19612 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.8=(36,64,192)f32 #attn_mask.4=(36,64,64)f32 pnnx.Expression pnnx_expr_386 0 1 26753 expr=1 pnnx.Expression pnnx_expr_385 0 1 26754 expr=0 Tensor.view Tensor.view_2153 2 1 attn0.4 19612 19613 $input=attn0.4 $shape=19612 #attn0.4=(36,6,64,64)f32 #19613=(1,36,6,64,64)f32 torch.unsqueeze torch.unsqueeze_3428 2 1 attn_mask.4 26753 19614 $input=attn_mask.4 $dim=26753 #attn_mask.4=(36,64,64)f32 #19614=(36,1,64,64)f32 torch.unsqueeze torch.unsqueeze_3429 2 1 19614 26754 19615 $input=19614 $dim=26754 #19614=(36,1,64,64)f32 #19615=(1,36,1,64,64)f32 pnnx.Expression pnnx_expr_383 2 1 19613 19615 attn1.4 expr=add(@0,@1) #19613=(1,36,6,64,64)f32 #19615=(1,36,1,64,64)f32 #attn1.4=(1,36,6,64,64)f32 pnnx.Expression pnnx_expr_380 1 1 x2.8 19617 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.8=(36,64,192)f32 Tensor.view Tensor.view_2154 2 1 attn1.4 19617 input.16 $input=attn1.4 $shape=19617 #attn1.4=(1,36,6,64,64)f32 #input.16=(36,6,64,64)f32 nn.Softmax layers_mmsa.5.residual_group.blocks.3.attn.softmax 1 1 input.16 19619 dim=-1 #input.16=(36,6,64,64)f32 #19619=(36,6,64,64)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.3.attn.attn_drop 1 1 19619 19620 #19619=(36,6,64,64)f32 #19620=(36,6,64,64)f32 Tensor.select Tensor.select_967 3 1 qkv0.8 26739 26740 v.8 $input=qkv0.8 $dim=26739 
$index=26740 #qkv0.8=(3,36,6,64,32)f32 #v.8=(36,6,64,32)f32 pnnx.Expression pnnx_expr_379 0 1 26758 expr=1 pnnx.Expression pnnx_expr_378 0 1 26759 expr=2 torch.matmul torch.matmul_2415 2 1 19620 v.8 19621 $input=19620 $other=v.8 #19620=(36,6,64,64)f32 #v.8=(36,6,64,32)f32 #19621=(36,6,64,32)f32 pnnx.Expression pnnx_expr_377 1 1 x2.8 19623 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.8=(36,64,192)f32 torch.transpose torch.transpose_3210 3 1 19621 26758 26759 19622 $input=19621 $dim0=26758 $dim1=26759 #19621=(36,6,64,32)f32 #19622=(36,64,6,32)f32 Tensor.reshape Tensor.reshape_645 2 1 19622 19623 input0.8 $input=19622 $shape=19623 #19622=(36,64,6,32)f32 #input0.8=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.3.attn.proj 1 1 input0.8 19625 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.8=(36,64,192)f32 #19625=(36,64,192)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.3.attn.proj_drop 1 1 19625 19626 #19625=(36,64,192)f32 #19626=(36,64,192)f32 pnnx.Expression pnnx_expr_373 1 1 19490 19627 expr=[-1,8,8,int(size(@0,2))] #19490=(1,2304,192)f32 pnnx.Expression pnnx_expr_360 1 1 14094 19633 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32 pnnx.Expression pnnx_expr_353 0 1 19635 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2155 2 1 19626 19627 windows.8 $input=19626 $shape=19627 #19626=(36,64,192)f32 #windows.8=(36,8,8,192)f32 Tensor.view Tensor.view_2156 2 1 windows.8 19633 x3.8 $input=windows.8 $shape=19633 #windows.8=(36,8,8,192)f32 #x3.8=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_350 1 1 14094 19638 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32 torch.permute torch.permute_2951 2 1 x3.8 19635 19636 $input=x3.8 $dims=19635 #x3.8=(1,6,6,8,8,192)f32 #19636=(1,6,8,6,8,192)f32 Tensor.contiguous Tensor.contiguous_319 1 1 19636 19637 memory_format=torch.contiguous_format $input=19636 #19636=(1,6,8,6,8,192)f32 #19637=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_347 0 1 19640 expr=[4,4] pnnx.Expression pnnx_expr_344 0 1 19641 expr=[1,2] Tensor.view Tensor.view_2157 2 1 19637 19638 shifted_x.4 $input=19637 $shape=19638 #19637=(1,6,8,6,8,192)f32 #shifted_x.4=(1,48,48,192)f32 pnnx.Expression pnnx_expr_341 2 1 19490 14094 19645 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19490=(1,2304,192)f32 #14094=(1,192,48,48)f32 torch.roll torch.roll_2525 3 1 shifted_x.4 19640 19641 x4.8 $input=shifted_x.4 $shifts=19640 $dims=19641 #shifted_x.4=(1,48,48,192)f32 #x4.8=(1,48,48,192)f32 Tensor.view Tensor.view_2158 2 1 x4.8 19645 x5.4 $input=x4.8 $shape=19645 #x4.8=(1,48,48,192)f32 #x5.4=(1,2304,192)f32 pnnx.Expression pnnx_expr_339 2 1 19490 x5.4 input.18 expr=add(@0,@1) #19490=(1,2304,192)f32 #x5.4=(1,2304,192)f32 #input.18=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.3.norm2 1 1 input.18 19649 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.18=(1,2304,192)f32 #19649=(1,2304,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.3.mlp.fc1 1 1 19649 19654 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19649=(1,2304,192)f32 #19654=(1,2304,384)f32 nn.GELU layers_mmsa.5.residual_group.blocks.3.mlp.act 1 1 19654 19655 #19654=(1,2304,384)f32 #19655=(1,2304,384)f32 nn.Dropout layers_mmsa.5.residual_group.blocks.3.mlp.drop 1 1 19655 19656 #19655=(1,2304,384)f32 #19656=(1,2304,384)f32 nn.Linear layers_mmsa.5.residual_group.blocks.3.mlp.fc2 1 1 19656 19657 bias=True 
in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19656=(1,2304,384)f32 #19657=(1,2304,192)f32 nn.Dropout pnnx_unique_672 1 1 19657 19658 #19657=(1,2304,192)f32 #19658=(1,2304,192)f32 pnnx.Expression pnnx_expr_337 2 1 input.18 19658 19660 expr=add(@0,@1) #input.18=(1,2304,192)f32 #19658=(1,2304,192)f32 #19660=(1,2304,192)f32 nn.LayerNorm layers_mmsa.5.residual_group.blocks.4.norm1 1 1 19660 19687 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19660=(1,2304,192)f32 #19687=(1,2304,192)f32 pnnx.Expression pnnx_expr_315 2 1 19660 14094 19688 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19660=(1,2304,192)f32 #14094=(1,192,48,48)f32 Tensor.view Tensor.view_2159 2 1 19687 19688 x.10 $input=19687 $shape=19688 #19687=(1,2304,192)f32 #x.10=(1,48,48,192)f32 pnnx.Expression pnnx_expr_294 1 1 x.10 19705 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x.10=(1,48,48,192)f32 pnnx.Expression pnnx_expr_289 0 1 19707 expr=[0,1,3,2,4,5] Tensor.view Tensor.view_2160 2 1 x.10 19705 x0.10 $input=x.10 $shape=19705 #x.10=(1,48,48,192)f32 #x0.10=(1,6,8,6,8,192)f32 pnnx.Expression pnnx_expr_286 1 1 x.10 19710 expr=[-1,8,8,int(size(@0,3))] #x.10=(1,48,48,192)f32 torch.permute torch.permute_2952 2 1 x0.10 19707 19708 $input=x0.10 $dims=19707 #x0.10=(1,6,8,6,8,192)f32 #19708=(1,6,6,8,8,192)f32 Tensor.contiguous Tensor.contiguous_320 1 1 19708 19709 memory_format=torch.contiguous_format $input=19708 #19708=(1,6,6,8,8,192)f32 #19709=(1,6,6,8,8,192)f32 pnnx.Expression pnnx_expr_284 1 1 19660 19712 expr=[-1,64,int(size(@0,2))] #19660=(1,2304,192)f32 pnnx.Expression pnnx_expr_274 0 1 19723 expr=-2 pnnx.Expression pnnx_expr_273 0 1 19724 expr=-1 pnnx.Attribute layers_mmsa.5.residual_group.blocks.4.attn 0 1 relative_position_bias_table.10 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.10=(225,6)f32 pnnx.Attribute pnnx_unique_673 0 1 relative_position_index.10 @relative_position_index=(64,64)i64 #relative_position_index.10=(64,64)i64 Tensor.view Tensor.view_2161 2 1 19709 19710 x_windows.10 $input=19709 $shape=19710 #19709=(1,6,6,8,8,192)f32 #x_windows.10=(36,8,8,192)f32 Tensor.view Tensor.view_2162 2 1 x_windows.10 19712 x1.10 $input=x_windows.10 $shape=19712 #x_windows.10=(36,8,8,192)f32 #x1.10=(36,64,192)f32 nn.Linear layers_mmsa.5.residual_group.blocks.4.attn.qkv 1 1 x1.10 19744 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x1.10=(36,64,192)f32 #19744=(36,64,576)f32 pnnx.Expression pnnx_expr_258 1 1 x1.10 19747 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x1.10=(36,64,192)f32 pnnx.Expression pnnx_expr_253 0 1 19749 expr=[2,0,3,1,4] Tensor.reshape Tensor.reshape_646 2 1 19744 19747 19748 $input=19744 $shape=19747 #19744=(36,64,576)f32 #19748=(36,64,3,6,32)f32 pnnx.Expression pnnx_expr_252 0 1 26803 expr=0 pnnx.Expression pnnx_expr_251 0 1 26804 expr=0 pnnx.Expression pnnx_expr_250 0 1 26805 expr=0 pnnx.Expression pnnx_expr_249 0 1 26806 expr=1 pnnx.Expression pnnx_expr_248 0 1 26807 expr=0 pnnx.Expression pnnx_expr_247 0 1 26808 expr=2 torch.permute torch.permute_2953 2 1 19748 19749 qkv0.10 $input=19748 $dims=19749 #19748=(36,64,3,6,32)f32 #qkv0.10=(3,36,6,64,32)f32 Tensor.select Tensor.select_968 3 1 qkv0.10 26803 26804 q.10 $input=qkv0.10 $dim=26803 $index=26804 #qkv0.10=(3,36,6,64,32)f32 #q.10=(36,6,64,32)f32 pnnx.Expression pnnx_expr_246 1 1 q.10 q0.10 expr=mul(@0,1.767767e-01) #q.10=(36,6,64,32)f32 
Tensor.select Tensor.select_969 3 1 qkv0.10 26805 26806 k.10 $input=qkv0.10 $dim=26805 $index=26806 #qkv0.10=(3,36,6,64,32)f32 #k.10=(36,6,64,32)f32
pnnx.Expression pnnx_expr_244 0 1 19757 expr=[-1]
Tensor.view Tensor.view_2163 2 1 relative_position_index.10 19757 19758 $input=relative_position_index.10 $shape=19757 #relative_position_index.10=(64,64)i64 #19758=(4096)i64
pnnx.Expression pnnx_expr_243 1 1 19758 19759 expr=[@0] #19758=(4096)i64
pnnx.Expression pnnx_expr_240 0 1 19761 expr=[64,64,-1]
Tensor.index Tensor.index_432 2 1 relative_position_bias_table.10 19759 19760 $input=relative_position_bias_table.10 $expr=19759 #relative_position_bias_table.10=(225,6)f32 #19760=(4096,6)f32
pnnx.Expression pnnx_expr_236 0 1 19763 expr=[2,0,1]
Tensor.view Tensor.view_2164 2 1 19760 19761 relative_position_bias.10 $input=19760 $shape=19761 #19760=(4096,6)f32 #relative_position_bias.10=(64,64,6)f32
pnnx.Expression pnnx_expr_235 0 1 26816 expr=0
torch.permute torch.permute_2954 2 1 relative_position_bias.10 19763 19764 $input=relative_position_bias.10 $dims=19763 #relative_position_bias.10=(64,64,6)f32 #19764=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_321 1 1 19764 relative_position_bias0.10 memory_format=torch.contiguous_format $input=19764 #19764=(6,64,64)f32 #relative_position_bias0.10=(6,64,64)f32
torch.transpose torch.transpose_3211 3 1 k.10 19723 19724 19755 $input=k.10 $dim0=19723 $dim1=19724 #k.10=(36,6,64,32)f32 #19755=(36,6,32,64)f32
torch.matmul torch.matmul_2416 2 1 q0.10 19755 attn.22 $input=q0.10 $other=19755 #q0.10=(36,6,64,32)f32 #19755=(36,6,32,64)f32 #attn.22=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3430 2 1 relative_position_bias0.10 26816 19766 $input=relative_position_bias0.10 $dim=26816 #relative_position_bias0.10=(6,64,64)f32 #19766=(1,6,64,64)f32
pnnx.Expression pnnx_expr_233 2 1 attn.22 19766 input.20 expr=add(@0,@1) #attn.22=(36,6,64,64)f32 #19766=(1,6,64,64)f32 #input.20=(36,6,64,64)f32
nn.Softmax layers_mmsa.5.residual_group.blocks.4.attn.softmax 1 1 input.20 19768 dim=-1 #input.20=(36,6,64,64)f32 #19768=(36,6,64,64)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.4.attn.attn_drop 1 1 19768 19769 #19768=(36,6,64,64)f32 #19769=(36,6,64,64)f32
Tensor.select Tensor.select_970 3 1 qkv0.10 26807 26808 v.10 $input=qkv0.10 $dim=26807 $index=26808 #qkv0.10=(3,36,6,64,32)f32 #v.10=(36,6,64,32)f32
pnnx.Expression pnnx_expr_232 0 1 26818 expr=1
pnnx.Expression pnnx_expr_231 0 1 26819 expr=2
torch.matmul torch.matmul_2417 2 1 19769 v.10 19770 $input=19769 $other=v.10 #19769=(36,6,64,64)f32 #v.10=(36,6,64,32)f32 #19770=(36,6,64,32)f32
pnnx.Expression pnnx_expr_230 1 1 x1.10 19772 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x1.10=(36,64,192)f32
torch.transpose torch.transpose_3212 3 1 19770 26818 26819 19771 $input=19770 $dim0=26818 $dim1=26819 #19770=(36,6,64,32)f32 #19771=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_647 2 1 19771 19772 input0.10 $input=19771 $shape=19772 #19771=(36,64,6,32)f32 #input0.10=(36,64,192)f32
nn.Linear layers_mmsa.5.residual_group.blocks.4.attn.proj 1 1 input0.10 19774 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.10=(36,64,192)f32 #19774=(36,64,192)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.4.attn.proj_drop 1 1 19774 19775 #19774=(36,64,192)f32 #19775=(36,64,192)f32
pnnx.Expression pnnx_expr_226 1 1 19660 19776 expr=[-1,8,8,int(size(@0,2))] #19660=(1,2304,192)f32
pnnx.Expression pnnx_expr_213 1 1 14094 19782 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32
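
The Tensor.index / Tensor.view / torch.permute chain here is the relative position bias lookup: a learned (225, 6) table (225 = (2*8-1)^2 relative offsets for an 8x8 window, 6 heads) is gathered with a precomputed (64, 64) index and added to the q@k^T scores before the softmax. A minimal sketch with random stand-ins for the stored attributes:

    import torch

    table = torch.randn(225, 6)                    # relative_position_bias_table.10 (stand-in)
    index = torch.randint(0, 225, (64, 64))        # relative_position_index.10 (stand-in)

    bias = table[index.view(-1)]                   # (4096, 6), as in Tensor.index_432
    bias = bias.view(64, 64, 6).permute(2, 0, 1)   # (6, 64, 64), one map per head
    attn = torch.randn(36, 6, 64, 64)              # q @ k^T scores (torch.matmul_2416)
    attn = attn + bias.unsqueeze(0)                # broadcast over the 36 windows
    attn = attn.softmax(dim=-1)                    # then attn_drop and attn @ v
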
pnnx.Expression pnnx_expr_206 0 1 19784 expr=[0,1,3,2,4,5]
Tensor.view Tensor.view_2165 2 1 19775 19776 windows.10 $input=19775 $shape=19776 #19775=(36,64,192)f32 #windows.10=(36,8,8,192)f32
Tensor.view Tensor.view_2166 2 1 windows.10 19782 x2.10 $input=windows.10 $shape=19782 #windows.10=(36,8,8,192)f32 #x2.10=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_203 1 1 14094 19787 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32
torch.permute torch.permute_2955 2 1 x2.10 19784 19785 $input=x2.10 $dims=19784 #x2.10=(1,6,6,8,8,192)f32 #19785=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_322 1 1 19785 19786 memory_format=torch.contiguous_format $input=19785 #19785=(1,6,8,6,8,192)f32 #19786=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_200 2 1 19660 14094 19791 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19660=(1,2304,192)f32 #14094=(1,192,48,48)f32
Tensor.view Tensor.view_2167 2 1 19786 19787 x3.10 $input=19786 $shape=19787 #19786=(1,6,8,6,8,192)f32 #x3.10=(1,48,48,192)f32
Tensor.view Tensor.view_2168 2 1 x3.10 19791 x4.10 $input=x3.10 $shape=19791 #x3.10=(1,48,48,192)f32 #x4.10=(1,2304,192)f32
pnnx.Expression pnnx_expr_198 2 1 19660 x4.10 input.22 expr=add(@0,@1) #19660=(1,2304,192)f32 #x4.10=(1,2304,192)f32 #input.22=(1,2304,192)f32
nn.LayerNorm layers_mmsa.5.residual_group.blocks.4.norm2 1 1 input.22 19795 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.22=(1,2304,192)f32 #19795=(1,2304,192)f32
nn.Linear layers_mmsa.5.residual_group.blocks.4.mlp.fc1 1 1 19795 19800 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19795=(1,2304,192)f32 #19800=(1,2304,384)f32
nn.GELU layers_mmsa.5.residual_group.blocks.4.mlp.act 1 1 19800 19801 #19800=(1,2304,384)f32 #19801=(1,2304,384)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.4.mlp.drop 1 1 19801 19802 #19801=(1,2304,384)f32 #19802=(1,2304,384)f32
nn.Linear layers_mmsa.5.residual_group.blocks.4.mlp.fc2 1 1 19802 19803 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19802=(1,2304,384)f32 #19803=(1,2304,192)f32
nn.Dropout pnnx_unique_674 1 1 19803 19804 #19803=(1,2304,192)f32 #19804=(1,2304,192)f32
pnnx.Expression pnnx_expr_196 2 1 input.22 19804 19806 expr=add(@0,@1) #input.22=(1,2304,192)f32 #19804=(1,2304,192)f32 #19806=(1,2304,192)f32
pnnx.Attribute layers_mmsa.5.residual_group.blocks.5 0 1 attn_mask.1 @attn_mask=(36,64,64)f32 #attn_mask.1=(36,64,64)f32
nn.LayerNorm layers_mmsa.5.residual_group.blocks.5.norm1 1 1 19806 19835 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19806=(1,2304,192)f32 #19835=(1,2304,192)f32
pnnx.Expression pnnx_expr_173 2 1 19806 14094 19836 expr=[int(size(@0,0)),int(size(@1,2)),int(size(@1,3)),int(size(@0,2))] #19806=(1,2304,192)f32 #14094=(1,192,48,48)f32
pnnx.Expression pnnx_expr_171 0 1 19838 expr=[-4,-4]
pnnx.Expression pnnx_expr_169 0 1 19839 expr=[1,2]
Tensor.view Tensor.view_2169 2 1 19835 19836 x.1 $input=19835 $shape=19836 #19835=(1,2304,192)f32 #x.1=(1,48,48,192)f32
torch.roll torch.roll_2526 3 1 x.1 19838 19839 x0.1 $input=x.1 $shifts=19838 $dims=19839 #x.1=(1,48,48,192)f32 #x0.1=(1,48,48,192)f32
pnnx.Expression pnnx_expr_147 1 1 x0.1 19856 expr=[int(size(@0,0)),int(div(size(@0,1),8)),8,int(div(size(@0,2),8)),8,int(size(@0,3))] #x0.1=(1,48,48,192)f32
pnnx.Expression pnnx_expr_142 0 1 19858 expr=[0,1,3,2,4,5]
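
Block 5 is the shifted variant: it carries a stored @attn_mask=(36,64,64) attribute and rolls the map by (-4, -4) (half the 8-pixel window) before partitioning, so windows straddle the old window boundaries. The mask itself is baked in at export time; a sketch of how Swin-style models typically build it for a 48x48 map (the constant -100.0 is the usual choice for masking pre-softmax logits):

    import torch

    H = W = 48; win, shift = 8, 4
    # label each pixel with its pre-shift region, as in the Swin reference code
    img = torch.zeros(1, H, W, 1)
    cnt = 0
    for hs in (slice(0, -win), slice(-win, -shift), slice(-shift, None)):
        for ws in (slice(0, -win), slice(-win, -shift), slice(-shift, None)):
            img[:, hs, ws, :] = cnt
            cnt += 1
    mask_windows = (img.view(1, H // win, win, W // win, win, 1)
                       .permute(0, 1, 3, 2, 4, 5).reshape(-1, win * win))
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0)   # (36, 64, 64)

Pixels from different pre-shift regions get -100.0 so the softmax effectively zeroes their attention weights.
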
Tensor.view Tensor.view_2170 2 1 x0.1 19856 x1.1 $input=x0.1 $shape=19856 #x0.1=(1,48,48,192)f32 #x1.1=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_139 1 1 x0.1 19861 expr=[-1,8,8,int(size(@0,3))] #x0.1=(1,48,48,192)f32
torch.permute torch.permute_2956 2 1 x1.1 19858 19859 $input=x1.1 $dims=19858 #x1.1=(1,6,8,6,8,192)f32 #19859=(1,6,6,8,8,192)f32
Tensor.contiguous Tensor.contiguous_323 1 1 19859 19860 memory_format=torch.contiguous_format $input=19859 #19859=(1,6,6,8,8,192)f32 #19860=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_137 1 1 19806 19863 expr=[-1,64,int(size(@0,2))] #19806=(1,2304,192)f32
pnnx.Expression pnnx_expr_127 0 1 19874 expr=-2
pnnx.Expression pnnx_expr_126 0 1 19875 expr=-1
pnnx.Attribute layers_mmsa.5.residual_group.blocks.5.attn 0 1 relative_position_bias_table.1 @relative_position_bias_table=(225,6)f32 #relative_position_bias_table.1=(225,6)f32
pnnx.Attribute pnnx_unique_675 0 1 relative_position_index.1 @relative_position_index=(64,64)i64 #relative_position_index.1=(64,64)i64
Tensor.view Tensor.view_2171 2 1 19860 19861 x_windows.1 $input=19860 $shape=19861 #19860=(1,6,6,8,8,192)f32 #x_windows.1=(36,8,8,192)f32
Tensor.view Tensor.view_2172 2 1 x_windows.1 19863 x2.1 $input=x_windows.1 $shape=19863 #x_windows.1=(36,8,8,192)f32 #x2.1=(36,64,192)f32
nn.Linear layers_mmsa.5.residual_group.blocks.5.attn.qkv 1 1 x2.1 19899 bias=True in_features=192 out_features=576 @bias=(576)f32 @weight=(576,192)f32 #x2.1=(36,64,192)f32 #19899=(36,64,576)f32
pnnx.Expression pnnx_expr_107 1 1 x2.1 19902 expr=[int(size(@0,0)),int(size(@0,1)),3,6,int(div(size(@0,2),6))] #x2.1=(36,64,192)f32
pnnx.Expression pnnx_expr_102 0 1 19904 expr=[2,0,3,1,4]
Tensor.reshape Tensor.reshape_648 2 1 19899 19902 19903 $input=19899 $shape=19902 #19899=(36,64,576)f32 #19903=(36,64,3,6,32)f32
pnnx.Expression pnnx_expr_101 0 1 26862 expr=0
pnnx.Expression pnnx_expr_100 0 1 26863 expr=0
pnnx.Expression pnnx_expr_99 0 1 26864 expr=0
pnnx.Expression pnnx_expr_98 0 1 26865 expr=1
pnnx.Expression pnnx_expr_97 0 1 26866 expr=0
pnnx.Expression pnnx_expr_96 0 1 26867 expr=2
torch.permute torch.permute_2957 2 1 19903 19904 qkv0.1 $input=19903 $dims=19904 #19903=(36,64,3,6,32)f32 #qkv0.1=(3,36,6,64,32)f32
Tensor.select Tensor.select_971 3 1 qkv0.1 26862 26863 q.1 $input=qkv0.1 $dim=26862 $index=26863 #qkv0.1=(3,36,6,64,32)f32 #q.1=(36,6,64,32)f32
pnnx.Expression pnnx_expr_95 1 1 q.1 q0.1 expr=mul(@0,1.767767e-01) #q.1=(36,6,64,32)f32 #q0.1=(36,6,64,32)f32
Tensor.select Tensor.select_972 3 1 qkv0.1 26864 26865 k.1 $input=qkv0.1 $dim=26864 $index=26865 #qkv0.1=(3,36,6,64,32)f32 #k.1=(36,6,64,32)f32
pnnx.Expression pnnx_expr_93 0 1 19912 expr=[-1]
Tensor.view Tensor.view_2173 2 1 relative_position_index.1 19912 19913 $input=relative_position_index.1 $shape=19912 #relative_position_index.1=(64,64)i64 #19913=(4096)i64
pnnx.Expression pnnx_expr_92 1 1 19913 19914 expr=[@0] #19913=(4096)i64
pnnx.Expression pnnx_expr_89 0 1 19916 expr=[64,64,-1]
Tensor.index Tensor.index_433 2 1 relative_position_bias_table.1 19914 19915 $input=relative_position_bias_table.1 $expr=19914 #relative_position_bias_table.1=(225,6)f32 #19915=(4096,6)f32
pnnx.Expression pnnx_expr_85 0 1 19918 expr=[2,0,1]
Tensor.view Tensor.view_2174 2 1 19915 19916 relative_position_bias.1 $input=19915 $shape=19916 #19915=(4096,6)f32 #relative_position_bias.1=(64,64,6)f32
pnnx.Expression pnnx_expr_84 0 1 26875 expr=0
torch.permute torch.permute_2958 2 1 relative_position_bias.1 19918 19919 $input=relative_position_bias.1 $dims=19918 #relative_position_bias.1=(64,64,6)f32 #19919=(6,64,64)f32
Tensor.contiguous Tensor.contiguous_324 1 1 19919 relative_position_bias0.1 memory_format=torch.contiguous_format $input=19919 #19919=(6,64,64)f32 #relative_position_bias0.1=(6,64,64)f32
torch.transpose torch.transpose_3213 3 1 k.1 19874 19875 19910 $input=k.1 $dim0=19874 $dim1=19875 #k.1=(36,6,64,32)f32 #19910=(36,6,32,64)f32
torch.matmul torch.matmul_2418 2 1 q0.1 19910 attn.1 $input=q0.1 $other=19910 #q0.1=(36,6,64,32)f32 #19910=(36,6,32,64)f32 #attn.1=(36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3431 2 1 relative_position_bias0.1 26875 19921 $input=relative_position_bias0.1 $dim=26875 #relative_position_bias0.1=(6,64,64)f32 #19921=(1,6,64,64)f32
pnnx.Expression pnnx_expr_82 2 1 attn.1 19921 attn0.1 expr=add(@0,@1) #attn.1=(36,6,64,64)f32 #19921=(1,6,64,64)f32 #attn0.1=(36,6,64,64)f32
pnnx.Expression pnnx_expr_73 2 1 x2.1 attn_mask.1 19928 expr=[int(div(size(@0,0),size(@1,0))),int(size(@1,0)),6,int(size(@0,1)),int(size(@0,1))] #x2.1=(36,64,192)f32 #attn_mask.1=(36,64,64)f32
pnnx.Expression pnnx_expr_72 0 1 26880 expr=1
pnnx.Expression pnnx_expr_71 0 1 26881 expr=0
Tensor.view Tensor.view_2175 2 1 attn0.1 19928 19929 $input=attn0.1 $shape=19928 #attn0.1=(36,6,64,64)f32 #19929=(1,36,6,64,64)f32
torch.unsqueeze torch.unsqueeze_3432 2 1 attn_mask.1 26880 19930 $input=attn_mask.1 $dim=26880 #attn_mask.1=(36,64,64)f32 #19930=(36,1,64,64)f32
torch.unsqueeze torch.unsqueeze_3433 2 1 19930 26881 19931 $input=19930 $dim=26881 #19930=(36,1,64,64)f32 #19931=(1,36,1,64,64)f32
pnnx.Expression pnnx_expr_69 2 1 19929 19931 attn1.1 expr=add(@0,@1) #19929=(1,36,6,64,64)f32 #19931=(1,36,1,64,64)f32 #attn1.1=(1,36,6,64,64)f32
pnnx.Expression pnnx_expr_66 1 1 x2.1 19933 expr=[-1,6,int(size(@0,1)),int(size(@0,1))] #x2.1=(36,64,192)f32
Tensor.view Tensor.view_2176 2 1 attn1.1 19933 input.3 $input=attn1.1 $shape=19933 #attn1.1=(1,36,6,64,64)f32 #input.3=(36,6,64,64)f32
nn.Softmax layers_mmsa.5.residual_group.blocks.5.attn.softmax 1 1 input.3 19935 dim=-1 #input.3=(36,6,64,64)f32 #19935=(36,6,64,64)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.5.attn.attn_drop 1 1 19935 19936 #19935=(36,6,64,64)f32 #19936=(36,6,64,64)f32
Tensor.select Tensor.select_973 3 1 qkv0.1 26866 26867 v.1 $input=qkv0.1 $dim=26866 $index=26867 #qkv0.1=(3,36,6,64,32)f32 #v.1=(36,6,64,32)f32
pnnx.Expression pnnx_expr_65 0 1 26885 expr=1
pnnx.Expression pnnx_expr_64 0 1 26886 expr=2
torch.matmul torch.matmul_2419 2 1 19936 v.1 19937 $input=19936 $other=v.1 #19936=(36,6,64,64)f32 #v.1=(36,6,64,32)f32 #19937=(36,6,64,32)f32
pnnx.Expression pnnx_expr_63 1 1 x2.1 19939 expr=[int(size(@0,0)),int(size(@0,1)),int(size(@0,2))] #x2.1=(36,64,192)f32
torch.transpose torch.transpose_3214 3 1 19937 26885 26886 19938 $input=19937 $dim0=26885 $dim1=26886 #19937=(36,6,64,32)f32 #19938=(36,64,6,32)f32
Tensor.reshape Tensor.reshape_649 2 1 19938 19939 input0.1 $input=19938 $shape=19939 #19938=(36,64,6,32)f32 #input0.1=(36,64,192)f32
nn.Linear layers_mmsa.5.residual_group.blocks.5.attn.proj 1 1 input0.1 19941 bias=True in_features=192 out_features=192 @bias=(192)f32 @weight=(192,192)f32 #input0.1=(36,64,192)f32 #19941=(36,64,192)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.5.attn.proj_drop 1 1 19941 19942 #19941=(36,64,192)f32 #19942=(36,64,192)f32
pnnx.Expression pnnx_expr_59 1 1 19806 19943 expr=[-1,8,8,int(size(@0,2))] #19806=(1,2304,192)f32
pnnx.Expression pnnx_expr_46 1 1 14094 19949 expr=[1,int(div(size(@0,2),8)),int(div(size(@0,3),8)),8,8,-1] #14094=(1,192,48,48)f32
pnnx.Expression pnnx_expr_39 0 1 19951 expr=[0,1,3,2,4,5]
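
The Tensor.view_2175 / unsqueeze / view_2176 sequence applies the stored mask: the per-window scores are unfolded to (B, nW, heads, N, N), the mask is broadcast-added once per window, and the result is folded back before the softmax. A minimal sketch with stand-in tensors (nW=36 windows, 6 heads, N=64 tokens per window):

    import torch

    nW, heads, N = 36, 6, 64
    attn = torch.randn(nW, heads, N, N)          # attn0.1: scores + position bias
    attn_mask = torch.randn(nW, N, N)            # stand-in for the stored attn_mask.1

    attn = attn.view(-1, nW, heads, N, N) \
         + attn_mask.unsqueeze(1).unsqueeze(0)   # (1,36,6,64,64) + (1,36,1,64,64)
    attn = attn.view(-1, heads, N, N).softmax(dim=-1)   # input.3 -> 19935
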
Tensor.view Tensor.view_2177 2 1 19942 19943 windows.1 $input=19942 $shape=19943 #19942=(36,64,192)f32 #windows.1=(36,8,8,192)f32
Tensor.view Tensor.view_2178 2 1 windows.1 19949 x3.1 $input=windows.1 $shape=19949 #windows.1=(36,8,8,192)f32 #x3.1=(1,6,6,8,8,192)f32
pnnx.Expression pnnx_expr_36 1 1 14094 19954 expr=[1,int(size(@0,2)),int(size(@0,3)),-1] #14094=(1,192,48,48)f32
torch.permute torch.permute_2959 2 1 x3.1 19951 19952 $input=x3.1 $dims=19951 #x3.1=(1,6,6,8,8,192)f32 #19952=(1,6,8,6,8,192)f32
Tensor.contiguous Tensor.contiguous_325 1 1 19952 19953 memory_format=torch.contiguous_format $input=19952 #19952=(1,6,8,6,8,192)f32 #19953=(1,6,8,6,8,192)f32
pnnx.Expression pnnx_expr_33 0 1 19956 expr=[4,4]
pnnx.Expression pnnx_expr_30 0 1 19957 expr=[1,2]
Tensor.view Tensor.view_2179 2 1 19953 19954 shifted_x.1 $input=19953 $shape=19954 #19953=(1,6,8,6,8,192)f32 #shifted_x.1=(1,48,48,192)f32
pnnx.Expression pnnx_expr_27 2 1 19806 14094 19961 expr=[int(size(@0,0)),int(mul(size(@1,2),size(@1,3))),int(size(@0,2))] #19806=(1,2304,192)f32 #14094=(1,192,48,48)f32
torch.roll torch.roll_2527 3 1 shifted_x.1 19956 19957 x4.1 $input=shifted_x.1 $shifts=19956 $dims=19957 #shifted_x.1=(1,48,48,192)f32 #x4.1=(1,48,48,192)f32
Tensor.view Tensor.view_2180 2 1 x4.1 19961 x5.1 $input=x4.1 $shape=19961 #x4.1=(1,48,48,192)f32 #x5.1=(1,2304,192)f32
pnnx.Expression pnnx_expr_25 2 1 19806 x5.1 input.5 expr=add(@0,@1) #19806=(1,2304,192)f32 #x5.1=(1,2304,192)f32 #input.5=(1,2304,192)f32
nn.LayerNorm layers_mmsa.5.residual_group.blocks.5.norm2 1 1 input.5 19965 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #input.5=(1,2304,192)f32 #19965=(1,2304,192)f32
nn.Linear layers_mmsa.5.residual_group.blocks.5.mlp.fc1 1 1 19965 19970 bias=True in_features=192 out_features=384 @bias=(384)f32 @weight=(384,192)f32 #19965=(1,2304,192)f32 #19970=(1,2304,384)f32
nn.GELU layers_mmsa.5.residual_group.blocks.5.mlp.act 1 1 19970 19971 #19970=(1,2304,384)f32 #19971=(1,2304,384)f32
nn.Dropout layers_mmsa.5.residual_group.blocks.5.mlp.drop 1 1 19971 19972 #19971=(1,2304,384)f32 #19972=(1,2304,384)f32
nn.Linear layers_mmsa.5.residual_group.blocks.5.mlp.fc2 1 1 19972 19973 bias=True in_features=384 out_features=192 @bias=(192)f32 @weight=(192,384)f32 #19972=(1,2304,384)f32 #19973=(1,2304,192)f32
nn.Dropout pnnx_unique_676 1 1 19973 19974 #19973=(1,2304,192)f32 #19974=(1,2304,192)f32
pnnx.Expression pnnx_expr_23 2 1 input.5 19974 19976 expr=add(@0,@1) #input.5=(1,2304,192)f32 #19974=(1,2304,192)f32 #19976=(1,2304,192)f32
pnnx.Expression pnnx_expr_21 0 1 19978 expr=1
pnnx.Expression pnnx_expr_20 0 1 19979 expr=2
pnnx.Expression pnnx_expr_15 2 1 19976 14094 19985 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #19976=(1,2304,192)f32 #14094=(1,192,48,48)f32
torch.transpose torch.transpose_3215 3 1 19976 19978 19979 19984 $input=19976 $dim0=19978 $dim1=19979 #19976=(1,2304,192)f32 #19984=(1,192,2304)f32
Tensor.view Tensor.view_2181 2 1 19984 19985 input.463 $input=19984 $shape=19985 #19984=(1,192,2304)f32 #input.463=(1,192,48,48)f32
nn.Conv2d layers_mmsa.5.conv 1 1 input.463 19987 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.463=(1,192,48,48)f32 #19987=(1,192,48,48)f32
pnnx.Expression pnnx_expr_14 0 1 19988 expr=-1
pnnx.Expression pnnx_expr_13 0 1 19989 expr=2
pnnx.Expression pnnx_expr_12 0 1 19990 expr=1
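
After the last block of the residual group, the token sequence is converted back to image layout for layers_mmsa.5.conv (a 3x3 conv applied at the end of the group) and then flattened again for the group-level residual add. A minimal sketch of this sequence-to-image round trip (stand-in tensors; shapes follow the export):

    import torch
    import torch.nn as nn

    x = torch.randn(1, 2304, 192)                # 19976, token sequence
    B, C, H, W = 1, 192, 48, 48
    feat = x.transpose(1, 2).view(B, C, H, W)    # torch.transpose_3215 / Tensor.view_2181
    conv = nn.Conv2d(C, C, 3, padding=1)         # layers_mmsa.5.conv (random weights here)
    feat = conv(feat)
    # flatten back and add the group input (torch.flatten_2203 / transpose_3216):
    x = feat.flatten(2).transpose(1, 2) + torch.randn(1, 2304, 192)  # stand-in for 19011
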
pnnx.Expression pnnx_expr_11 0 1 26913 expr=2
torch.flatten torch.flatten_2203 3 1 19987 19989 19988 19991 $input=19987 $start_dim=19989 $end_dim=19988 #19987=(1,192,48,48)f32 #19991=(1,192,2304)f32
torch.transpose torch.transpose_3216 3 1 19991 19990 26913 19992 $input=19991 $dim0=19990 $dim1=26913 #19991=(1,192,2304)f32 #19992=(1,2304,192)f32
pnnx.Expression pnnx_expr_10 2 1 19992 19011 19993 expr=add(@0,@1) #19992=(1,2304,192)f32 #19011=(1,2304,192)f32 #19993=(1,2304,192)f32
nn.LayerNorm norm_mmsa 1 1 19993 2206 elementwise_affine=True eps=1.000000e-05 normalized_shape=(192) @bias=(192)f32 @weight=(192)f32 #19993=(1,2304,192)f32 #2206=(1,2304,192)f32
pnnx.Expression pnnx_expr_8 0 1 19995 expr=1
pnnx.Expression pnnx_expr_7 0 1 19996 expr=2
pnnx.Expression pnnx_expr_2 2 1 2206 14094 20002 expr=[int(size(@0,0)),192,int(size(@1,2)),int(size(@1,3))] #2206=(1,2304,192)f32 #14094=(1,192,48,48)f32
torch.transpose torch.transpose_3217 3 1 2206 19995 19996 20001 $input=2206 $dim0=19995 $dim1=19996 #2206=(1,2304,192)f32 #20001=(1,192,2304)f32
Tensor.view Tensor.view_2182 2 1 20001 20002 input.1 $input=20001 $shape=20002 #20001=(1,192,2304)f32 #input.1=(1,192,48,48)f32
nn.Conv2d conv_after_body_mmsa 1 1 input.1 2212 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=192 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(192)f32 @weight=(192,192,3,3)f32 #input.1=(1,192,48,48)f32 #2212=(1,192,48,48)f32
pnnx.Expression pnnx_expr_0 2 1 2212 14094 input1.3 expr=add(@0,@1) #2212=(1,192,48,48)f32 #14094=(1,192,48,48)f32 #input1.3=(1,192,48,48)f32
nn.ConvTranspose2d upsample_conv 1 1 input1.3 2220 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(8,8) out_channels=192 output_padding=(0,0) padding=(0,0) stride=(8,8) @bias=(192)f32 @weight=(192,192,8,8)f32 #input1.3=(1,192,48,48)f32 #2220=(1,192,384,384)f32
nn.Conv2d conv_last 1 1 2220 2221 bias=True dilation=(1,1) groups=1 in_channels=192 kernel_size=(3,3) out_channels=3 padding=(1,1) padding_mode=zeros stride=(1,1) @bias=(3)f32 @weight=(3,192,3,3)f32 #2220=(1,192,384,384)f32 #2221=(1,3,384,384)f32
torch.zeros_like torch.zeros_like_3434 1 1 x_a.1 2228 $input=x_a.1 #x_a.1=(1,192,48,48)f32 #2228=(1,192,48,48)f32
prim::TupleConstruct pnnx_21790 4 1 2221 x_a.1 x_b.1 2228 2233 #2221=(1,3,384,384)f32 #x_a.1=(1,192,48,48)f32 #x_b.1=(1,192,48,48)f32 #2228=(1,192,48,48)f32
pnnx.Output pnnx_output_0 1 0 2233
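
The graph ends with the reconstruction head: norm_mmsa and conv_after_body_mmsa close the transformer body with a global residual over the pre-body features (14094), an 8x-stride ConvTranspose2d brings the 48x48 features back to 384x384 (inverting the 8x8/stride-8 patch embedding), and conv_last maps 192 channels to RGB. The network then returns a 4-tuple that also carries two intermediate feature maps and a zeros placeholder. A minimal sketch of this head under those shapes (module names here mirror the export; weights are random stand-ins):

    import torch
    import torch.nn as nn

    feat = torch.randn(1, 192, 48, 48)           # input.1, normalized body output (stand-in)
    res  = torch.randn(1, 192, 48, 48)           # 14094, pre-body features (stand-in)

    conv_after_body = nn.Conv2d(192, 192, 3, padding=1)
    upsample = nn.ConvTranspose2d(192, 192, kernel_size=8, stride=8)   # 48 -> 384
    conv_last = nn.Conv2d(192, 3, 3, padding=1)

    x = conv_after_body(feat) + res              # input1.3, global residual
    out = conv_last(upsample(x))                 # (1, 3, 384, 384), as in 2221
    aux = torch.zeros_like(res)                  # torch.zeros_like_3434
    # prim::TupleConstruct: (out, x_a.1, x_b.1, aux) is the traced return value
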