            nn.ConvTranspose2d(320, 128, kernel_size=4, stride=2, padding=1),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        )
        # 768-to-96 upsampling: three upsampling stages, progressively reducing the channel count
        self.upsample3 = nn.Sequential(
            nn.ConvTranspose2d(512, 320, kernel_size=4, stride=2, padding=1),
            nn.ConvTranspose2d(320, 128, kernel_size=4, stride=2, padding=1),
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        )

    def forward(self, inputs):
        # Upsample the three deeper stage outputs and fuse them with the shallowest one
        x1, x2, x3, x4 = inputs
        up2 = self.upsample1(x2)
        up3 = self.upsample2(x3)
        up4 = self.upsample3(x4)
        x = torch.cat([x1, up2, up3, up4], dim=1)
        return x
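
# Hedged shape sketch, assuming SegFormer-style stage outputs where x1..x4 carry
# 64/128/320/512 channels at 1/4, 1/8, 1/16 and 1/32 of the input resolution, and
# assuming self.upsample1 (not shown in this excerpt) is a single 128 -> 64
# transposed conv; each ConvTranspose2d(kernel_size=4, stride=2, padding=1)
# exactly doubles the spatial size:
#   up2 = self.upsample1(x2): (B, 128, H/8,  W/8)  -> (B, 64, H/4, W/4)
#   up3 = self.upsample2(x3): (B, 320, H/16, W/16) -> (B, 64, H/4, W/4)
#   up4 = self.upsample3(x4): (B, 512, H/32, W/32) -> (B, 64, H/4, W/4)
# so torch.cat([x1, up2, up3, up4], dim=1) yields (B, 4 * 64, H/4, W/4) = (B, 256, H/4, W/4).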

class MixVisionTransformer(nn.Module):
    def __init__(self, seg_pretrain_path=None, img_size=512, patch_size=4, in_chans=3,
                 embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
                 qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.,
                 drop_path_rate=0.1, norm_layer=partial(nn.LayerNorm, eps=1e-6),
                 depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1]):
        super().__init__()
        self.depths = depths
        # patch_embed
        self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4,
                                              in_chans=in_chans, embed_dim=embed_dims[0])
        self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2,
                                              in_chans=embed_dims[0], embed_dim=embed_dims[1])
        self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2,
                                              in_chans=embed_dims[1], embed_dim=embed_dims[2])
        self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2,
                                              in_chans=embed_dims[2], embed_dim=embed_dims[3])
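        # The stride-4 stem followed by three stride-2 embeddings yields feature maps at
        # 1/4, 1/8, 1/16 and 1/32 of the input resolution, which is why the later stages
        # receive img_size // 4, // 8 and // 16.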

        # transformer encoder
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        cur = 0
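        # e.g. with depths=[3, 4, 18, 3] and drop_path_rate=0.1, dpr holds 28 values rising
        # linearly from 0.0 to 0.1; block i of stage s uses dpr[cur + i], where cur counts
        # the blocks of all earlier stages.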
        self.block1 = nn.ModuleList([Block(
            dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias,
            qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, sr_ratio=sr_ratios[0])
            for i in range(depths[0])])
        self.norm1 = norm_layer(embed_dims[0])

        cur += depths[0]
        self.block2 = nn.ModuleList([Block(
            dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias,
            qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, sr_ratio=sr_ratios[1])
            for i in range(depths[1])])
        self.norm2 = norm_layer(embed_dims[1])

        cur += depths[1]
        self.block3 = nn.ModuleList([Block(
            dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias,
            qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, sr_ratio=sr_ratios[2])
            for i in range(depths[2])])
        self.norm3 = norm_layer(embed_dims[2])

        cur += depths[2]
        self.block4 = nn.ModuleList([Block(
            dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias,
            qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i],
            norm_layer=norm_layer, sr_ratio=sr_ratios[3])
            for i in range(depths[3])])
        self.norm4 = norm_layer(embed_dims[3])

        if seg_pretrain_path is not None:
            self.load_state_dict(torch.load(seg_pretrain_path), strict=False)
            # Inflate the first patch-embedding conv from 3 to 6 input channels
            original_first_layer = self.patch_embed1.proj
            new_first_layer = nn.Conv2d(6, original_first_layer.out_channels,
                                        kernel_size=original_first_layer.kernel_size,
                                        stride=original_first_layer.stride,
                                        padding=original_first_layer.padding, bias=False)
            # Copy the pretrained RGB weights into the first three input channels ...
            new_first_layer.weight.data[:, :3, :, :] = original_first_layer.weight.data.clone()[:, :3, :, :]
            # ... and Kaiming-initialize the three new input channels
            new_first_layer.weight.data[:, 3:, :, :] = torch.nn.init.kaiming_normal_(new_first_layer.weight[:, 3:, :, :])
            self.patch_embed1.proj = new_first_layer
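            # Note (editorial assumption): the 6-channel input presumably carries two RGB
            # images concatenated along the channel axis (e.g. a bi-temporal pair for change
            # detection); inflating the pretrained layer keeps the RGB weights useful while
            # the new channels start from a fresh Kaiming initialization. The pretrained
            # layer's bias, if any, is dropped because the new conv uses bias=False.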

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            # The excerpt is truncated here; the standard SegFormer implementation
            # continues as follows (requires `import math`):
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
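
# A minimal, self-contained sketch of the channel-inflation trick used above
# (hypothetical toy layer; names and shapes are illustrative, not from the original):
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    toy = nn.Conv2d(3, 8, kernel_size=7, stride=4, padding=3)           # "pretrained" 3-channel conv
    inflated = nn.Conv2d(6, 8, kernel_size=7, stride=4, padding=3, bias=False)
    inflated.weight.data[:, :3] = toy.weight.data.clone()               # reuse the RGB weights
    nn.init.kaiming_normal_(inflated.weight.data[:, 3:])                # fresh init for new channels
    out = inflated(torch.randn(1, 6, 64, 64))
    print(out.shape)  # expected: torch.Size([1, 8, 16, 16])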