        # --- tail of the backbone class: the snippet begins mid-way through forward_features ---
        x = self.norm_pre(x)
        # return the final feature map together with two intermediate stage outputs
        return x, [out[0], out[2]]

    def forward(self, image, mask=None, *args, **kwargs):
        feature, out = self.forward_features(image)
        return feature, out

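# `DWConv` is used by the `Mlp` block below but its definition is not part of this
# snippet. The module here is a sketch assuming the common PVTv2/SegFormer-style
# depthwise 3x3 convolution over the re-spatialized tokens; it is an assumption,
# not the file's original code.
class DWConv(nn.Module):
    def __init__(self, dim=768):
        super().__init__()
        # depthwise conv: groups=dim filters each channel independently
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, bias=True, groups=dim)

    def forward(self, x, H, W):
        B, N, C = x.shape
        x = x.transpose(1, 2).view(B, C, H, W)   # (B, N, C) tokens -> (B, C, H, W) map
        x = self.dwconv(x)
        x = x.flatten(2).transpose(1, 2)         # back to (B, N, C)
        return x
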
class Mlp(nn.Module):
    """Feed-forward block: Linear -> depthwise conv -> activation -> Linear."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # Kaiming-style init: variance scaled by the fan-out of the conv kernel
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        x = self.fc1(x)
        # the depthwise conv mixes tokens spatially and needs H, W to restore the 2D layout
        x = self.dwconv(x, H, W)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x

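# Minimal usage sketch (not part of the original file): the batch size, channel width,
# and 14x14 token grid below are arbitrary assumptions chosen to illustrate the
# (B, N, C) layout that Mlp expects.
def _mlp_example():
    import torch
    mlp = Mlp(in_features=64, hidden_features=256, drop=0.1)
    x = torch.randn(2, 14 * 14, 64)   # tokens from an assumed 14x14 feature map
    y = mlp(x, H=14, W=14)            # H and W let DWConv rebuild the spatial grid
    return y.shape                    # torch.Size([2, 196, 64])
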
class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction of keys/values."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # downsample the token grid by sr_ratio before computing keys and values
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x, H, W):
        B, N, C = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            # compute keys/values on a spatially reduced copy of the tokens
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.float()        # softmax in fp32 for numerical stability
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # The source snippet is truncated here; the lines below restore the standard
        # ending of this attention block (weighted sum, output projection, dropout).
        attn = attn.to(v.dtype)    # cast back after the explicit fp32 softmax above
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

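# Usage sketch (not part of the original file): illustrates spatial-reduction attention.
# With sr_ratio=2 the keys and values come from a 2x-downsampled token grid, so the
# attention matrix shrinks from N x N to N x (N / 4). All sizes are assumptions.
def _attention_example():
    import torch
    attn = Attention(dim=64, num_heads=4, sr_ratio=2)
    x = torch.randn(2, 16 * 16, 64)   # (B, N, C) tokens from an assumed 16x16 grid
    y = attn(x, H=16, W=16)
    return y.shape                    # torch.Size([2, 256, 64])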