import os
from functools import partial

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

# timm moved these helpers from timm.models.layers to timm.layers;
# try the old import path first and fall back to the new one.
try:
    from timm.models.layers import drop_path, to_2tuple
except ImportError:
    from timm.layers import drop_path, to_2tuple

# xformers is optional; it is only needed for memory-efficient attention (xattn=True).
try:
    import xformers.ops as xops
except ImportError:
    xops = None
    print("xformers is not installed; run 'pip install xformers' to enable memory-efficient attention")


class PatchDropout(nn.Module):
    """Randomly drop a fraction of patch tokens during training.

    See https://arxiv.org/abs/2212.00794 (patch dropout / FLIP).
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # keep the CLS token out of the dropout pool
        # log whether rotary-embedding indexing is active for this run
        print(f"os.getenv('RoPE')={os.getenv('RoPE')}")

    def forward(self, x):
        if not self.training or self.prob == 0.:
            return x

        if self.exclude_first_token:
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        # always keep at least one patch token
        keep_prob = 1 - self.prob
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        # sample a random per-example subset of patches via top-k over noise
        rand = torch.randn(batch, num_tokens)
        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        # when rotary embeddings are enabled, the caller needs the kept indices
        # so the rotary frequencies can be subset to match
        if self.training and os.getenv('RoPE') == '1':
            return x, patch_indices_keep

        return x
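

# Minimal usage sketch for PatchDropout (illustrative; the batch size, token
# count, and 50% drop rate below are arbitrary assumptions, and the RoPE
# environment variable is assumed to be unset):
def _patch_dropout_demo():
    drop = PatchDropout(prob=0.5)
    drop.train()  # patch dropout is a no-op in eval mode
    tokens = torch.randn(2, 197, 768)  # [batch, 1 CLS + 196 patches, dim]
    out = drop(tokens)
    assert out.shape == (2, 1 + 98, 768)  # CLS kept, half of the patches dropped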


class DropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of
    residual blocks."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)
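

# Sketch of DropPath behavior (illustrative values): in training mode, each
# sample's residual branch is zeroed with probability drop_prob and the
# survivors are rescaled by 1 / (1 - drop_prob) to preserve the expectation.
def _drop_path_demo():
    dp = DropPath(drop_prob=0.25)
    dp.train()
    x = torch.ones(8, 4, 16)
    out = dp(x)  # roughly a quarter of the 8 samples zeroed; the rest scaled by 4/3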


class Mlp(nn.Module):
    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        drop=0.,
        subln=False,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()

        # optional sub-LN: an extra LayerNorm inside the FFN
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()

        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.ffn_ln(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class SwiGLU(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
                 norm_layer=nn.LayerNorm, subln=False):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features

        self.w1 = nn.Linear(in_features, hidden_features)
        self.w2 = nn.Linear(in_features, hidden_features)

        self.act = act_layer()
        self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
        self.w3 = nn.Linear(hidden_features, out_features)

        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # SwiGLU gating: SiLU(w1(x)) multiplied elementwise by w2(x)
        x1 = self.w1(x)
        x2 = self.w2(x)
        hidden = self.act(x1) * x2
        x = self.ffn_ln(hidden)
        x = self.w3(x)
        x = self.drop(x)
        return x
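

# Both FFN variants map [B, N, dim] -> [B, N, dim]; a minimal shape sketch with
# arbitrary sizes (not values used elsewhere in this file):
def _ffn_demo():
    x = torch.randn(2, 10, 64)
    mlp = Mlp(in_features=64, hidden_features=256)
    swiglu = SwiGLU(in_features=64, hidden_features=256)
    assert mlp(x).shape == x.shape
    assert swiglu(x).shape == x.shape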


class Attention(nn.Module):
    def __init__(
            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
            proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False,
            norm_layer=nn.LayerNorm):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.subln = subln
        if self.subln:
            # separate q/k/v projections so sub-LN can be applied to the
            # attention output; biases are handled explicitly below
            self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
            self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
            # forward() references q_bias and v_bias (k gets no bias), so they
            # must be defined even when qkv_bias is False
            if qkv_bias:
                self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
                self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
            else:
                self.q_bias = None
                self.v_bias = None
        else:
            self.qkv = nn.Linear(dim, all_head_dim * 3, bias=qkv_bias)

        # relative position bias is not used in this variant
        self.window_size = None
        self.relative_position_bias_table = None
        self.relative_position_index = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()

        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

        self.rope = rope

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        B, N, C = x.shape
        if self.subln:
            q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
            k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
            v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)

            q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)  # B, num_heads, N, head_dim
            k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
            v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
        else:
            # fused projection: B, N, 3, num_heads, head_dim -> 3, B, num_heads, N, head_dim
            qkv = self.qkv(x)
            qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
            q, k, v = qkv[0], qkv[1], qkv[2]

        if self.rope:
            # rotate patch tokens only; the CLS token (index 0) is left unrotated
            q_t = q[:, :, 1:, :]
            ro_q_t = self.rope(q_t)
            q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)

            k_t = k[:, :, 1:, :]
            ro_k_t = self.rope(k_t)
            k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)

        if self.xattn:
            assert xops is not None, "xattn=True requires xformers (pip install xformers)"
            # xformers expects B, N, num_heads, head_dim
            q = q.permute(0, 2, 1, 3)
            k = k.permute(0, 2, 1, 3)
            v = v.permute(0, 2, 1, 3)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale,
            )
            x = x.reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        else:
            q = q * self.scale
            attn = (q @ k.transpose(-2, -1))

            if self.relative_position_bias_table is not None:
                relative_position_bias = \
                    self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
                        self.window_size[0] * self.window_size[1] + 1,
                        self.window_size[0] * self.window_size[1] + 1, -1)  # Wh*Ww+1, Wh*Ww+1, nH
                relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww+1, Wh*Ww+1
                attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)

            if rel_pos_bias is not None:
                attn = attn + rel_pos_bias.type_as(attn)

            if attn_mask is not None:
                # attn_mask holds B, N booleans; False key positions are masked out
                attn_mask = attn_mask.bool()
                attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
            x = self.inner_attn_ln(x)
            x = self.proj(x)
            x = self.proj_drop(x)
        return x
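

# Shape sketch for Attention on the plain (non-xformers) path; the sizes and
# the boolean mask below are arbitrary illustrative assumptions:
def _attention_demo():
    attn = Attention(dim=64, num_heads=8, qkv_bias=True)
    x = torch.randn(2, 10, 64)
    assert attn(x).shape == (2, 10, 64)
    # attn_mask is a [B, N] boolean tensor; False positions get -inf key scores
    mask = torch.ones(2, 10, dtype=torch.bool)
    mask[:, -2:] = False  # mask out the last two tokens as keys
    assert attn(x, attn_mask=mask).shape == (2, 10, 64)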


class Block(nn.Module):

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False,
                 subln=False, naiveswiglu=False):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim,
            xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)

        if naiveswiglu:
            self.mlp = SwiGLU(
                in_features=dim,
                hidden_features=mlp_hidden_dim,
                subln=subln,
                norm_layer=norm_layer,
            )
        else:
            self.mlp = Mlp(
                in_features=dim,
                hidden_features=mlp_hidden_dim,
                act_layer=act_layer,
                subln=subln,
                drop=drop
            )

        # layer scale (https://arxiv.org/abs/2103.17239): learnable per-channel
        # scaling of each residual branch, initialized to init_values
        if init_values is not None and init_values > 0:
            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

        self.postnorm = postnorm

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        if self.gamma_1 is None:
            if self.postnorm:
                x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
                x = x + self.drop_path(self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
                x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            if self.postnorm:
                x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
                x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
            else:
                x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
                x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x
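

# Residual-block sketch (illustrative sizes): a Block preserves token shape,
# with or without layer scale.
def _block_demo():
    x = torch.randn(2, 10, 64)
    blk = Block(dim=64, num_heads=8, qkv_bias=True)
    assert blk(x).shape == x.shape
    scaled = Block(dim=64, num_heads=8, qkv_bias=True, init_values=1e-5)
    assert scaled(x).shape == x.shape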


class PatchEmbed(nn.Module):
    """Image to patch embedding via a strided convolution."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x, **kwargs):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # B, C, H, W -> B, num_patches, embed_dim
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
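

# PatchEmbed shape sketch: 224x224 images with 16x16 patches yield 14*14 = 196
# tokens (illustrative default sizes).
def _patch_embed_demo():
    embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    imgs = torch.randn(2, 3, 224, 224)
    assert embed(imgs).shape == (2, 196, 768)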


class EVAVisionTransformer(nn.Module):
    """Vision Transformer with support for patch or hybrid CNN input stage."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
                 use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
                 use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
                 pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False,
                 ):
        super().__init__()
        self.image_size = img_size
        self.num_features = self.embed_dim = embed_dim

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        if use_abs_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        else:
            self.pos_embed = None
        self.pos_drop = nn.Dropout(p=drop_rate)

        # relative position bias and rotary embeddings are not constructed in
        # this variant; self.rope stays None, so the rope argument has no effect
        # unless a rotary module is attached externally
        self.rel_pos_bias = None
        self.rope = None

        self.naiveswiglu = naiveswiglu

        # stochastic depth decay rule: drop-path rate increases linearly with depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.use_rel_pos_bias = use_rel_pos_bias
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
                init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
                xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)
            for i in range(depth)])

        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()

        self.grad_checkpointing = grad_checkpointing

    def get_num_layers(self):
        return len(self.blocks)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        assert unlocked_groups == 0, 'partial locking not currently supported for this model'
        for param in self.parameters():
            param.requires_grad = False

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.grad_checkpointing = enable

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}
    def forward_features(self, x):
        x = self.patch_embed(x)
        batch_size, seq_len, _ = x.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        x = self.pos_drop(x)

        # when RoPE is enabled via the environment, patch dropout must also
        # report which token indices were kept so the rotary frequencies can be
        # subset to match (requires self.rope to be set to a rotary-embedding
        # module; it is None by default in this file)
        if os.getenv('RoPE') == '1':
            if self.training and not isinstance(self.patch_dropout, nn.Identity):
                x, patch_indices_keep = self.patch_dropout(x)
                self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
            else:
                self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
                x = self.patch_dropout(x)
        else:
            x = self.patch_dropout(x)

        rel_pos_bias = None

        for blk in self.blocks:
            if self.grad_checkpointing:
                # pass rel_pos_bias directly; wrapping it in a tuple would hand
                # the block a tuple instead of a tensor (or None)
                x = checkpoint(blk, x, rel_pos_bias)
            else:
                x = blk(x, rel_pos_bias=rel_pos_bias)

        return x

    def forward(self, x):
        """Return the raw (unnormalized) token features from forward_features.

        :param x: input image batch of shape [B, C, H, W]
        :return: token features of shape [B, 1 + num_patches, embed_dim]
        """
        features = self.forward_features(x)
        return features
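

# Hedged end-to-end smoke test: the tiny configuration below is an arbitrary
# assumption for illustration, not an EVA release size. Run this file directly
# to check that a forward pass produces the expected token-feature shape.
if __name__ == "__main__":
    model = EVAVisionTransformer(
        img_size=224, patch_size=16, embed_dim=192, depth=2, num_heads=4,
        mlp_ratio=4., qkv_bias=True)
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    print(feats.shape)  # expected: torch.Size([1, 197, 192])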