        # After the matrix multiplications, we reshape each output based on its plane for concatenation
        Scor = (Ecor * input_tensor).permute(0, 2, 1, 3, 4).contiguous()  # Scor now has shape (batch_size, N, channels, width, height)
        Ssag = (Esag * input_tensor).permute(0, 3, 1, 2, 4).contiguous()  # Ssag now has shape (batch_size, N, channels, length, height)
        Sax = (Eax * input_tensor).permute(0, 4, 1, 2, 3).contiguous()    # Sax now has shape (batch_size, N, channels, length, width)
        # Concatenate the reshaped extracted features : R(C3d×L×W×H) → R(3N×C3d×L×L)
        S = torch.cat((Scor, Ssag, Sax), dim=1)  # S now has shape (batch_size, 3N, channels, length, length)
        # 2D CNN block
        # Perform global average pooling using a pre-trained ResNet50 network
        # D2d : R(3N×C3d×L×L) → R(3N×C2d) (C2d is the output channel size of the 2D CNN)
        S = S.view(-1, C, H, W).contiguous()  # fold the slices into the batch dim: (batch_size*3N, channels, length, length)
        pooled_feat = self.CNN_2D(S).view(B, 3 * H, -1)  # Eq. (4): (batch_size, 3N, C2d)
        # Non-linear projection part, T ∈ R(3N×d) (d is the projection dimension)
        output_tensor = self.non_linear_proj(pooled_feat)  # Now we have the desired output shape
        return output_tensor
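
# A minimal, hypothetical shape-check sketch (not part of the original model): it only
# demonstrates that the permute/cat pattern used above turns a (B, C, L, L, L) volume
# into (B, 3N, C, L, L) with N = L, and that the fold into the batch dim for the 2D CNN
# works. The sizes B, C, L below are toy values chosen for a quick check.
def _sanity_check_multi_plane_reshape():
    B, C, L = 2, 4, 8
    x = torch.randn(B, C, L, L, L)           # stands in for the 3D feature map
    Scor = x.permute(0, 2, 1, 3, 4)          # coronal slices  -> (B, L, C, L, L)
    Ssag = x.permute(0, 3, 1, 2, 4)          # sagittal slices -> (B, L, C, L, L)
    Sax = x.permute(0, 4, 1, 2, 3)           # axial slices    -> (B, L, C, L, L)
    S = torch.cat((Scor, Ssag, Sax), dim=1)
    assert S.shape == (B, 3 * L, C, L, L)    # R(3N×C3d×L×L) with N = L
    S2d = S.view(-1, C, L, L)                # fold slices into the batch dim for the 2D CNN
    assert S2d.shape == (B * 3 * L, C, L, L)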
class EmbeddingLayer(nn.Module):
    '''
    After the multi-plane and multi-slice image tokens are calculated, position and
    plane embedding tokens are added to the image tokens produced by the non-linear
    projection layer.
    Ref. 3.5. Position and Plane Embedding Block
    emb_size = d = 256, total_tokens = 3S = 3*128 = 384
    where d = attention dimension and S = input size
    '''
    def __init__(self, emb_size: int = 256, total_tokens: int = 384):
        super(EmbeddingLayer, self).__init__()
        # zcls ∈ R(d)
        self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
        # zsep ∈ R(d)
        self.sep_token = nn.Parameter(torch.randn(1, 1, emb_size))
        # Ppln ∈ R((3S+4)×d)
        # To inject plane-specific information into the model, we use separate plane
        # embeddings for the different segments of the input tensor (see Fig. 3(d))
        self.coronal_plane = nn.Parameter(torch.randn(1, emb_size))
        self.sagittal_plane = nn.Parameter(torch.randn(1, emb_size))
        self.axial_plane = nn.Parameter(torch.randn(1, emb_size))
        # Ppos ∈ R((3S+4)×d)
        self.positions = nn.Parameter(torch.randn(total_tokens + 4, emb_size))

    def forward(self, input_tensor):
        b, _, _ = input_tensor.shape
        cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)
        sep_token = repeat(self.sep_token, '() n e -> b n e', b=b)
        # [CLS] + 128 coronal tokens + [SEP] + 128 sagittal tokens + [SEP] + 128 axial tokens + [SEP]
        x = torch.cat((cls_tokens, input_tensor[:, :128, :], sep_token,
                       input_tensor[:, 128:256, :], sep_token,
                       input_tensor[:, 256:, :], sep_token), dim=1)
        # Add the plane embeddings segment-wise: indices 0-129 form the coronal segment
        # (CLS + 128 + SEP), 130-258 the sagittal segment, 259-387 the axial segment
        x[:, :130] += self.coronal_plane
        x[:, 130:259] += self.sagittal_plane
        x[:, 259:] += self.axial_plane
        # Add the position embeddings to every token
        x += self.positions
        # The additions above implement Eq. (6)
        return x
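
# A hypothetical quick check for EmbeddingLayer (the batch size 2 is illustrative only):
# 384 projected image tokens go in, one CLS and three SEP tokens are added, so 388 = 3S + 4
# tokens of dimension d = 256 come out.
def _sanity_check_embedding_layer():
    emb = EmbeddingLayer(emb_size=256, total_tokens=384)
    tokens = torch.randn(2, 384, 256)        # output of the non-linear projection, T ∈ R(3S×d)
    embedded = emb(tokens)
    assert embedded.shape == (2, 388, 256)   # (batch_size, 3S + 4, d)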
class MultiHeadAttention(nn.Module):
    def __init__(self, emb_size: int = 256, num_heads: int = 8, dropout: float = 0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # Fuse the queries, keys and values into one matrix
        self.qkv = nn.Linear(emb_size, emb_size * 3)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x: Tensor, mask: Tensor = None) -> Tensor:
        # Split queries, keys and values across num_heads
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        # Dot product between every query and every key (summed over the head dimension)
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)  # (batch, num_heads, query_len, key_len)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            energy = energy.masked_fill(~mask, fill_value)
        # Scale before the softmax, then normalise over the key axis
        scaling = self.emb_size ** (1 / 2)
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)
        # Weighted sum of the values (summed over the key axis)
        out = torch.einsum('bhal, bhlv -> bhav', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.projection(out)
        return out
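
# A hypothetical shape check for MultiHeadAttention (illustrative sizes): self-attention
# preserves the (batch, tokens, emb_size) shape, here the 388 embedded tokens from above.
def _sanity_check_attention():
    mha = MultiHeadAttention(emb_size=256, num_heads=8)
    seq = torch.randn(2, 388, 256)
    assert mha(seq).shape == (2, 388, 256)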
class ResidualAdd(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        res = x
        x = self.fn(x, **kwargs)
        x += res