text
stringlengths
1
93.6k
# x = self.fc2(self.act(self.fc1(x)))
# x = x.permute(0, 3, 1, 2)
# x = x + shortcut
# 1x1 convolution
return x, features
class LayerNorm2d(nn.LayerNorm):
    """LayerNorm over the channel dimension of NCHW (2D spatial) tensors.

    Wraps ``nn.LayerNorm`` so it can be applied to ``(N, C, H, W)`` inputs
    by temporarily moving channels to the trailing axis.
    """

    def __init__(self, num_channels, eps=1e-6, affine=True):
        # Delegate parameter setup to nn.LayerNorm over the channel dim.
        super().__init__(num_channels, eps=eps, elementwise_affine=affine)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # NCHW -> NHWC, normalize the trailing (channel) axis, then restore.
        nhwc = x.permute(0, 2, 3, 1)
        normed = F.layer_norm(
            nhwc, self.normalized_shape, self.weight, self.bias, self.eps
        )
        return normed.permute(0, 3, 1, 2)
class ScoreNetwork(nn.Module):
    """Produce an 8-way per-pixel score map (softmax over channels).

    A 9-channel input is downsampled twice by stride-2 convolutions with an
    inverted-bottleneck residual block in between; the final scores are
    normalized with a channel-wise softmax.
    """

    def __init__(self):
        super(ScoreNetwork, self).__init__()
        # Stem: 9 -> 192 channels, spatial size halved.
        self.conv1 = nn.Conv2d(9, 192, kernel_size=7, stride=2, padding=3)
        # Inverted bottleneck: norm, 3x3 conv, 1x1 expand (x4), 1x1 project.
        self.invert = nn.Sequential(
            LayerNorm2d(192),
            nn.Conv2d(192, 192, kernel_size=3, stride=1, padding=1),
            nn.Conv2d(192, 768, kernel_size=1),
            nn.Conv2d(768, 192, kernel_size=1),
            nn.GELU(),
        )
        # Head: 192 -> 8 score channels, spatial size halved again.
        self.conv2 = nn.Conv2d(192, 8, kernel_size=7, stride=2, padding=3)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        stem = self.conv1(x)
        # Residual connection around the inverted-bottleneck block.
        x = stem + self.invert(stem)
        scores = self.conv2(x)
        # Cast to float before softmax (matches mixed-precision usage).
        return self.softmax(scores.float())
@MODELS.register_module()
class Mesorch_P(nn.Module):
    """Two-branch (ConvNeXt + SegFormer) manipulation-localization model.

    The image is paired with its high- and low-frequency DCT components;
    each branch sees one frequency band, the branches' multi-scale features
    are merged, projected to single-channel maps, summed into one logit map,
    and supervised with BCE-with-logits against the ground-truth mask.
    """

    def __init__(self, seg_pretrain_path, conv_pretrain=False, image_size=512):
        super(Mesorch_P, self).__init__()
        self.convnext = ConvNeXt(conv_pretrain)
        self.segformer = MixVisionTransformer(seg_pretrain_path)
        self.upsample = UpsampleConcatConv()
        self.low_dct = LowDctFrequencyExtractor()
        self.high_dct = HighDctFrequencyExtractor()
        # Per-scale 1x1 projections to a single channel:
        # two 96-channel feature maps followed by three 64-channel ones.
        projections = [nn.Conv2d(96, 1, 1) for _ in range(2)]
        projections += [nn.Conv2d(64, 1, 1) for _ in range(3)]
        self.inverse = nn.ModuleList(projections)
        self.resize = nn.Upsample(size=(image_size, image_size), mode='bilinear', align_corners=True)
        self.loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, image, mask, *args, **kwargs):
        # Build the two frequency-conditioned inputs (image + DCT band).
        high_freq = self.high_dct(image)
        low_freq = self.low_dct(image)
        input_high = torch.concat([image, high_freq], dim=1)
        input_low = torch.concat([image, low_freq], dim=1)
        # Each branch returns (aux, multi-scale feature list); merge lists.
        _, outs1 = self.convnext(input_high)
        _, outs2 = self.segformer(input_low)
        x, features = self.upsample(outs1 + outs2)
        # Project every scale to one channel, then sum into a single logit map.
        per_scale = [proj(feat) for proj, feat in zip(self.inverse, features)]
        pred_mask = torch.sum(torch.concat(per_scale, dim=1), dim=1, keepdim=True)
        # Upsample logits to the target resolution (default 512x512) for the loss.
        pred_mask = self.resize(pred_mask)
        loss = self.loss_fn(pred_mask, mask)
        # Sigmoid turns logits into the probability mask reported for metrics.
        mask_pred = torch.sigmoid(pred_mask.float())
        output_dict = {
            # loss for backward
            "backward_loss": loss,
            # predicted mask, will calculate for metrics automatically
            "pred_mask": mask_pred,
            # predicted binary label, will calculate for metrics automatically
            "pred_label": None,
            # ----values below is for visualization----
            # automatically visualize with the key-value pairs
            "visual_loss": {
                "predict_loss": loss,
            },
            "visual_image": {
                "pred_mask": mask_pred,
            }
            # -----------------------------------------
        }
        return output_dict
# <FILESEP>
#-------------------------------------------------Libraries
import os
from datetime import datetime as dt
from Functions.load_spec import load_spec
from Functions.load_data import load_data
from Functions.dfm import dfm
import pickle