text
stringlengths
1
93.6k
def __init__(self, channel):
    """Set up the Q/K/V projections and the attention softmax.

    Args:
        channel: feature dimension of the incoming (b, c, h, w) map.
    """
    super(MultiHeadSelfAttention, self).__init__()
    # One dense projection per attention role (query / key / value).
    for role in ("query", "key", "value"):
        setattr(self, role, MultiHeadDense(channel))
    # NOTE(review): dim=1 normalizes over the *query* axis of the
    # (b, h*w, h*w) score matrix; conventional attention softmaxes over
    # the key axis (dim=-1) — confirm this is intentional.
    self.softmax = nn.Softmax(dim=1)
def forward(self, x):
    """Apply positionally-encoded self-attention to a (b, c, h, w) feature map.

    Returns a tensor of the same (b, c, h, w) shape.
    """
    b, c, h, w = x.size()
    # Add the 2-D positional encoding before flattening the grid.
    x = x + self.positional_encoding_2d(c, h, w)
    # Flatten spatial dims into a token axis: (b, h*w, c).
    tokens = x.reshape(b, c, h * w).permute(0, 2, 1)
    q = self.query(tokens)
    k = self.key(tokens)
    v = self.value(tokens)
    # Scaled dot-product scores, shape (b, h*w, h*w).
    attn = self.softmax(torch.bmm(q, k.permute(0, 2, 1)) / math.sqrt(c))
    # Attend, then restore the (b, c, h, w) layout.
    return torch.bmm(attn, v).permute(0, 2, 1).reshape(b, c, h, w)
########################################
class HybridSN_network(nn.Module):
    """HybridSN: 3-D spectral-spatial convolutions, a 2-D conv, and an MLP head.

    Expects input of shape (batch, 1, rows, cols, bands) and returns
    (batch, classes) logits.
    """

    def __init__(self, band, classes):
        super(HybridSN_network, self).__init__()
        self.name = 'HybridSN'
        # Three 3-D conv stages; the last kernel dim (7/5/3) shrinks the
        # spectral axis by 12 in total, spatial dims by 6.
        self.conv1 = nn.Sequential(
            nn.Conv3d(in_channels=1, out_channels=8, kernel_size=(3, 3, 7)),
            nn.ReLU(inplace=True),
        )
        self.conv2 = nn.Sequential(
            nn.Conv3d(in_channels=8, out_channels=16, kernel_size=(3, 3, 5)),
            nn.ReLU(inplace=True),
        )
        self.conv3 = nn.Sequential(
            nn.Conv3d(in_channels=16, out_channels=32, kernel_size=(3, 3, 3)),
            nn.ReLU(inplace=True),
        )
        # 576 = 32 channels * remaining spectral length after the 3-D convs,
        # i.e. it presumably assumes band == 30 — TODO confirm against the data.
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=576, out_channels=64, kernel_size=(3, 3)),
            nn.ReLU(inplace=True),
        )
        # Attention module is built but currently bypassed in forward().
        self.MHSA = MultiHeadSelfAttention(64)
        # Classifier head. NOTE(review): the hard-coded flatten size 18496
        # (= 64 * 17 * 17) ties the model to one spatial patch size — verify.
        self.dense1 = nn.Sequential(
            nn.Linear(18496, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.4),
        )
        self.dense2 = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.4),
        )
        self.dense3 = nn.Sequential(nn.Linear(128, classes))

    def forward(self, X):
        """Return class logits for a batch of hyperspectral patches."""
        out = self.conv3(self.conv2(self.conv1(X)))
        # Fold the leftover spectral dim into the channel dim so the
        # 2-D conv can consume it (memory reinterpretation, no permute).
        n, ch, r, c, d = (out.size(i) for i in range(5))
        out = out.view(n, ch * d, r, c)
        out = self.conv4(out)
        # print(out.shape)
        # out = self.MHSA(out)   # attention deliberately disabled here
        # print(out.shape)
        flat = out.contiguous().view(out.size(0), -1)
        return self.dense3(self.dense2(self.dense1(flat)))
# Instantiate the network on the GPU and print a per-layer summary.
# BAND, CLASSES_NUM, img_rows and img_cols are defined earlier in the file;
# `summary` presumably comes from torchsummary — TODO confirm the import.
model = HybridSN_network(BAND, CLASSES_NUM).cuda()
summary(model, (1, img_rows, img_cols, BAND))
# Plotting
def train(net,
train_iter,
valida_iter,
loss,