text stringlengths 1 93.6k |
|---|
shapeor[-1] = K
|
data_hsi = data_hsi.reshape(shapeor)
|
return data_hsi, gt_hsi, TOTAL_SIZE, TRAIN_SIZE, VALIDATION_SPLIT
|
# # Pytorch Data Loader Creation

# Load the hyperspectral cube, its ground-truth map, and the split sizes.
data_hsi, gt_hsi, TOTAL_SIZE, TRAIN_SIZE, VALIDATION_SPLIT = load_dataset(
    Dataset, PARAM_VAL)
print(data_hsi.shape)

# Flatten the (H, W, B) cube into a (H*W, B) matrix of per-pixel spectra,
# and the (H, W) label map into a flat vector aligned with it.
image_x, image_y, BAND = data_hsi.shape
data = data_hsi.reshape(-1, data_hsi.shape[-1])
gt = gt_hsi.reshape(np.prod(gt_hsi.shape[:2]), )
CLASSES_NUM = max(gt)
print('The class numbers of the HSI data is:', CLASSES_NUM)

print('-----Importing Setting Parameters-----')
ITER = PARAM_ITER
PATCH_LENGTH = PATCH_SIZE
lr, num_epochs, batch_size = 0.001, 200, 32
loss = torch.nn.CrossEntropyLoss()

# Spatial patch geometry: each training sample is a square window of side
# 2*PATCH_LENGTH + 1 centered on a pixel, with all spectral bands.
img_rows = 2 * PATCH_LENGTH + 1
img_cols = img_rows
img_channels = data_hsi.shape[2]
INPUT_DIMENSION = img_channels
ALL_SIZE = data_hsi.shape[0] * data_hsi.shape[1]
VAL_SIZE = int(TRAIN_SIZE)
TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE

# Per-iteration metric accumulators (one entry per training run).
KAPPA = []
OA = []
AA = []
TRAINING_TIME = []
TESTING_TIME = []
ELEMENT_ACC = np.zeros((ITER, CLASSES_NUM))

# Standardize each band to zero mean / unit variance, restore the cube
# shape, then zero-pad the spatial borders so edge pixels still yield a
# full patch.
data = preprocessing.scale(data)
data_ = data.reshape(data_hsi.shape[0], data_hsi.shape[1], data_hsi.shape[2])
whole_data = data_
padded_data = np.lib.pad(
    whole_data,
    ((PATCH_LENGTH, PATCH_LENGTH), (PATCH_LENGTH, PATCH_LENGTH), (0, 0)),
    'constant',
    constant_values=0)
|
# # Model
|
############## Transformer #############
|
class MultiHeadDense(nn.Module):
    """Per-token dense (linear) projection, shared across all positions.

    Applies a learned ``d x d`` weight to the last dimension of a
    ``[batch, seq, d]`` tensor, i.e. ``y = x @ W^T`` (no bias).

    :param d: size of the token embedding dimension.
    """

    def __init__(self, d):
        super(MultiHeadDense, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(d, d))
        # Bug fix: torch.Tensor(d, d) allocates *uninitialized* memory, so
        # without an explicit init the projection starts from garbage
        # values. Use the same Kaiming-uniform scheme nn.Linear applies to
        # its own weight.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, x):
        # x: [b, h*w, d] -> [b, h*w, d]. F.linear computes x @ weight.T,
        # broadcasting over the leading batch/sequence dimensions (the
        # commented-out torch.bmm in the original required a batched weight).
        x = F.linear(x, self.weight)
        return x
|
class MultiHeadAttention(nn.Module):
    """Base class for multi-head attention variants.

    Provides a fixed 2-D sinusoidal positional encoding helper; subclasses
    must implement ``forward``.
    """

    def __init__(self):
        super(MultiHeadAttention, self).__init__()

    def positional_encoding_2d(self, d_model, height, width):
        """Build a fixed 2-D sin/cos positional encoding.

        reference: wzlxjtu/PositionalEncoding2D

        :param d_model: dimension of the model; must be divisible by 4
            (half the channels encode the width position, half the height,
            and each half splits again between sin and cos)
        :param height: height of the positions
        :param width: width of the positions
        :return: d_model*height*width position matrix
        :raises ValueError: if ``d_model`` is not divisible by 4
        """
        if d_model % 4 != 0:
            # Bug fix: the original message claimed "odd dimension", but the
            # actual requirement checked here is divisibility by 4.
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "dimension not divisible by 4 "
                             "(got dim={:d})".format(d_model))
        pe = torch.zeros(d_model, height, width)
        # Best-effort move to the GPU. Bug fix: CPU-only torch builds raise
        # AssertionError ("Torch not compiled with CUDA enabled") rather
        # than RuntimeError, so catch both and fall back to CPU.
        try:
            pe = pe.to(torch.device("cuda:0"))
        except (AssertionError, RuntimeError):
            pass
        # Each dimension uses half of d_model: channels [0, d_model/2)
        # encode the width (x) position, the rest the height (y) position.
        d_model = int(d_model / 2)
        div_term = torch.exp(torch.arange(0., d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pos_w = torch.arange(0., width).unsqueeze(1)
        pos_h = torch.arange(0., height).unsqueeze(1)
        # Width encodings are constant along the height axis (repeat over
        # height); height encodings are constant along the width axis.
        pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
        pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
        pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
        pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
        return pe

    def forward(self, x):
        # Abstract: concrete attention is implemented by subclasses.
        raise NotImplementedError()
|
class MultiHeadSelfAttention(MultiHeadAttention):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.