# NOTE: this file was extracted from a dataset-viewer page; the original
# markdown-table column header ("text stringlengths ...") was residue, not code.
def select(groundTruth, dataset=None, amounts=None):
    """Split the labelled pixels of a ground-truth map into train/test indices.

    Classes are labelled 1..m in ``groundTruth`` (0 = unlabelled background).
    For each class, the flat pixel indices are shuffled; the last
    ``amounts[i]`` of them form the test set and the remainder the train set.

    Args:
        groundTruth: integer numpy array of class labels (any shape; it is
            flattened with ``ravel()`` before indexing).
        dataset: dataset key (``'IN'``, ``'UP'`` or ``'KSC'``). Defaults to
            the module-level ``Dataset`` global, preserving the original
            call signature ``select(gt)``.
        amounts: optional explicit per-class test-set sizes; when given it
            overrides the built-in per-dataset tables.

    Returns:
        ``(train_indices, test_indices)``: two shuffled flat lists of
        indices into ``groundTruth.ravel()``.

    Raises:
        ValueError: if the dataset key has no per-class table. (The original
            code left ``amount`` unbound in that case and crashed later with
            a confusing ``NameError``.)
    """
    # Per-class test-set sizes. The 'IN' table is ~20% of each Indian Pines
    # class; 'UP' is Pavia University; 'KSC' is Kennedy Space Center.
    per_dataset_amounts = {
        'IN': [35, 1011, 581, 167, 344, 515, 19, 327, 12, 683, 1700, 418,
               138, 876, 274, 69],
        'UP': [5297, 14974, 1648, 2424, 1076, 4026, 1046, 2950, 755],
        'KSC': [530, 165, 176, 170, 110, 161, 80, 299, 377, 283, 296, 341,
                654],
    }
    if amounts is None:
        name = Dataset if dataset is None else dataset
        if name not in per_dataset_amounts:
            raise ValueError('unknown dataset key: %r' % (name,))
        amounts = per_dataset_amounts[name]

    # Hoisted out of the per-class loop: the original rebuilt
    # groundTruth.ravel().tolist() once per class (accidental O(m*n)).
    flat = groundTruth.ravel().tolist()
    # Highest class label. max() over the flattened list also works for 2-D
    # maps, where the original max(groundTruth) raised on row comparison.
    m = int(max(flat, default=0))

    train = {}
    test = {}
    for i in range(m):
        indices = [j for j, x in enumerate(flat) if x == i + 1]
        np.random.shuffle(indices)
        nb_val = int(amounts[i])  # test-set size for class label i+1
        train[i] = indices[:-nb_val]
        test[i] = indices[-nb_val:]

    train_indices = []
    test_indices = []
    for i in range(m):
        train_indices += train[i]
        test_indices += test[i]
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)
    return train_indices, test_indices
|
# # Training
|
for index_iter in range(ITER):
|
print('iter:', index_iter)
|
# define the model
|
net = HybridSN_network(BAND, CLASSES_NUM)
|
if PARAM_OPTIM == 'diffgrad':
|
optimizer = optim2.DiffGrad(
|
net.parameters(),
|
lr=lr,
|
betas=(0.9, 0.999),
|
eps=1e-8,
|
weight_decay=0) # weight_decay=0.0001)
|
if PARAM_OPTIM == 'adam':
|
optimizer = optim.Adam(
|
net.parameters(),
|
lr=1e-3,
|
betas=(0.9, 0.999),
|
eps=1e-8,
|
weight_decay=0)
|
time_1 = int(time.time())
|
np.random.seed(seeds[index_iter])
|
# train_indices, test_indices = select(gt)
|
train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)
|
_, total_indices = sampling(1, gt)
|
TRAIN_SIZE = len(train_indices)
|
print('Train size: ', TRAIN_SIZE)
|
TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE
|
print('Test size: ', TEST_SIZE)
|
VAL_SIZE = int(TRAIN_SIZE)
|
print('Validation size: ', VAL_SIZE)
|
print('-----Selecting Small Pieces from the Original Cube Data-----')
|
train_iter, valida_iter, test_iter, all_iter = geniter.generate_iter(
|
TRAIN_SIZE, train_indices, TEST_SIZE, test_indices, TOTAL_SIZE,
|
total_indices, VAL_SIZE, whole_data, PATCH_LENGTH, padded_data,
|
INPUT_DIMENSION, 16, gt) #batchsize in 1
|
tic1 = time.time()
|
train(
|
net,
|
train_iter,
|
valida_iter,
|
loss,
|
optimizer,
|
device,
|
epochs=PARAM_EPOCH)
|
toc1 = time.time()
|
pred_test = []
|
tic2 = time.time()
|
with torch.no_grad():
|
for X, y in test_iter:
|
X = X.to(device)
|
net.eval()
|
y_hat = net(X)
|
# NOTE: source truncated here mid test loop; trailing dataset-viewer UI text
# ("Subsets and Splits / No community queries yet ...") was residue, not code.