# NOTE(review): removed a dataset-export header artifact ("text",
# "stringlengths", "1", "93.6k") that preceded this script — it was not
# valid Python and is not part of the original source.
import argparse
import math

import numpy as np
import scipy.io as sio
import torch
import torch_optimizer as optim2
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from torchsummary import summary

import geniter
import record
import Utils
# # Setting Params
parser = argparse.ArgumentParser(description='Training for HSI')
parser.add_argument(
'-d', '--dataset', dest='dataset', default='IN', help="Name of dataset.")
parser.add_argument(
'-o',
'--optimizer',
dest='optimizer',
default='adam',
help="Name of optimizer.")
parser.add_argument(
'-e', '--epoch', type=int, dest='epoch', default=200, help="No of epoch")
parser.add_argument(
'-i', '--iter', type=int, dest='iter', default=3, help="No of iter")
parser.add_argument(
'-p', '--patch', type=int, dest='patch', default=4, help="Length of patch")
parser.add_argument(
'-vs',
'--valid_split',
type=float,
dest='valid_split',
default=0.9,
help="Percentage of validation split.")
args = parser.parse_args()
# Unpack the parsed command-line options into module-level hyper-parameters.
PARAM_OPTIM = args.optimizer      # optimizer name, e.g. 'adam'
PARAM_DATASET = args.dataset      # one of: UP, IN, SV, KSC
PARAM_EPOCH = args.epoch          # number of training epochs
PARAM_ITER = args.iter            # number of Monte Carlo repetitions
PATCH_SIZE = args.patch           # spatial patch length
PARAM_VAL = args.valid_split      # split fraction passed to load_dataset
# # Data Loading
# Prefer the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Fixed seeds, one per Monte Carlo run, so repeated runs are reproducible.
seeds = [1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341]
ensemble = 1  # number of ensembled models per run

# NOTE: a `global Dataset` statement stood here originally; at module
# scope `global` is a no-op, so it was removed.
dataset = PARAM_DATASET  # input('Please input the name of Dataset(IN, UP, SV, KSC):')
Dataset = dataset.upper()  # normalize the dataset key (UP, IN, SV, KSC)
def load_dataset(Dataset, split=0.9):
    """Load a hyperspectral cube and its ground-truth map, then PCA-reduce it.

    Parameters
    ----------
    Dataset : str
        Dataset key: 'IN', 'UP', 'SV' or 'KSC' (see branches below).
    split : float
        Fraction stored as VALIDATION_SPLIT and used to size TRAIN_SIZE.

    NOTE(review): the function body continues past this excerpt; the
    return statement and the rest of the reshaping are not visible here.
    """
    data_path = '../dataset/'
    if Dataset == 'IN':
        # Indian Pines (corrected) scene.
        mat_data = sio.loadmat(data_path + 'Indian_pines_corrected.mat')
        mat_gt = sio.loadmat(data_path + 'Indian_pines_gt.mat')
        data_hsi = mat_data['indian_pines_corrected']
        gt_hsi = mat_gt['indian_pines_gt']
        K = 30  # number of PCA components kept (hard-coded for IN only)
        TOTAL_SIZE = 10249  # labelled pixels in the IN ground truth
        VALIDATION_SPLIT = split
        # NOTE(review): named TRAIN_SIZE but computed from the
        # "--valid_split" fraction — confirm the intended semantics.
        TRAIN_SIZE = math.ceil(TOTAL_SIZE * VALIDATION_SPLIT)
    if Dataset == 'UP':
        # Pavia University scene.
        uPavia = sio.loadmat(data_path + 'PaviaU.mat')
        gt_uPavia = sio.loadmat(data_path + 'PaviaU_gt.mat')
        data_hsi = uPavia['paviaU']
        gt_hsi = gt_uPavia['paviaU_gt']
        K = data_hsi.shape[2]  # keep all spectral bands
        TOTAL_SIZE = 42776  # labelled pixels in the UP ground truth
        VALIDATION_SPLIT = split
        TRAIN_SIZE = math.ceil(TOTAL_SIZE * VALIDATION_SPLIT)
    if Dataset == 'SV':
        # Salinas (corrected) scene.
        SV = sio.loadmat(data_path + 'Salinas_corrected.mat')
        gt_SV = sio.loadmat(data_path + 'Salinas_gt.mat')
        data_hsi = SV['salinas_corrected']
        gt_hsi = gt_SV['salinas_gt']
        K = data_hsi.shape[2]  # keep all spectral bands
        TOTAL_SIZE = 54129  # labelled pixels in the SV ground truth
        VALIDATION_SPLIT = split
        TRAIN_SIZE = math.ceil(TOTAL_SIZE * VALIDATION_SPLIT)
    if Dataset == 'KSC':
        # Kennedy Space Center scene.
        SV = sio.loadmat(data_path + 'KSC.mat')
        gt_SV = sio.loadmat(data_path + 'KSC_gt.mat')
        data_hsi = SV['KSC']
        gt_hsi = gt_SV['KSC_gt']
        K = data_hsi.shape[2]  # keep all spectral bands
        TOTAL_SIZE = 5211  # labelled pixels in the KSC ground truth
        VALIDATION_SPLIT = split
        TRAIN_SIZE = math.ceil(TOTAL_SIZE * VALIDATION_SPLIT)
    # Flatten the spatial dimensions to (pixels, bands) and reduce the
    # spectral dimension to K components with PCA; the original shape is
    # kept so the cube can presumably be restored later — confirm below
    # this excerpt.
    shapeor = data_hsi.shape
    data_hsi = data_hsi.reshape(-1, data_hsi.shape[-1])
    data_hsi = PCA(n_components=K).fit_transform(data_hsi)
    shapeor = np.array(shapeor)