# NOTE(review): dataset-viewer residue ("text / stringlengths / 1 / 93.6k")
# left over from extraction — not part of the original source file.
# --- Load training + gallery data for the configured dataset/probe ---
# NOTE(review): this `if` continues a dataset-dispatch chain whose opening
# branch lies before this excerpt (the `elif dataset == 'IAS'` below pairs
# with an earlier `if dataset == ...`). Original indentation appears to have
# been stripped during extraction; code is left byte-identical here.
# The split loaded is the complement of the probe: probing 'Walking' loads
# the 'Still' split, and vice versa (cross-condition gallery/probe setup).
if probe == 'Walking':
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_gal_J_D = \
process.gen_train_data(dataset=dataset, split='Still', time_step=time_step,
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,
batch_size=batch_size, norm=norm
)
else:
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_gal_J_D = \
process.gen_train_data(dataset=dataset, split='Walking', time_step=time_step,
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,
batch_size=batch_size, norm=norm
)
# IAS: probe 'A' is evaluated against split 'B' and vice versa.
elif dataset == 'IAS':
if probe == 'A':
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_gal_J_D = \
process.gen_train_data(dataset=dataset, split='B', time_step=time_step,
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,
batch_size=batch_size, norm=norm
)
else:
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_gal_J_D = \
process.gen_train_data(dataset=dataset, split='A', time_step=time_step,
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att,
batch_size=batch_size, norm=norm
)
# CASIA_B: the probe itself names the split. Note this call returns a
# SHORTER tuple (no X_train_J_D / X_gal_J_D) and passes a PG_type parsed
# from FLAGS.probe_type instead of the `norm` argument.
elif dataset == 'CASIA_B':
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes = \
process.gen_train_data(dataset=dataset, split=probe, time_step=time_step,
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,
PG_type=FLAGS.probe_type.split('.')[1])
# Drop the throwaway unpack target and force a GC pass to reclaim the
# (potentially large) arrays that were bound to `_` during data loading.
del _
gc.collect()
# Build the gallery feature banks: fused ("int") features plus the per-level
# (J / P / B) features and the matching gallery labels.
# NOTE(review): gal_loader and evaluation are defined elsewhere in this file,
# outside this excerpt.
gal_features_all_int, gal_features_all, gal_features_all_P, gal_features_all_B, gal_labels_all = gal_loader(
X_gal_J, X_gal_P, X_gal_B, y_gal)
# Retrieval metrics (mAP and top-1/5/10) at each representation level; only
# the fused multi-level ("MSMR") numbers are printed below — the per-level
# reports are kept but disabled.
mAP_int, top_1_int, top_5_int, top_10_int, mAP, top_1, top_5, top_10, mAP_P, top_1_P, top_5_P, top_10_P, \
mAP_B, top_1_B, top_5_B, top_10_B, = evaluation()
# print(
#     '[Evaluation - J-level] %s - %s | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f | mAP: %.4f ' % (
#         FLAGS.dataset, FLAGS.probe,
#         top_1, top_5, top_10, mAP))
# print(
#     '[Evaluation - C-level] %s - %s | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f | mAP: %.4f ' % (
#         FLAGS.dataset, FLAGS.probe,
#         top_1_P, top_5_P, top_10_P, mAP_P,))
# print(
#     '[Evaluation - L-level] %s - %s | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f | mAP: %.4f ' % (
#         FLAGS.dataset, FLAGS.probe,
#         top_1_B, top_5_B, top_10_B, mAP_B,))
print(
'[Evaluation - MSMR] %s - %s | Top-1: %.4f | Top-5: %.4f | Top-10: %.4f | mAP: %.4f ' % (
FLAGS.dataset, FLAGS.probe,
top_1_int, top_5_int, top_10_int, mAP_int))
# Close the session (presumably a TensorFlow session created earlier in the
# file — confirm) and terminate: evaluation mode ends the program here.
sess.close()
exit()
print('End')
# --- Run-configuration report ---
# NOTE(review): M/H/eps/min_samples/patience are concatenated with `+`, so
# they are presumably string-typed flags (e.g. absl/tf DEFINE_string) —
# confirm against the flag definitions earlier in the file.
print('----- Model hyperparams -----')
print('batch_size: ' + str(batch_size))
print('M: ' + FLAGS.M)
print('H: ' + FLAGS.H)
print('eps: ' + FLAGS.eps)
print('min_samples: ' + FLAGS.min_samples)
print('seqence_length: ' + str(time_step))  # "seqence" typo is in the runtime string; left unchanged
print('patience: ' + FLAGS.patience)
print('Mode: ' + FLAGS.mode)
# Training mode additionally reports dataset/probe information.
# NOTE(review): this branch's body may continue beyond this excerpt.
if FLAGS.mode == 'Train':
print('----- Dataset Information -----')
print('Dataset: ' + dataset)
print('Probe: ' + FLAGS.probe)
# <FILESEP>
#!/usr/bin/env python
# coding: utf-8
# # Imports
import argparse
import collections
import math
import time
import numpy as np
import scipy.io as sio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn import metrics, preprocessing
from sklearn.decomposition import PCA