# NOTE(review): the four header lines originally here ("text", "stringlengths",
# "1", "93.6k") are dataset-extraction residue, not Python code; they are
# preserved in this comment so the module remains parseable.
# Data loading: select the loader based on FLAGS.probe_type. An empty
# probe_type uses the default protocol via the `process` module (imported
# earlier in the file — TODO confirm); a non-empty probe_type switches to the
# CME probe/gallery protocol loader imported below.
if FLAGS.probe_type == '':
    if FLAGS.dataset == 'KS20':
        # KS20 skeletons carry 25 body joints.
        nb_nodes = 25
    # Unpack joint- (J), part- (P) and body-level (B) train/test sequences and
    # labels, the joint adjacency/bias matrices, the class count, and the
    # extra X_*_J_D streams returned only by this loader. Unused slots of the
    # return tuple are bound to `_`.
    X_train_J, X_train_P, X_train_B, _, _, y_train, X_test_J, X_test_P, X_test_B, _, _, y_test, \
    adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_test_J_D = \
        process.gen_train_data(dataset=dataset, split=probe, time_step=time_step,
                               nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size, norm=norm)
    # `_` still references the last unused array; drop it and force a GC pass
    # to reclaim memory before the TF graph is built.
    del _
    gc.collect()
else:
    # CME probe/gallery protocol: shadow `process` with the dedicated loader.
    # PG_type is the probe_type name with its extension stripped
    # (e.g. 'A.B' -> 'A') — presumably a probe/gallery pair tag; verify.
    from utils import process_cme_L3 as process
    X_train_J, X_train_P, X_train_B, _, _, y_train, X_test_J, X_test_P, X_test_B, _, _, y_test, \
    adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes = \
        process.gen_train_data(dataset=dataset, split=probe, time_step=time_step,
                               nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,
                               PG_type=FLAGS.probe_type.split('.')[0])
    print('## [Probe].[Gallery]', FLAGS.probe_type)
    # NOTE(review): `nb_nodes` used above is only assigned in the other branch
    # (KS20); it must be defined earlier in the file for this path — confirm.
    del _
    gc.collect()
# Training-loop configuration and per-run state.
# all_ftr_size: sequence-feature width, taken from the H flag.
# joint_num:    number of skeleton joints, read off the loaded training data.
# loaded_graph: fresh TF graph (rebuilt again inside the Train branch below).
# imp_val*:     per-scale importance values, filled in later.
all_ftr_size = int(FLAGS.H)
joint_num = X_train_J.shape[2]
loaded_graph = tf.Graph()
train_epochs, display = 15000, 80
imp_val = imp_val_P = imp_val_B = None
# Training mode: build the computation graph (inputs + multi-scale encoder).
# NOTE(review): this `with` block continues beyond the visible chunk.
if FLAGS.mode == 'Train':
    loaded_graph = tf.Graph()  # rebinds the graph created above
    with loaded_graph.as_default():
        with tf.name_scope('Input'):
            H = int(FLAGS.H)  # encoder hidden size (re-read again below)
            # Frame-level inputs flattened over (batch, time). J_in holds
            # joint-scale features; P_in/B_in have 10 and 5 nodes —
            # presumably part- and body-scale skeleton groupings; confirm
            # against the data loader.
            J_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, joint_num, ft_size))
            P_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 10, ft_size))
            B_in = tf.placeholder(dtype=tf.float32, shape=(batch_size * time_step, 5, ft_size))
            # Per-scale pseudo labels (one per sequence in the batch) and
            # cluster-feature matrices; the leading None lets the number of
            # cluster rows vary between feeds.
            pseudo_lab_J = tf.placeholder(dtype=tf.int32, shape=(batch_size,))
            seq_cluster_ftr_J = tf.placeholder(dtype=tf.float32, shape=(None, all_ftr_size))
            pseudo_lab_P = tf.placeholder(dtype=tf.int32, shape=(batch_size,))
            seq_cluster_ftr_P = tf.placeholder(dtype=tf.float32, shape=(None, all_ftr_size))
            pseudo_lab_B = tf.placeholder(dtype=tf.int32, shape=(batch_size,))
            seq_cluster_ftr_B = tf.placeholder(dtype=tf.float32, shape=(None, all_ftr_size))
            # One-hot labels over nb_classes — presumably used further down;
            # confirm against the (unseen) loss construction.
            lbl_s = tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_classes))
        with tf.name_scope("Encoder"), tf.variable_scope("", reuse=tf.AUTO_REUSE):
            H = int(FLAGS.H)  # NOTE(review): duplicates the assignment above
            # One two-layer MLP per scale: (nodes * ft_size) -> H -> H.
            # First-layer weights, Glorot-initialized:
            W_1 = tf.get_variable('W_1', shape=[joint_num * ft_size, H],
                                  initializer=tf.glorot_uniform_initializer())
            W_1_P = tf.get_variable('W_1_P', shape=[10 * ft_size, H],
                                    initializer=tf.glorot_uniform_initializer())
            W_1_B = tf.get_variable('W_1_B', shape=[5 * ft_size, H],
                                    initializer=tf.glorot_uniform_initializer())
            # First-layer biases, zero-initialized:
            b_1 = tf.Variable(tf.zeros(shape=[H, ]))
            b_1_P = tf.Variable(tf.zeros(shape=[H, ]))
            b_1_B = tf.Variable(tf.zeros(shape=[H, ]))
            # Second-layer weights and biases:
            W_2 = tf.get_variable('W_2', shape=[H, H], initializer=tf.glorot_uniform_initializer())
            W_2_P = tf.get_variable('W_2_P', shape=[H, H], initializer=tf.glorot_uniform_initializer())
            W_2_B = tf.get_variable('W_2_B', shape=[H, H], initializer=tf.glorot_uniform_initializer())
            b_2 = tf.Variable(tf.zeros(shape=[H, ]))
            b_2_P = tf.Variable(tf.zeros(shape=[H, ]))
            b_2_B = tf.Variable(tf.zeros(shape=[H, ]))
            # Flatten each frame's node features and apply the MLP:
            # relu(x @ W_1 + b_1) @ W_2 + b_2, per scale.
            inputs = tf.reshape(J_in, [time_step * batch_size, -1])
            s_rep = tf.matmul(tf.nn.relu(tf.matmul(inputs, W_1) + b_1), W_2) + b_2
            inputs_P = tf.reshape(P_in, [time_step * batch_size, -1])
            inputs_B = tf.reshape(B_in, [time_step * batch_size, -1])
            s_rep_P = tf.matmul(tf.nn.relu(tf.matmul(inputs_P, W_1_P) + b_1_P), W_2_P) + b_2_P
            s_rep_B = tf.matmul(tf.nn.relu(tf.matmul(inputs_B, W_1_B) + b_1_B), W_2_B) + b_2_B
            # Regroup frame features per sequence: (batch_size, time_step, H).
            seq_ftr = tf.reshape(s_rep, [batch_size, time_step, -1])
            seq_ftr_P = tf.reshape(s_rep_P, [batch_size, time_step, -1])
            seq_ftr_B = tf.reshape(s_rep_B, [batch_size, time_step, -1])
            # Keep the frame-level features, then mean-pool over the time axis
            # to obtain one sequence-level feature per sample.
            seq_ftr_frames = seq_ftr
            seq_ftr = tf.reduce_mean(seq_ftr, axis=1)
            seq_ftr = tf.reshape(seq_ftr, [batch_size, -1])
            seq_ftr_P_frames = seq_ftr_P
            seq_ftr_P = tf.reduce_mean(seq_ftr_P, axis=1)
            seq_ftr_P = tf.reshape(seq_ftr_P, [batch_size, -1])