# [extraction artifact — not source code] dataset-viewer column-header residue:
#   text | stringlengths | 1 | 93.6k
# NOTE(review): indentation was stripped from this chunk during extraction;
# the nesting implied by `while` / `if` below is lost, so the code is kept
# flat here and only comments are added.
# Adopt the newly refined body-level pseudo labels for this epoch.
pseudo_labels_B = pseudo_labels_B_new
# Cluster counts at the three skeleton granularities (J / P / B suffixes).
# A label of -1 is excluded from the count — the DBSCAN noise convention,
# presumably; verify against the clustering step that produced these labels.
num_cluster = len(set(pseudo_labels)) - (1 if -1 in pseudo_labels else 0)
num_cluster_P = len(set(pseudo_labels_P)) - (1 if -1 in pseudo_labels_P else 0)
num_cluster_B = len(set(pseudo_labels_B)) - (1 if -1 in pseudo_labels_B else 0)
# One mini-batch training pass over the re-clustered training set.
tr_step = 0
tr_size = X_train_J_new.shape[0]
batch_Hi_MPC_loss = []  # per-batch overall loss values collected this epoch
while tr_step * batch_size < tr_size:
# The final incomplete batch is dropped rather than padded.
if (tr_step + 1) * batch_size > tr_size:
break
# Slice the current batch at joint (J), part (P) and body (B) granularity.
X_input_J = X_train_J_new[tr_step * batch_size:(tr_step + 1) * batch_size]
X_input_P = X_train_P_new[tr_step * batch_size:(tr_step + 1) * batch_size]
X_input_B = X_train_B_new[tr_step * batch_size:(tr_step + 1) * batch_size]
# Reshape to (-1, num_keypoints, 3): `joint_num` joints, 10 part keypoints,
# 5 body keypoints; the trailing 3 is presumably (x, y, z) — TODO confirm.
X_input_J = X_input_J.reshape([-1, joint_num, 3])
X_input_P = X_input_P.reshape([-1, 10, 3])
X_input_B = X_input_B.reshape([-1, 5, 3])
# Matching pseudo-label slices for the same batch window.
labels = pseudo_labels[tr_step * batch_size:(tr_step + 1) * batch_size]
labels_P = pseudo_labels_P[tr_step * batch_size:(tr_step + 1) * batch_size]
labels_B = pseudo_labels_B[tr_step * batch_size:(tr_step + 1) * batch_size]
# One optimization step: feeds the three inputs, their pseudo labels, and
# the per-level cluster feature tensors; fetches the total Hi-MPC loss,
# the per-level losses, and the sequence features.
_, loss, loss_J_, loss_P_, loss_B_, Seq_features = sess.run(
[train_op, Hi_MPC_loss, loss_J, loss_P, loss_B, seq_ftr],
feed_dict={
J_in: X_input_J,
P_in: X_input_P,
B_in: X_input_B,
pseudo_lab_J: labels,
pseudo_lab_P: labels_P,
pseudo_lab_B: labels_B,
seq_cluster_ftr_J: cluster_features,
seq_cluster_ftr_P: cluster_features_P,
seq_cluster_ftr_B: cluster_features_B})
# Convert the fetched features to a torch tensor. Not used in this visible
# chunk — presumably consumed after the loop (TODO confirm against the
# surrounding code).
Seq_features = torch.from_numpy(Seq_features)
batch_Hi_MPC_loss.append(loss)
# Periodic progress logging every `display` batches.
if tr_step % display == 0:
print(
# NOTE(review): "Cluser" is a typo in this runtime log string; left
# unchanged here because changing it alters program output.
'[%s] Batch num: %d | Loss: %.3f | J/C/L Cluser num: %d, %d, %d | J/C/L Loss: %.3f, %.3f, %.3f ' %
(str(epoch), tr_step, loss, num_cluster, num_cluster_P, num_cluster_B, loss_J_, loss_P_, loss_B_))
tr_step += 1
# Training for this mode is done; release the TensorFlow session.
sess.close()
# Evaluation branch of the mode dispatch; the matching `if` (presumably a
# 'Train' branch) lies above this chunk.
elif FLAGS.mode == 'Eval':
# Best checkpoint written by training for this dataset / probe configuration.
checkpt_file = pre_dir + FLAGS.dataset + '/' + FLAGS.probe + change + '/best.ckpt'
with tf.Session(graph=loaded_graph, config=config) as sess:
# Rebuild the trained graph structure from the checkpoint's meta file.
loader = tf.train.import_meta_graph(checkpt_file + '.meta')
# Recover the input placeholders by their auto-generated names.
# NOTE(review): positional names like "Input/Placeholder_3:0" depend on the
# exact order the graph was originally built in — this silently breaks if
# that order changes; explicitly named placeholders would be safer.
J_in = loaded_graph.get_tensor_by_name("Input/Placeholder:0")
P_in = loaded_graph.get_tensor_by_name("Input/Placeholder_1:0")
B_in = loaded_graph.get_tensor_by_name("Input/Placeholder_2:0")
pseudo_lab_J = loaded_graph.get_tensor_by_name("Input/Placeholder_3:0")
seq_cluster_ftr_J = loaded_graph.get_tensor_by_name("Input/Placeholder_4:0")
pseudo_lab_P = loaded_graph.get_tensor_by_name("Input/Placeholder_5:0")
seq_cluster_ftr_P = loaded_graph.get_tensor_by_name("Input/Placeholder_6:0")
pseudo_lab_B = loaded_graph.get_tensor_by_name("Input/Placeholder_7:0")
seq_cluster_ftr_B = loaded_graph.get_tensor_by_name("Input/Placeholder_8:0")
lbl_s = loaded_graph.get_tensor_by_name("Input/Placeholder_9:0")  # labels input, presumably
# Output tensors: an intermediate concatenated feature from the Hi_MPC
# scope plus three reshaped sequence features from the Encoder scope
# (the J / P / B levels, judging by the suffixes — TODO confirm).
seq_ftr_int, seq_ftr, seq_ftr_P, seq_ftr_B = loaded_graph.get_tensor_by_name("Hi_MPC/Hi_MPC/concat_24:0"), \
loaded_graph.get_tensor_by_name("Encoder/Reshape_6:0"), \
loaded_graph.get_tensor_by_name("Encoder/Reshape_7:0"), \
loaded_graph.get_tensor_by_name("Encoder/Reshape_8:0")
# Initialize variables, then overwrite them with the checkpointed weights.
# NOTE(review): the init run is presumably redundant once restore succeeds
# — TODO confirm no uncheckpointed variables exist in this graph.
init_op = tf.global_variables_initializer()
sess.run(init_op)
loader.restore(sess, checkpt_file)
# Saver for any further checkpointing done later in this branch.
saver = tf.train.Saver()
# Encode a gallery split in mini-batches with the restored model,
# accumulating logits, labels and features at every granularity.
# NOTE(review): this definition is truncated by the end of the chunk — the
# sess.run call below continues past the last visible line, so the return
# value and remaining body cannot be documented from here.
def gal_loader(X_train_J, X_train_P, X_train_B, y_train):
tr_step = 0
tr_size = X_train_J.shape[0]
# Accumulators for the whole gallery pass.
gal_logits_all = []
gal_labels_all = []
gal_features_all_int = []  # intermediate (concat) features
gal_features_all = []      # joint-level sequence features, presumably
gal_features_all_P = []    # part-level sequence features
gal_features_all_B = []    # body-level sequence features
while tr_step * batch_size < tr_size:
# Drop the final partial batch, mirroring the training loop.
if (tr_step + 1) * batch_size > tr_size:
break
# Slice one batch at joint / part / body granularity.
X_input_J = X_train_J[tr_step * batch_size:(tr_step + 1) * batch_size]
X_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]
X_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]
# Same (-1, keypoints, 3) reshape used during training.
X_input_J = X_input_J.reshape([-1, joint_num, 3])
X_input_P = X_input_P.reshape([-1, 10, 3])
X_input_B = X_input_B.reshape([-1, 5, 3])
labels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]
# Forward pass only — fetch the four feature tensors (call truncated here).
[Seq_features_int, Seq_features, Seq_features_P, Seq_features_B] = sess.run(
[seq_ftr_int, seq_ftr, seq_ftr_P, seq_ftr_B],