text stringlengths 1 93.6k |
|---|
params.total_parameters / 1e6))
|
# Print the parameter/FLOP statistics of the restored graph, then stop.
# NOTE(review): everything below exit() is unreachable while this early
# exit is in place — it looks like a debugging/inspection switch; confirm
# before shipping.
stats_graph(loaded_graph)

exit()

# Only initialization: random placeholders for the per-batch cluster
# features (shape [batch_size, H]); presumably overwritten by the real
# clustering step later in the script — TODO confirm with the caller.
cluster_features = np.random.random([batch_size, H])
cluster_features_P = np.random.random([batch_size, H])
cluster_features_B = np.random.random([batch_size, H])
|
def train_loader(X_train_J, X_train_P, X_train_B, y_train):
    """Run the TF graph over the training set and collect features.

    Iterates over the training data in fixed-size batches, feeds the
    joint / part / body streams through the session, and accumulates the
    four feature tensors (`seq_ftr_int`, `seq_ftr`, `seq_ftr_P`,
    `seq_ftr_B`) fetched from the graph.

    Parameters
    ----------
    X_train_J, X_train_P, X_train_B : array-like
        Joint-, part- and body-level inputs; each row is flattened and
        reshaped below to (frames, nodes, 3) coordinates.
    y_train : array-like
        Labels aligned with the first axis of the inputs.

    Returns
    -------
    tuple
        (features_int, features, features_P, features_B, labels) where
        the first four are float32 torch tensors and the last is a plain
        Python list.

    Notes
    -----
    Trailing samples that do not fill a complete batch are dropped —
    presumably because the graph's placeholders expect exactly
    `batch_size` rows (TODO confirm).
    """
    tr_step = 0
    tr_size = X_train_J.shape[0]
    train_labels_all = []
    train_features_all_int = []
    train_features_all = []
    train_features_all_P = []
    train_features_all_B = []
    while tr_step * batch_size < tr_size:
        # Drop the final partial batch (see Notes).
        if (tr_step + 1) * batch_size > tr_size:
            break
        lo = tr_step * batch_size
        hi = (tr_step + 1) * batch_size
        # Reshape each stream to (frames, nodes, 3): joint_num joints,
        # 10 part nodes, 5 body nodes.
        X_input_J = X_train_J[lo:hi].reshape([-1, joint_num, 3])
        X_input_P = X_train_P[lo:hi].reshape([-1, 10, 3])
        X_input_B = X_train_B[lo:hi].reshape([-1, 5, 3])
        labels = y_train[lo:hi]
        all_features_int, all_features, all_features_P, all_features_B = sess.run(
            [seq_ftr_int, seq_ftr, seq_ftr_P, seq_ftr_B],
            feed_dict={
                J_in: X_input_J,
                P_in: X_input_P,
                B_in: X_input_B,
            })
        train_features_all_P.extend(all_features_P.tolist())
        train_features_all_B.extend(all_features_B.tolist())
        train_features_all_int.extend(all_features_int.tolist())
        train_features_all.extend(all_features.tolist())
        train_labels_all.extend(labels.tolist())
        tr_step += 1
    # Convert the accumulated Python lists to float32 torch tensors.
    train_features_all_int = torch.from_numpy(
        np.array(train_features_all_int).astype(np.float32))
    train_features_all = torch.from_numpy(
        np.array(train_features_all).astype(np.float32))
    train_features_all_P = torch.from_numpy(
        np.array(train_features_all_P).astype(np.float32))
    train_features_all_B = torch.from_numpy(
        np.array(train_features_all_B).astype(np.float32))
    return train_features_all_int, train_features_all, train_features_all_P, train_features_all_B, train_labels_all
|
def gal_loader(X_train_J, X_train_P, X_train_B, y_train):
|
tr_step = 0
|
tr_size = X_train_J.shape[0]
|
gal_logits_all = []
|
gal_labels_all = []
|
gal_features_all_int = []
|
gal_features_all = []
|
gal_features_all_P = []
|
gal_features_all_B = []
|
while tr_step * batch_size < tr_size:
|
if (tr_step + 1) * batch_size > tr_size:
|
break
|
X_input_J = X_train_J[tr_step * batch_size:(tr_step + 1) * batch_size]
|
X_input_P = X_train_P[tr_step * batch_size:(tr_step + 1) * batch_size]
|
X_input_B = X_train_B[tr_step * batch_size:(tr_step + 1) * batch_size]
|
X_input_J = X_input_J.reshape([-1, joint_num, 3])
|
X_input_P = X_input_P.reshape([-1, 10, 3])
|
X_input_B = X_input_B.reshape([-1, 5, 3])
|
labels = y_train[tr_step * batch_size:(tr_step + 1) * batch_size]
|
[Seq_features_int, Seq_features, Seq_features_P, Seq_features_B] = sess.run(
|
[seq_ftr_int, seq_ftr, seq_ftr_P, seq_ftr_B],
|
feed_dict={
|
J_in: X_input_J,
|
P_in: X_input_P,
|
B_in: X_input_B,
|
})
|
gal_features_all_int.extend(Seq_features_int.tolist())
|
gal_features_all.extend(Seq_features.tolist())
|
gal_features_all_P.extend(Seq_features_P.tolist())
|
gal_features_all_B.extend(Seq_features_B.tolist())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.