            # hard mining: frames whose predicted label disagrees with the
            # pseudo label get their importance sign flipped, then the scores
            # are renormalized per sequence with a softmax over the time axis
            imp_P_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_P_frames_unorm, [-1]) * tf.where(
                tf.equal(pred_lbl_P_frames, pseudo_lab_P_frames),
                ones, -ones), [batch_size, time_step]), axis=-1)
            imp_B_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_B_frames_unorm, [-1]) * tf.where(
                tf.equal(pred_lbl_B_frames, pseudo_lab_B_frames),
                ones, -ones), [batch_size, time_step]), axis=-1)
        else:
            # without hard mining, every frame receives uniform importance
            imp_frames = tf.ones([batch_size, time_step])
            imp_P_frames = tf.ones([batch_size, time_step])
            imp_B_frames = tf.ones([batch_size, time_step])
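        # per-level (joint/part/body) mean transformed features from the
        # current iteration; these are accumulated and averaged over all M
        # iterations below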
        W_att_int = all_ftr_trans_mean
        W_att_int_P = all_ftr_trans_P_mean
        W_att_int_B = all_ftr_trans_B_mean
        pseudo_lab_frames = tf.reshape(pseudo_lab_frames, [batch_size, time_step])
        pseudo_lab_P_frames = tf.reshape(pseudo_lab_P_frames, [batch_size, time_step])
        pseudo_lab_B_frames = tf.reshape(pseudo_lab_B_frames, [batch_size, time_step])
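        # accumulate the importance-weighted losses, the level-wise features,
        # and the softmax importance scores: the first iteration initializes
        # the running sums, later iterations add to them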
        if i == 0:
            # weighted loss over all frames in each sequence, then averaged
            # over all sequences
            loss = tf.reduce_mean(tf.reduce_sum(
                imp_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab_frames,
                                                                            logits=logits), axis=-1))
            loss_P = tf.reduce_mean(tf.reduce_sum(
                imp_P_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab_P_frames,
                                                                              logits=logits_P), axis=-1))
            loss_B = tf.reduce_mean(tf.reduce_sum(
                imp_B_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab_B_frames,
                                                                              logits=logits_B), axis=-1))
            W_att_int_ave = W_att_int
            W_att_int_P_ave = W_att_int_P
            W_att_int_B_ave = W_att_int_B
            # importance scores for evaluation
            imp_val = tf.nn.softmax(logits, axis=-1)
            imp_val_P = tf.nn.softmax(logits_P, axis=-1)
            imp_val_B = tf.nn.softmax(logits_B, axis=-1)
        else:
            loss = loss + tf.reduce_mean(tf.reduce_sum(
                imp_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pseudo_lab_frames,
                                                                            logits=logits), axis=-1))
            loss_P = loss_P + tf.reduce_mean(tf.reduce_sum(
                imp_P_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=pseudo_lab_P_frames, logits=logits_P), axis=-1))
            loss_B = loss_B + tf.reduce_mean(tf.reduce_sum(
                imp_B_frames * tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=pseudo_lab_B_frames, logits=logits_B), axis=-1))
            W_att_int_ave = W_att_int_ave + W_att_int
            W_att_int_P_ave = W_att_int_P_ave + W_att_int_P
            W_att_int_B_ave = W_att_int_B_ave + W_att_int_B
            # importance scores for evaluation
            imp_val = imp_val + tf.nn.softmax(logits, axis=-1)
            imp_val_P = imp_val_P + tf.nn.softmax(logits_P, axis=-1)
            imp_val_B = imp_val_B + tf.nn.softmax(logits_B, axis=-1)
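    # after the loop: average every accumulated quantity over the M iterations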
    # importance scores for evaluation
    imp_val = imp_val / M
    imp_val_P = imp_val_P / M
    imp_val_B = imp_val_B / M
    loss = loss / M  # average the joint-level loss too, consistent with loss_P and loss_B
    loss_P = loss_P / M
    loss_B = loss_B / M
    W_att_int_ave = W_att_int_ave / M
    W_att_int_P_ave = W_att_int_P_ave / M
    W_att_int_B_ave = W_att_int_B_ave / M
    return loss, loss_P, loss_B, W_att_int_ave, W_att_int_P_ave, W_att_int_B_ave
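
# joint (J), part (P), and body (B) level losses computed from the
# corresponding pseudo labels and sequence features, then averaged into the
# final hierarchical loss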
loss_J, loss_P, loss_B, W_att_int_ave, W_att_int_P_ave, W_att_int_B_ave = Hi_MPC_hard(
    np.sqrt(H), pseudo_lab_J, seq_ftr_frames, seq_cluster_ftr_J, pseudo_lab_P, seq_ftr_P_frames,
    seq_cluster_ftr_P, pseudo_lab_B, seq_ftr_B_frames, seq_cluster_ftr_B)
Hi_MPC_loss = (loss_J + loss_P + loss_B) / 3
seq_ftr_int = tf.concat([W_att_int_ave, W_att_int_P_ave, W_att_int_B_ave], axis=-1)
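
# a single Adam optimizer minimizes the averaged hierarchical loss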
optimizer = tf.train.AdamOptimizer(learning_rate=float(FLAGS.lr))
train_op = optimizer.minimize(Hi_MPC_loss)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
with tf.Session(config=config) as sess:
    sess.run(init_op)
    if FLAGS.model_size == '1':
        # report model size (in millions of parameters, M) and computational
        # complexity (GFLOPs) via the TF1 profiler
        def stats_graph(graph):
            flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
            params = tf.profiler.profile(graph,
                                         options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())
            print('FLOPs: {} GFLOPS; Trainable params: {} M'.format(flops.total_float_ops / 1e9,
                                                                    params.total_parameters / 1e6))
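        # assumed usage (hypothetical; the call site is not part of this
        # fragment): profile the session's graph once variables are initialized
        stats_graph(sess.graph)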