Column: "text" — string values, lengths 1 to 93.6k characters.
# Keep a handle to the per-frame features before temporal pooling; the
# frame-level copy is used later for frame-wise importance weighting.
seq_ftr_B_frames = seq_ftr_B

# Temporal average pooling over the frame axis.
# NOTE(review): assumes seq_ftr_B is [batch_size, time_step, feature_dim]
# so axis=1 is the frame/time axis — confirm against the upstream encoder.
seq_ftr_B = tf.reduce_mean(seq_ftr_B, axis=1)

# Flatten to one feature vector per sample: [batch_size, feature_dim].
seq_ftr_B = tf.reshape(seq_ftr_B, [batch_size, -1])
with tf.name_scope("Hi_MPC"), tf.variable_scope("Hi_MPC", reuse=tf.AUTO_REUSE):
|
def Hi_MPC_hard(t, pseudo_lab, all_ftr, cluster_ftr, pseudo_lab_P, all_ftr_P, cluster_ftr_P, pseudo_lab_B,
|
all_ftr_B, cluster_ftr_B):
|
global imp_val, imp_val_P, imp_val_B
|
M = int(FLAGS.M)
|
concat_heads = tf.zeros_like(seq_ftr)
|
concat_heads_clu = tf.zeros_like(cluster_ftr)
|
W_head = lambda: tf.Variable(tf.random_normal([H, H]))
|
all_ftr_mean = tf.reduce_mean(all_ftr, axis=1)
|
all_ftr_P_mean = tf.reduce_mean(all_ftr_P, axis=1)
|
all_ftr_B_mean = tf.reduce_mean(all_ftr_B, axis=1)
|
for i in range(M):
|
W_q_head = W_k_head = tf.Variable(initial_value=W_head)
|
W_q_head_P = W_k_head_P = tf.Variable(initial_value=W_head)
|
W_q_head_B = W_k_head_B = tf.Variable(initial_value=W_head)
|
all_ftr_trans = tf.matmul(all_ftr, W_q_head)
|
all_ftr_trans_mean = tf.matmul(all_ftr_mean, W_q_head)
|
cluster_ftr_trans = tf.matmul(cluster_ftr, W_k_head)
|
all_ftr_trans_P = tf.matmul(all_ftr_P, W_q_head_P)
|
all_ftr_trans_P_mean = tf.matmul(all_ftr_P_mean, W_q_head_P)
|
cluster_ftr_trans_P = tf.matmul(cluster_ftr_P, W_k_head_P)
|
all_ftr_trans_B = tf.matmul(all_ftr_B, W_q_head_B)
|
all_ftr_trans_B_mean = tf.matmul(all_ftr_B_mean, W_q_head_B)
|
cluster_ftr_trans_B = tf.matmul(cluster_ftr_B, W_k_head_B)
|
pred_lbl = tf.argmax(tf.matmul(all_ftr_trans_mean, tf.transpose(cluster_ftr_trans)) / np.sqrt(H),
|
-1)
|
pred_lbl_P = tf.argmax(
|
tf.matmul(all_ftr_trans_P_mean, tf.transpose(cluster_ftr_trans_P)) / np.sqrt(H), -1)
|
pred_lbl_B = tf.argmax(
|
tf.matmul(all_ftr_trans_B_mean, tf.transpose(cluster_ftr_trans_B)) / np.sqrt(H), -1)
|
# importance inference
|
logits = tf.matmul(all_ftr_trans, tf.transpose(cluster_ftr_trans)) / np.sqrt(H)
|
logits_P = tf.matmul(all_ftr_trans_P, tf.transpose(cluster_ftr_trans_P)) / np.sqrt(H)
|
logits_B = tf.matmul(all_ftr_trans_B, tf.transpose(cluster_ftr_trans_B)) / np.sqrt(H)
|
pred_lbl_frames = tf.reshape(tf.tile(tf.reshape(pred_lbl, [-1, 1]), [1, time_step]), [-1])
|
pred_lbl_P_frames = tf.reshape(tf.tile(tf.reshape(pred_lbl_P, [-1, 1]), [1, time_step]), [-1])
|
pred_lbl_B_frames = tf.reshape(tf.tile(tf.reshape(pred_lbl_B, [-1, 1]), [1, time_step]), [-1])
|
pred_lbl_frames = tf.cast(pred_lbl_frames, tf.int32)
|
pred_lbl_P_frames = tf.cast(pred_lbl_P_frames, tf.int32)
|
pred_lbl_B_frames = tf.cast(pred_lbl_B_frames, tf.int32)
|
# [batch_size, time_step]
|
pseudo_lab_frames = tf.reshape(tf.tile(tf.reshape(pseudo_lab, [-1, 1]), [1, time_step]), [-1])
|
pseudo_lab_P_frames = tf.reshape(tf.tile(tf.reshape(pseudo_lab_P, [-1, 1]), [1, time_step]), [-1])
|
pseudo_lab_B_frames = tf.reshape(tf.tile(tf.reshape(pseudo_lab_B, [-1, 1]), [1, time_step]), [-1])
|
# [batch_size, time_step]
|
# If pred. is true, focus on less-score frames, otherwise focus on high-score (wrong label) frames
|
indices = tf.concat([tf.reshape(tf.range(0, batch_size * time_step), [-1, 1]),
|
tf.reshape(pred_lbl_frames, [-1, 1])], axis=-1)
|
indices_P = tf.concat([tf.reshape(tf.range(0, batch_size * time_step), [-1, 1]),
|
tf.reshape(pred_lbl_P_frames, [-1, 1])], axis=-1)
|
indices_B = tf.concat([tf.reshape(tf.range(0, batch_size * time_step), [-1, 1]),
|
tf.reshape(pred_lbl_B_frames, [-1, 1])], axis=-1)
|
imp_frames_unorm = tf.gather_nd(tf.reshape(logits, [batch_size * time_step, -1]), indices)
|
imp_P_frames_unorm = tf.gather_nd(tf.reshape(logits_P, [batch_size * time_step, -1]), indices_P)
|
imp_B_frames_unorm = tf.gather_nd(tf.reshape(logits_B, [batch_size * time_step, -1]), indices_B)
|
imp_frames_unorm = tf.reshape(imp_frames_unorm, [-1, time_step])
|
imp_P_frames_unorm = tf.reshape(imp_P_frames_unorm, [-1, time_step])
|
imp_B_frames_unorm = tf.reshape(imp_B_frames_unorm, [-1, time_step])
|
ones = tf.ones_like(pseudo_lab_frames, dtype=tf.float32)
|
if FLAGS.focus == '1':
|
imp_frames = tf.nn.softmax(tf.reshape(
|
tf.reshape(imp_frames_unorm, [-1]) * tf.where(tf.equal(pred_lbl_frames, pseudo_lab_frames),
|
-ones, ones), [batch_size, time_step]),
|
axis=-1)
|
imp_P_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_P_frames_unorm, [-1]) * tf.where(
|
tf.equal(pred_lbl_P_frames, pseudo_lab_P_frames),
|
-ones, ones), [batch_size, time_step]), axis=-1)
|
imp_B_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_B_frames_unorm, [-1]) * tf.where(
|
tf.equal(pred_lbl_B_frames, pseudo_lab_B_frames),
|
-ones, ones), [batch_size, time_step]), axis=-1)
|
elif FLAGS.focus == '-1':
|
imp_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_frames_unorm, [-1]) * tf.where(
|
tf.equal(pred_lbl_frames, pseudo_lab_frames),
|
ones, -ones), [batch_size, time_step]), axis=-1)
|
imp_P_frames = tf.nn.softmax(tf.reshape(tf.reshape(imp_P_frames_unorm, [-1]) * tf.where(
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.