# NOTE(review): dataset-viewer table-header residue removed here; this chunk
# begins mid-function (the enclosing loop's `def` is outside this extract).
img_trg, example_number)
|
# store image identifiers & caption
|
formated_caption, ref_identifier, trg_identifier = data_loader.dataset.get_triplet_info(example_number)
|
directory = os.path.join(args.heatmap_dir, args.exp_name, str(example_number))
|
with open(os.path.join(directory, "metadata.txt"), "a") as f:
|
f.write(f"{example_number}*{formated_caption}*{ref_identifier}*{trg_identifier}\n")
|
# Clean
|
del img_src, txt, txt_len, img_trg, ret_caps, data_id
|
# Iterate
|
itr += 1
|
def generate_heatmap_from_single_data(args, model, img_src, txt, txt_len,
                                      img_trg, example_number):
    """
    Generate and save heatmaps for one data example.

    Runs a forward pass that caches intermediate results on the model, then
    builds one set of heatmaps per studied score (IS and EM).

    Input:
        example_number: index of the current data example in a dataloader.
    """
    # Move all inputs to the GPU when one is available.
    if torch.cuda.is_available():
        img_src = img_src.cuda()
        img_trg = img_trg.cuda()
        txt = txt.cuda()
        txt_len = txt_len.cuda()

    # Gradients w.r.t. both images are required to compute the heatmaps.
    img_src = img_src.requires_grad_(True)
    img_trg = img_trg.requires_grad_(True)

    # Forward pass; intermediate results are stashed in model.hold_results
    # (the returned scores themselves are not needed here).
    model.forward_save_intermediary(img_src, img_trg, txt, txt_len)

    # IS score involves the reference image; EM only involves the target.
    heatmap_from_score(args, model, example_number,
                       "IS", "A_IS_r", "A_IS_t", img_trg,
                       img_src=img_src, r_is_involved=True)
    heatmap_from_score(args, model, example_number,
                       "EM", "Tr_m", "A_EM_t", img_trg)
|
def heatmap_from_score(args, model, example_number, s_name,
|
query_contrib_name, t_contrib_name,
|
img_trg, img_src=None, r_is_involved=False):
|
"""
|
Generate and save heatmaps for a given data example and score.
|
The studied score (indicated by s_name) results from the dot product of two
|
known subresults, one that is query-related (query_contrib_name), and the
|
other that is target-related (t_contrib_name).
|
Input:
|
example_number: index of the current data example in a dataloader.
|
s_name: score name (EM|IS)
|
query_contrib_name: name of the query-related subresult that contributes
|
to the score to study (Tr_m|A_IS_r)
|
t_contrib_name: name of the target-related subresult that contributes to
|
the score to study (A_EM_t|A_IS_t)
|
r_is_involved: whether the reference image is involved in the score.
|
"""
|
r_heatmap_tmp = None, None
|
t_heatmap_tmp = None, None
|
# get images activations
|
if r_is_involved:
|
r_activation = model.hold_results["r_activation"] # size (batch_size, channels, 7, 7)
|
t_activation = model.hold_results["t_activation"] # size (batch_size, channels, 7, 7)
|
# find the coeffs that contribute the most to the score
|
query_contrib = model.hold_results[query_contrib_name]
|
t_contrib = model.hold_results[t_contrib_name]
|
main_coeffs = get_main_coeffs(query_contrib, t_contrib)
|
# produce one heatmap per main coeff for each score
|
for main_coeff in main_coeffs:
|
# extract the contribution of the selected output coeff
|
score_contrib = (query_contrib*t_contrib)[:,main_coeff]
|
# get pooled gradients across the channels for the given output coeff
|
if r_is_involved:
|
r_weights = get_weights(model, score_contrib, r_activation) # size (batch_size, channels)
|
t_weights = get_weights(model, score_contrib, t_activation) # size (batch_size, channels)
|
# weight the channels of the activation map with the pooled gradients,
|
# and add this weighted activation map to the total heatmap, accounting for
|
# the contribution of the selected output coeff, if only one heatmap is required ;
|
# otherwise, save the current heatmap
|
if r_is_involved:
|
r_heatmap_tmp = (r_activation * r_weights.view(args.batch_size, -1, 1, 1)).sum(dim=1).detach().cpu() # size (batch_size, 7, 7)
|
t_heatmap_tmp = (t_activation * t_weights.view(args.batch_size, -1, 1, 1)).sum(dim=1).detach().cpu() # size (batch_size, 7, 7)
|
save_heatmaps(args,
|
example_number,
|
f"{s_name}_coeff_{main_coeff}",
|
round(score_contrib[0].item(), 4),
|
t_heatmap_tmp,
|
img_trg,
|
r_heatmap_tmp,
|
img_src,
|
# NOTE(review): extract is truncated here — the save_heatmaps(...) call above
# is cut off mid-argument-list (dataset-viewer footer text replaced the rest
# of the file). Recover the remaining arguments from the original source.