text
stringlengths
1
93.6k
r_is_involved)
if r_is_involved:
del r_activation, r_weights, r_heatmap_tmp
del t_activation, t_weights, score_contrib, t_heatmap_tmp
del query_contrib, t_contrib, main_coeffs
def save_heatmaps(args, example_number, s_name, s_value, t_heatmap, img_trg,
                  r_heatmap=None, img_src=None, r_is_involved=False):
    """
    Save provided heatmaps.
    Files are stored in {args.heatmap_dir}/{args.exp_name}/{example_number}/,
    with the names {s_name}_on_trg_heatmap.jpg and {s_name}_on_src_heatmap.jpg.
    Additional information is stored as metadata.
    Input:
        args: parsed arguments (must provide `heatmap_dir` and `exp_name`)
        example_number: index of the current data example in a dataloader.
        s_name: score name (or alternatively: what is observed)
        s_value: score value (or alternatively: metric about what is observed)
        t_heatmap: heatmap to apply on the target image.
        img_trg: target image, as it is processed by the model (cropped & resized...)
        r_heatmap: heatmap to apply on the reference image (optional)
        img_src: reference image, as it is processed by the model (cropped & resized...)
        r_is_involved: whether a heatmap and an image are provided for the reference image.
    """
    # normalize the heatmaps to [0, 1] before superimposing them on images
    if r_is_involved:
        r_heatmap = normalize_heatmap(r_heatmap)
    t_heatmap = normalize_heatmap(t_heatmap)
    # store interpretations; exist_ok avoids a check-then-create race
    directory = os.path.join(args.heatmap_dir, args.exp_name, str(example_number))
    os.makedirs(directory, exist_ok=True)
    filename = os.path.join(directory, '{}_heatmap.jpg')
    if r_is_involved:
        merge_heatmap_on_image(r_heatmap, img_src, filename.format(f"{s_name}_on_src"))
    merge_heatmap_on_image(t_heatmap, img_trg, filename.format(f"{s_name}_on_trg"))
    # store captions as well for later visualization
    with open(os.path.join(directory, "metadata.txt"), "a") as f:
        f.write(f"{example_number}*{s_name}*{s_value}\n")
def get_weights(model, output, conv_activation):
    """
    Given a model output value (`output`) and the activation map that produced
    it (`conv_activation`), return the gradient of the value with respect to
    the activation map, averaged over the spatial dimensions of the map.
    """
    # clear any stale gradients so the backward pass starts fresh
    model.zero_grad()
    # gradient of the output w.r.t. the convolutional activation,
    # shape (batch_size, channels, height, width)
    gradients, = grad(outputs=output, inputs=conv_activation, retain_graph=True)
    # average each channel's gradient over its 2D feature map
    # -> shape (batch_size, channels)
    return gradients.mean(dim=(2, 3))
def normalize_heatmap(heatmap):
    """Zero out negative values (ReLU) and rescale the heatmap to [0, 1]."""
    return normalize_image(heatmap.clamp(min=0))
def normalize_image(img):
    """
    Rescale `img` to the [0, 1] range, in place.
    Input:
        img: torch.Tensor or np.ndarray with a float dtype (the in-place
            arithmetic requires it).
    Output:
        the same object, shifted so its minimum is 0 and divided by its
        maximum; a small epsilon replaces the divisor when the image is
        constant, to avoid a division by zero.
    Raises:
        TypeError: for any other input type (previously this surfaced as a
            confusing NameError on an unbound `maxi_value`).
    """
    if isinstance(img, torch.Tensor):
        img -= torch.min(img)
        maxi_value = torch.max(img)
    elif isinstance(img, np.ndarray):
        img -= np.min(img)
        maxi_value = np.max(img)
    else:
        raise TypeError(f"normalize_image expects a torch.Tensor or np.ndarray, got {type(img)}")
    # after the shift the minimum is 0; guard against an all-constant image
    img /= maxi_value if maxi_value > 0 else 0.00001
    return img
def merge_heatmap_on_image(heatmap, initial_img, produced_img_path):
"""
Superimpose the heatmap on the initial image.
The initial image must be the processed version (correctly resized &
centered/crop) of the original image (since the model made the computation
on the processed image)
Input :
heatmap: torch tensor of size (1, 7, 7), with values between 0 and 1
initial_img: (cuda) torch tensor of size (1, 3, 224, 224) -
unknown range of values
produced_img_path: where to save the new image, consisting in the
heatmap superimposed on the initial image
"""
# consider the first and unique element of the batch
# the initial image must be of size (heigth, width, color channels) to fit
# the cv2 processing (hence the permutation)
heatmap = heatmap[0].data.numpy()