text stringlengths 1 93.6k |
|---|
"""
|
# Determine the categories to iterate over: "all" expands to every named
# category; otherwise only the explicitly requested ones are used.
categories = args.name_categories if ("all" in args.categories) else args.categories # if not applicable, `categories` becomes `[None]`

for category in categories:

    # -- Find the indices of relevant data to use for heatmaps generation
    if ONLY_BEST_RESULTS:

        # Specify the category to be studied, if applicable
        opt = copy.deepcopy(args)
        if args.study_per_category and (args.number_categories > 1):
            opt.categories = category

        # Load data
        queries_loader, targets_loader = data.get_eval_loaders(opt, vocab, args.studied_split)

        # Find the best triplets (queries whose ground-truth target is best ranked)
        studied_indices, rank_of_GT = find_best_results(model, opt, queries_loader, targets_loader)

        # Save metadata: map each studied data index to the rank of its ground truth
        d = {studied_indices[i]: int(rank_of_GT[i]) for i in range(len(studied_indices))}
        directory = os.path.join(args.heatmap_dir, args.exp_name)
        if not os.path.isdir(directory):
            os.makedirs(directory)
        # NOTE(review): the file is opened in append mode, so runs over several
        # categories (or repeated runs) accumulate entries — the resulting file
        # is a log of JSON fragments, not a single valid JSON document.
        with open(os.path.join(directory, "metadata.json"), "a") as f:
            f.write(f"\n\nCategory: {category} \n")
            json.dump(d, f)
        print(f"Saving metadata (studied data indices, rank of GT) at {os.path.join(directory, 'metadata.json')}.")

    else:
        # No pre-selection: process examples in plain dataloader order.
        studied_indices = None

    # -- Generate heatmaps

    # Specify the category to be studied, if applicable
    opt = copy.deepcopy(args)
    if args.study_per_category and (args.number_categories > 1):
        opt.categories = category

    # Load data — batch size is forced to 1 because heatmaps are generated
    # per individual example (see generate_heatmaps_from_dataloader).
    opt.batch_size = 1
    triplet_loader = data.get_train_loader(opt, vocab, split=args.studied_split, shuffle=False)

    # Generate heatmaps
    generate_heatmaps_from_dataloader(triplet_loader, model, opt,
                                      studied_indices=studied_indices)
|
def find_best_results(model, opt, queries_loader, targets_loader):
    """
    Identify the queries on which the model performs best.

    Return a pair of lists:
        - the dataloader indices of the NUMBER_OF_EXAMPLES queries whose
          ground-truth target received the best (lowest) rank;
        - the rank of the ground-truth target for each selected query.
    """
    # Inference only: freeze normalization/dropout behavior and skip
    # building the computational graph.
    model.eval()
    with torch.no_grad():
        rank_of_GT = compute_and_process_compatibility_scores(
            queries_loader, targets_loader, model, opt, output_type="metrics")

    # Keep the queries with the smallest ground-truth rank.
    _, sorted_ids = rank_of_GT.sort()
    best_ids = sorted_ids[:NUMBER_OF_EXAMPLES]
    return best_ids.tolist(), rank_of_GT[best_ids].tolist()
|
def generate_heatmaps_from_dataloader(data_loader, model, args,
|
studied_indices=None):
|
"""
|
Generate and save heatmaps for several (specific) data examples from the
|
provided dataloader.
|
Input:
|
data_loader: train type, must handle batchs of size 1.
|
studied_indices: indices of the data examples that should be studied,
|
within the dataloader. If None, the processed data examples are
|
taken in the order of the provided dataloader.
|
"""
|
# set the evaluation mode
|
model.eval()
|
params_require_grad(model.txt_enc, False)
|
# iterate over the dataloader to produce the heatmaps
|
data_loader_iterator, itr = iter(data_loader), 0
|
while itr < NUMBER_OF_EXAMPLES:
|
# Get data
|
img_src, txt, txt_len, img_trg, ret_caps, data_id = next(data_loader_iterator)
|
example_number = data_id[0] # batch size is 1
|
if (studied_indices is None) or (example_number in studied_indices):
|
# Process data
|
generate_heatmap_from_single_data(args, model, img_src, txt, txt_len,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.