text stringlengths 1 93.6k |
|---|
global loss_weights
|
if loss_weights is None:
|
loss_weights = np.ones((len(PRED_DOWNSCALE_FACTORS), NUM_BOXES_PER_SCALE+1))
|
def test_function(img_batch, gt_batch, roi_batch):
|
global test_loss
|
global counter
|
gt_batch = (gt_batch > 0).astype(np.float32)
|
loss, pred_batch, gt_batch = test_funcs(img_batch, gt_batch, loss_weights, network)
|
test_loss += loss
|
counter += 1
|
return (*pred_batch), (*gt_batch)
|
if isinstance(print_output, str):
|
print_path = print_output
|
elif isinstance(print_output, bool) and print_output:
|
print_path = './models/dump'
|
else:
|
print_path = None
|
e = dataset.iterate_over_test_data(test_function, set_name)
|
for e_idx, e_iter in enumerate(e):
|
image_split = e_iter[1].split('/')
|
image_name = image_split[len(image_split)-1]
|
image = cv2.imread(e_iter[1])
|
maps = [(image, {}),
|
(e_iter[2], {'cmap': 'jet', 'vmin': 0., 'vmax': 1.})]
|
pred_dot_map, pred_box_map = get_box_and_dot_maps(e_iter[0][0:4], thresh=thresh) # prediction_downscale
|
# -- Plotting boxes
|
boxed_image_pred = get_boxed_img(image, pred_box_map, pred_box_map, \
|
pred_dot_map, prediction_downscale=2, \
|
thickness=2, multi_colours=False)
|
boxed_image_pred_path = os.path.join(print_path, image_name + '_boxed_image.png')
|
cv2.imwrite(boxed_image_pred_path, boxed_image_pred.astype(np.uint8).transpose((1, 2, 0)))
|
print_graph(maps, "", os.path.join(print_path, image_name))
|
# -- Calculate metrics
|
metrics_test = calculate_metrics(pred_dot_map, e_iter[2], metrics_test)
|
for m in metrics_:
|
metrics_test[m] /= float(e_idx+1)
|
metrics_test['mse'] = np.sqrt(metrics_test['mse'])
|
metrics_test['loss1'] = test_loss / float(counter)
|
txt = ''
|
for metric in metrics_test.keys():
|
if metric == "mle" and (args.mle == False):
|
continue
|
txt += '%s: %s ' % (metric, metrics_test[metric])
|
return metrics_test, txt
|
'''
This function calculates the various counting and localization metrics.

Parameters
----------
pred: dot map prediction of LSC-CNN (HxW)
true: ground truth map (HxW)
metrics_test: dictionary of metrics

Returns
----------
metrics_test: updated dictionary of metrics
'''
|
def calculate_metrics(pred, true, metrics_test):
    """Accumulate counting and localization metrics for one image.

    Parameters
    ----------
    pred : dot-map prediction of LSC-CNN (HxW).
    true : ground-truth map (HxW).
    metrics_test : dict of running metric sums; 'new_mae' and 'mse' are
        always updated, 'mle' only when args.mle is set.

    Returns
    -------
    metrics_test : the same dict, updated in place and returned.
    """
    pred_count = np.sum(pred)
    true_count = np.sum(true)
    # NOTE(review): the *_true coordinates are taken from `pred` and the
    # *_pred coordinates from `true` — the names look swapped. Confirm
    # against get_offset_error's expected argument order before renaming.
    head_x_true, head_y_true = np.where(pred > 0)[-2:]
    head_x_pred, head_y_pred = np.where(true > 0)[-2:]
    if args.mle:
        if len(head_x_pred) == 0:
            # NOTE(review): head_y_pred is empty whenever head_x_pred is
            # (both come from the same np.where call), so this branch always
            # yields off == 0. Presumably the penalty was meant to scale with
            # the point count of the *other* map — verify intent.
            off = 16*len(head_y_pred)
        else:
            # Mean localization error at the given downscale factor.
            off, _, _ = get_offset_error(head_x_pred, head_y_pred, head_x_true, head_y_true, output_downscale)
        metrics_test['mle'] += off
    # Running absolute-error and squared-error sums over the count.
    metrics_test['new_mae'] += np.abs(true_count - pred_count)
    metrics_test['mse'] += (true_count - pred_count) ** 2
    return metrics_test
|
'''
This function finds the optimal threshold on the validation set.

Parameters
----------
f: (file object) file writer
iters: number of iterations to run the binary search
test_funcs: lsccnn test function
splits: number of splits to the range of thresholds
beg: beginning threshold
end: ending threshold

Returns
----------
optimal_threshold: optimal threshold where the MAE is
    lowest on the validation set.
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.