# [extraction artifact: dataset-viewer table header "text stringlengths 1 93.6k" removed]
gallery_cams = np.asarray(gallery_cams)
|
# Sort and find correct matches
|
indices = np.argsort(distmat, axis=1)
|
matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
|
# Compute AP for each query
|
aps = []
|
if (FLAGS.probe_type == 'nm.nm' or FLAGS.probe_type == 'cl.cl' or FLAGS.probe_type == 'bg.bg'):
|
for i in range(1, m):
|
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
|
(gallery_cams[indices[i]] != query_cams[i]))
|
y_true = matches[i, valid]
|
y_score = -distmat[i][indices[i]][valid]
|
# discard nan
|
y_score[np.isnan(y_score)] = 0
|
if not np.any(y_true): continue
|
aps.append(average_precision_score(y_true, y_score))
|
else:
|
for i in range(m):
|
valid = ((gallery_ids[indices[i]] != query_ids[i]) |
|
(gallery_cams[indices[i]] != query_cams[i]))
|
y_true = matches[i, valid]
|
y_score = -distmat[i][indices[i]][valid]
|
# discard nan
|
# y_score = np.nan_to_num(y_score)
|
if not np.any(y_true): continue
|
aps.append(average_precision_score(y_true, y_score))
|
if len(aps) == 0:
|
raise RuntimeError("No valid query")
|
return np.mean(aps)
|
def metrics(X, y, t_X, t_y):
    """Compute retrieval metrics for probe features against a gallery.

    Args:
        X:   gallery features, one row per gallery sample (numpy-convertible).
        y:   gallery identity labels, indexable with integer arrays.
        t_X: probe (query) features, same layout as ``X``.
        t_y: probe identity labels.

    Returns:
        Tuple ``(mAP, top1, top5, top10)`` where the top-k values are hit
        rates in ``[0, 1]``.

    NOTE(review): relies on ``dataset``, ``FLAGS`` and ``mean_ap`` from the
    enclosing scope — confirm they are bound at every call site.
    """
    # Build the probe-by-gallery Euclidean distance matrix and the
    # per-probe gallery ranking (ascending distance).
    if dataset != 'CASIA_B':
        a, b = torch.from_numpy(t_X), torch.from_numpy(X)
        m, n = a.size(0), b.size(0)
        a = a.view(m, -1)
        b = b.view(n, -1)
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b, accumulated in place.
        dist_m = torch.pow(a, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                 torch.pow(b, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # FIX: the positional addmm_(beta, alpha, mat1, mat2) form was
        # deprecated and later removed from PyTorch; use keyword args.
        dist_m.addmm_(a, b.t(), beta=1, alpha=-2)
        # clamp guards sqrt against tiny negatives from float round-off.
        dist_m = (dist_m.clamp(min=1e-12)).sqrt()
        mAP = mean_ap(distmat=dist_m.numpy(), query_ids=t_y, gallery_ids=y)
        _, dist_sort = dist_m.sort(1)
        dist_sort = dist_sort.numpy()
    else:
        X = np.array(X)
        t_X = np.array(t_X)
        dist_m = [(np.linalg.norm(X - i, axis=1)).tolist() for i in t_X]
        dist_m = np.array(dist_m)
        mAP = mean_ap(distmat=dist_m, query_ids=t_y, gallery_ids=y)
        # FIX: rank the distances already computed above instead of
        # recomputing every probe-to-gallery norm a second time
        # (identical ordering, half the work).
        dist_sort = np.argsort(dist_m, axis=1)

    top_1 = top_5 = top_10 = 0
    probe_num = dist_sort.shape[0]
    # For same-condition probe sets (nm.nm / cl.cl / bg.bg) rank 0 is
    # skipped — presumably it is the probe's own sequence appearing in
    # the gallery (TODO confirm against the data-loading code).
    if FLAGS.probe_type in ('nm.nm', 'cl.cl', 'bg.bg'):
        offset = 1
    else:
        offset = 0
    for i in range(probe_num):
        # A top-k hit: the probe identity appears among the k nearest
        # gallery labels (after the optional self-match offset).
        if t_y[i] in y[dist_sort[i, offset:offset + 1]]:
            top_1 += 1
        if t_y[i] in y[dist_sort[i, offset:offset + 5]]:
            top_5 += 1
        if t_y[i] in y[dist_sort[i, offset:offset + 10]]:
            top_10 += 1
    return mAP, top_1 / probe_num, top_5 / probe_num, top_10 / probe_num
|
# Evaluate each feature stream independently with the same metric routine.
# NOTE(review): the "_int" / "_P" / "_B" suffixes presumably denote
# integrated, part-level and body-level features — confirm against the
# feature-extraction code earlier in this function.
mAP_int, top_1_int, top_5_int, top_10_int = metrics(X_int, y, t_X_int, t_y)
mAP, top_1, top_5, top_10 = metrics(X, y, t_X, t_y)
mAP_P, top_1_P, top_5_P, top_10_P = metrics(X_P, y, t_X_P, t_y)
mAP_B, top_1_B, top_5_B, top_10_B = metrics(X_B, y, t_X_B, t_y)
# Drop the (potentially large) feature arrays and force a collection
# before returning, so only the scalar metrics stay alive.
del X, y, t_X, t_y, X_P, t_X_P, X_B, t_X_B, pro_labels_all, pro_features_all
gc.collect()
# Returns 16 scalars: (mAP, top1, top5, top10) for each of the four streams.
return mAP_int, top_1_int, top_5_int, top_10_int, \
       mAP, top_1, top_5, top_10, \
       mAP_P, top_1_P, top_5_P, top_10_P, mAP_B, top_1_B, top_5_B, top_10_B
|
if dataset == 'KGBD' or dataset == 'KS20':
|
if FLAGS.dataset == 'KS20':
|
nb_nodes = 25
|
X_train_J, X_train_P, X_train_B, _, _, y_train, X_gal_J, X_gal_P, X_gal_B, _, _, y_gal, \
|
adj_J, biases_J, _, _, _, _, _, _, _, _, nb_classes, X_train_J_D, X_gal_J_D = \
|
process.gen_train_data(dataset=dataset, split='gallery', time_step=time_step,
|
nb_nodes=nb_nodes, nhood=nhood, global_att=global_att, batch_size=batch_size,
|
norm=norm
|
)
|
nb_nodes = 20
|
elif dataset == 'BIWI':
|
# [extraction artifact: dataset-viewer footer ("Subsets and Splits", community
# SQL queries placeholder) removed — not part of the source file]