text
stringlengths
1
93.6k
# --- End-of-epoch evaluation bookkeeping (fragment of the training loop) ---
# NOTE(review): indentation was flattened in this dump; the original nesting
# relative to the surrounding loop must be restored from the full file.
# Snapshot the component-level (P) Top-10 alongside the other P metrics
# (presumably inside an earlier "new best P Top-1" branch — confirm upstream).
top_10_max_P = top_10_P
# Limb-level (B) best tracking: a new best Top-1 refreshes the stored
# mAP/Top-5/Top-10 snapshots as well, so all "max" values come from the
# same epoch. epoch > 0 skips the untrained first evaluation.
if epoch > 0 and top_1_B > max_acc_2_B:
max_acc_1_B = mAP_B
max_acc_2_B = top_1_B
top_5_max_B = top_5_B
top_10_max_B = top_10_B
if epoch > 0:
# Per-level report lines kept for reference; only the fused MSMR line
# is printed in the current configuration.
# print(
# '[Joint-Level] %s - %s | Top-1: %.4f (%.4f) | Top-5: %.4f (%.4f) | Top-10: %.4f (%.4f) | mAP: %.4f (%.4f)' % (
# FLAGS.dataset, FLAGS.probe,
# top_1, max_acc_2, top_5, top_5_max, top_10, top_10_max, mAP, max_acc_1,))
#
# print(
# '[Component-Level] %s - %s | Top-1: %.4f (%.4f) | Top-5: %.4f (%.4f) | Top-10: %.4f (%.4f) | mAP: %.4f (%.4f) ' % (
# FLAGS.dataset, FLAGS.probe,
# top_1_P, max_acc_2_P, top_5_P, top_5_max_P, top_10_P, top_10_max_P, mAP_P,
# max_acc_1_P,))
# print(
# '[Limb-Level] %s - %s | Top-1: %.4f (%.4f) | Top-5: %.4f (%.4f) | Top-10: %.4f (%.4f) | mAP: %.4f (%.4f) ' % (
# FLAGS.dataset, FLAGS.probe,
# top_1_B, max_acc_2_B, top_5_B, top_5_max_B, top_10_B, top_10_max_B, mAP_B,
# max_acc_1_B,))
# Fused multi-level (MSMR) metrics: current value with best-so-far in
# parentheses for Top-1/5/10 and mAP.
print(
'[MSMR] %s - %s | Top-1: %.4f (%.4f) | Top-5: %.4f (%.4f) | Top-10: %.4f (%.4f) | mAP: %.4f (%.4f) ' % (
FLAGS.dataset, FLAGS.probe,
top_1_int, max_acc_2_int, top_5_int, top_5_max_int, top_10_int, top_10_max_int,
mAP_int, max_acc_1_int,))
# print(
# " %.4f-%.4f-%.4f-%.4f \n %.4f-%.4f-%.4f-%.4f \n %.4f-%.4f-%.4f-%.4f \n %.4f-%.4f-%.4f-%.4f" % (
# max_acc_2, top_5_max, top_10_max, max_acc_1,
# max_acc_2_P, top_5_max_P, top_10_max_P, max_acc_1_P, max_acc_2_B, top_5_max_B,
# top_10_max_B, max_acc_1_B,
# max_acc_2_int, top_5_max_int, top_10_max_int, max_acc_1_int))
# Early stopping: abandon training once no improvement has been seen for
# `patience` consecutive evaluations (cur_patience maintained elsewhere).
if cur_patience == patience:
break
def generate_cluster_features(labels, features):
    """Compute one centroid per cluster from pseudo-labeled features.

    Args:
        labels: iterable of int cluster ids, aligned with `features`;
            -1 marks DBSCAN outliers and is skipped.
        features: indexable sequence of 1-D torch tensors (e.g. a 2-D
            tensor of shape (num_samples, feat_dim)).

    Returns:
        torch.Tensor of shape (num_clusters, feat_dim): the mean feature
        of each cluster, ordered by ascending cluster id.
    """
    centers = collections.defaultdict(list)
    for label, feat in zip(labels, features):
        if label == -1:  # DBSCAN noise point — excluded from every centroid
            continue
        centers[label].append(feat)
    # Mean-pool each cluster; sorted() fixes a deterministic row order so
    # row i corresponds to the i-th smallest cluster id.
    centers = [
        torch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys())
    ]
    return torch.stack(centers, dim=0)
# --- Pseudo-label generation for the three feature levels (fragment) ---
# NOTE(review): indentation was flattened in this dump; restore nesting from
# the full file. The span also continues past the visible end.
# Jaccard distance after k-reciprocal re-ranking, one matrix per feature
# level: joint (no suffix), component (_P), limb (_B).
rerank_dist = compute_jaccard_distance(train_features_all, k1=k1, k2=k2)
rerank_dist_P = compute_jaccard_distance(train_features_all_P, k1=k1, k2=k2)
rerank_dist_B = compute_jaccard_distance(train_features_all_B, k1=k1, k2=k2)
# DBSCAN over the precomputed distances; fit_predict returns -1 for noise.
# The same estimator object is re-fit for each level, which is safe because
# only the predicted labels are kept.
cluster = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1)
pseudo_labels = cluster.fit_predict(rerank_dist)
pseudo_labels_P = cluster.fit_predict(rerank_dist_P)
pseudo_labels_B = cluster.fit_predict(rerank_dist_B)
# NOTE(review): redundant — X_train_J_new is unconditionally overwritten
# by the masked assignment below.
X_train_J_new = X_train_J
# Cluster centroids per level, converted to float64 numpy arrays
# (presumably for a downstream memory-bank / classifier init — confirm).
cluster_features = generate_cluster_features(pseudo_labels, train_features_all)
cluster_features = cluster_features.numpy()
cluster_features = cluster_features.astype(np.float64)
cluster_features_P = generate_cluster_features(pseudo_labels_P, train_features_all_P)
cluster_features_P = cluster_features_P.numpy()
cluster_features_P = cluster_features_P.astype(np.float64)
cluster_features_B = generate_cluster_features(pseudo_labels_B, train_features_all_B)
cluster_features_B = cluster_features_B.numpy()
cluster_features_B = cluster_features_B.astype(np.float64)
# Keep only samples that are inliers at ALL three levels; the same boolean
# mask is recomputed for each array (could be hoisted into one variable).
X_train_J_new = X_train_J[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
X_train_P_new = X_train_P[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
X_train_B_new = X_train_B[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
# Count joint-level outliers BEFORE pseudo_labels is replaced below.
outlier_num = np.sum((pseudo_labels == -1))
pseudo_labels_new = pseudo_labels[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
pseudo_labels_P_new = pseudo_labels_P[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
pseudo_labels_B_new = pseudo_labels_B[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
train_labels_all = np.array(train_labels_all)
# NOTE(review): order-sensitive — this mask must use the UNFILTERED
# pseudo_labels, so it has to stay above the `pseudo_labels = ...`
# reassignment two lines down.
train_labels_all = train_labels_all[
np.where((pseudo_labels != -1) & (pseudo_labels_P != -1) & (pseudo_labels_B != -1))]
pseudo_labels = pseudo_labels_new
# Per-level outlier counts, each taken before its label array is swapped
# for the filtered version.
outlier_num_P = np.sum((pseudo_labels_P == -1))
pseudo_labels_P = pseudo_labels_P_new
outlier_num_B = np.sum((pseudo_labels_B == -1))