for idx in range(16):
    sam_new = np.copy(sam_masks[:, idx*w:(idx+1)*w])
    parts_upbd = max(parts_upbd, np.max(sam_new)+1)
    parts_lobd = min(parts_lobd, np.max(sam_new)+1)
    for idi in range(max(0, np.min(sam_new)), np.max(sam_new)+1):
        part_dicts[(idx, idi)] = count_part
        count_part += 1
parts_upbd *= 2
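# part_dicts now maps every (frame index, SAM mask id) pair to a global part id, and
# parts_upbd / parts_lobd track roughly the largest / smallest number of SAM masks seen
# in any single frame. The doubling above appears to reserve headroom (e.g. for parts
# produced by later merging); that reading of the factor 2 is an assumption, not stated
# anywhere in this script.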
mask_cents = {}  # key: part/vertex id, value: [3D center, img id, img x, img y, mask pixel count]
imgs_3dpts = {}  # key: img id, value: [3D points of valid pixels, their SAM mask ids]
for idx in range(16):
    # data loading for current frame
    dep_ = np.copy(depth_maps[:, idx*w:(idx+1)*w])
    alp_ = np.copy(alpha_maps[:, idx*w:(idx+1)*w])
    sam_ = np.copy(sam_masks[:, idx*w:(idx+1)*w])
    val_pixs = np.where(alp_.reshape(-1,) >= 0.95)[0]
    pose_ = np.concatenate([poses[idx], np.array([0,0,0,1]).reshape(1,4)], axis=0)
    pose_ = np.linalg.inv(pose_)
    pose_ = torch.from_numpy(pose_).float()
    ## visualize selected centers in 2D image
    vis_map_ = np.zeros((h, w))
    ## project pixels in the current frame to 3D points
    pixs_ = coords.clone()
    deps_ = torch.from_numpy(dep_.reshape(-1,1)).float()
    points_all = get_3d_points(K_, pose_, pixs_.unsqueeze(0), deps_.unsqueeze(0)).squeeze(0)
    points = points_all[val_pixs]
    pts_sam = sam_.reshape(-1,)[val_pixs]
    points_all = points_all.reshape(h, w, 3).numpy()
    imgs_3dpts[idx] = [points, pts_sam]
    ## assign mask centers to each part vertex
    for idi in range(max(0, np.min(sam_)), np.max(sam_)+1):
        xt, yt = find_new_cent(coords_np, sam_, idi)
        partid = part_dicts[(idx, idi)]
        mask_cents[partid] = [points_all[xt, yt], idx, xt, yt, np.sum(sam_==idi)]
        vis_map_[xt, yt] = 255
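# get_3d_points is defined elsewhere in this script; it is assumed here to be standard
# pinhole back-projection using the inverted (camera-to-world) pose built above:
#   X_cam   = depth * K^{-1} [u, v, 1]^T
#   X_world = pose_[:3, :3] @ X_cam + pose_[:3, 3]
# A minimal commented sketch under that assumption (hypothetical, row-vector convention):
# def get_3d_points(K, pose, pixs, deps):
#     # pixs: (B, N, 2) pixel coordinates, deps: (B, N, 1) depths, K: (3, 3), pose: (4, 4)
#     ones = torch.ones_like(deps)
#     cam = (torch.cat([pixs, ones], dim=-1) @ torch.inverse(K).transpose(-1, -2)) * deps
#     cam_h = torch.cat([cam, ones], dim=-1)
#     return (cam_h @ pose.transpose(-1, -2))[..., :3]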
overlap_lists = {'a': 0.2, 'b': 0.25, 'c': 0.3, 'd': 0.35, 'e': 0.4, 'f': 0.45, 'g': 0.5, 'h': 0.55, 'i': 0.6, 'j': 0.7}
cents_lists, solid_cents_num = [], []
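# Each entry of overlap_lists is an overlap-ratio threshold keyed by a short code; the
# graph construction below is rerun once per threshold, presumably so that part groupings
# at several granularities can be collected (cents_lists / solid_cents_num accumulate the
# per-threshold results). Exactly how ol_rate is compared to the overlap counts happens
# further down and is not shown in this excerpt.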
for ol_code, ol_rate in overlap_lists.items():
    # vis_maps = []
    # build initial graph
    vertices = {}  # key: vertex id, value: connected parts (edge)
    count_part = 0
    for pk, pv in part_dicts.items():
        vertices[count_part] = []
        count_part += 1
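    # one graph vertex per (frame, mask) part, each starting with an empty adjacency
    # list; edges are added by the cross-frame warping pass below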
    for idx in range(16):
        points = imgs_3dpts[idx][0]
        pts_sam = imgs_3dpts[idx][1]
        ## search for edges by warping to neighboring frames
        neigbs = [j % 16 for j in range(idx-neighbs, idx+neighbs+1) if j != idx]
        for j, idn in enumerate(neigbs):
            dep_ = np.copy(depth_maps[:, idn*w:(idn+1)*w])
            sam_ = np.copy(sam_masks[:, idn*w:(idn+1)*w])
            ## load pose for neighboring frame
            pose_ = np.concatenate([poses[idn], np.array([0,0,0,1]).reshape(1,4)], axis=0)
            pose_ = np.linalg.inv(pose_)
            pose_ = torch.from_numpy(pose_).float()
            ### propagate the current frame's 3D points into the neighboring frame
            pixs, projdep_ = get_2d_pixels(K_, pose_, points.unsqueeze(0))
            pixs = pixs.squeeze(0).numpy()
            pixs_r, pixs = find_neighb_pixs(pixs, default_size)
            renddep_ = dep_[pixs_r[..., 1], pixs_r[..., 0]]
            ## a point counts as visible in the neighboring frame if its projected depth
            ## is not significantly behind the rendered depth there
            visible_mask_ = (projdep_.numpy() < 1.05 * renddep_)
            visible_mask_ = np.concatenate([visible_mask_ for idt in range(4)], axis=0)
            pts_sam_ = np.concatenate([pts_sam for idt in range(4)], axis=0)
            pixs, pts_sam_ = unique_2d(pixs[visible_mask_], default_size, pts_sam_[visible_mask_])
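            # find_neighb_pixs is assumed to return, per projected point, its rounded pixel plus
            # three integer neighbors (hence the x4 replication of the visibility mask and SAM
            # labels above), and unique_2d to deduplicate the visible pixels while keeping the
            # source-frame SAM labels aligned; both helpers are defined elsewhere in this script,
            # so this description of their behavior is an assumption.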
            # if idnx==0:
            #     vis_map_[pixs[...,1], pixs[...,0]] = 255.0
            overlap_masks = sam_[pixs[..., 1], pixs[..., 0]]
            overlap_maskid = np.unique(overlap_masks).tolist()
            overlap_maskid = [idi for idi in overlap_maskid if idi > -1]
            for k, idi in enumerate(overlap_maskid):
                partid0 = part_dicts[(idn, idi)]
                ## corresponding parts in the current frame
                ref_sam = pts_sam_[overlap_masks==idi]
                ref_maskid = np.unique(ref_sam).tolist()
                ref_maskid = [idf for idf in ref_maskid if idf > -1]
                for jk, idf in enumerate(ref_maskid):
                    cond1 = np.sum(ref_sam==idf)
                    cond2 = np.sum(pts_sam_==idf)
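                    # cond1 counts the visible pixels of source part idf that land inside the
                    # neighboring mask idi, while cond2 counts all visible pixels of idf warped
                    # into frame idn; their ratio is presumably what gets tested against ol_rate
                    # (further down, outside this excerpt) to decide whether partid0 and
                    # part_dicts[(idx, idf)] are connected in `vertices`. That use is an
                    # inference from the surrounding code, not shown here.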