# NOTE(review): the four lines below ("text", "stringlengths", "1", "93.6k") are
# dataset-viewer export residue, not Python — commented out so the file parses.
# text
# stringlengths
# 1
# 93.6k
def find_new_cent(coords_, sam_, index):
    """Pick a representative centre pixel for SAM mask `index` inside `sam_`.

    Skeletonizes the hole-filled binary mask and returns the last skeleton
    branch point found (a skeleton pixel with more than 2 skeleton neighbours);
    if the skeleton has no branch point, falls back to the per-axis median
    skeleton coordinate.

    NOTE(review): `coords_` is unused here; kept for interface compatibility.

    Returns:
        (med_y, med_x): row and column index into `sam_`.
    """
    skel = skeletonize(remove_small_holes(sam_ == index, area_threshold=4))
    skel_x, skel_y = np.where(skel)
    branch_pt = False
    for i in range(skel_x.shape[0]):
        # Clamp the 3x3 window at the image border: a start index of -1 would
        # wrap around under numpy slicing and produce an empty window, so
        # branch points on row/column 0 could never be detected.
        r0 = max(skel_x[i] - 1, 0)
        c0 = max(skel_y[i] - 1, 0)
        # Window sum > 3 means the pixel itself plus at least 3 neighbours
        # are on the skeleton, i.e. a branch point. No early break: the last
        # branch point found wins (original behaviour).
        if np.sum(skel[r0:skel_x[i] + 2, c0:skel_y[i] + 2]) > 3:
            med_y, med_x = skel_x[i], skel_y[i]
            branch_pt = True
    if not branch_pt:
        # No branch point anywhere on the skeleton: use its median coordinate.
        med_y = int(np.median(skel_x))
        med_x = int(np.median(skel_y))
    return med_y, med_x
def find_neighb_pixs(pixs_, img_size):
    """Map sub-pixel coordinates to integer pixels.

    Returns:
        rounded: nearest integer pixel per input point, clipped to the image,
            shape (nc, 2), int64.
        corners: the four bilinear-neighbour corners of every point —
            (floor_x, floor_y), (ceil_x, ceil_y), (floor_x+1, floor_y),
            (floor_x, floor_y+1) — stacked along axis 0 in that order,
            clipped to the image, int64.
    """
    rounded = np.clip(np.round(pixs_).astype(np.int64), 0, img_size - 1)

    lo = np.floor(pixs_)
    hi = np.ceil(pixs_)
    lo_x1 = lo.copy()
    lo_x1[..., 0] += 1
    lo_y1 = lo.copy()
    lo_y1[..., 1] += 1

    corners = np.concatenate([lo, hi, lo_x1, lo_y1], axis=0).astype(np.int64)
    corners = np.clip(corners, 0, img_size - 1)
    return rounded, corners
def unique_2d(x, img_size, ox):
    """Deduplicate integer 2-D pixel coordinates.

    Encodes each (x, y) pair as y * img_size + x, uniques the scalar codes
    (sorted, first occurrence kept), and decodes back to coordinates.

    Returns:
        decoded: unique coordinates, shape (m, 2), int64, sorted by code.
        selected: rows of `ox` at the first occurrence of each unique coord.
    """
    codes = x[..., 1] * img_size + x[..., 0]
    uniq_codes, first_idx = np.unique(codes, return_index=True)
    decoded = np.stack([uniq_codes % img_size, uniq_codes // img_size], axis=-1)
    return decoded.astype(np.int64), ox[first_idx]
def component_search(vertices, visited, root=0, mask_cents=None, largest=None):
    """Depth-first search from `root` over the adjacency map `vertices`.

    Marks reached vertices in `visited` (in place) and tracks the vertex with
    the largest area — `mask_cents[v][4]` — seen among recursed-into
    neighbours, updating `largest` as [vertex, area] in place.
    """
    if visited[root]:
        return
    visited[root] = True
    for nbr in vertices[root]:
        if visited[nbr]:
            continue
        # Strictly-greater comparison: on ties the earlier vertex is kept.
        if mask_cents[nbr][4] > largest[1]:
            largest[0], largest[1] = nbr, mask_cents[nbr][4]
        component_search(vertices, visited, nbr, mask_cents, largest)
def find_connected_parts(vertices, visited, parts=None, mask_cents=None, largest=None):
    """Enumerate connected components and record each component's largest vertex.

    For every still-unvisited vertex, seeds `largest` with that vertex and its
    area (`mask_cents[k][4]`), sweeps its component via `component_search`, and
    stores the largest-area vertex of the component in
    `parts[component_index]`.

    Args:
        vertices: adjacency mapping {vertex: iterable of neighbour vertices}.
        visited: mutable {vertex: bool}; updated in place by the search.
        parts: mutable dict, filled in place as {component_index: vertex}.
        mask_cents: per-vertex records; index 4 holds the mask area.
        largest: scratch list [vertex, area], mutated in place.
    """
    part_count = 0
    for k in vertices:
        if visited[k]:
            continue
        # Seed the running maximum with the component's root vertex.
        largest[0] = k
        largest[1] = mask_cents[k][4]
        component_search(vertices, visited, k, mask_cents, largest)
        # Record the largest vertex found in this component. (The original
        # also pre-assigned parts[part_count] = k, which this line always
        # overwrote — that dead store is removed.)
        parts[part_count] = largest[0]
        part_count += 1
def estimate_partnum(name):
samdir = "output/mvimgs"
renderdir = "renderer"
isoproc = True
neighbs = 1
depth_maps = np.load(os.path.join('output', renderdir, name, 'save_depth.npy'))
alpha_maps = np.load(os.path.join('output', renderdir, name, 'save_alpha.npy'))
objn = name.split('_')[0]
sam_mask_dir = os.path.join(samdir, objn)
sam_mask_path = [os.path.join(sam_mask_dir, f) for f in os.listdir(sam_mask_dir) if ('.npy' in f)]
sam_masks = np.load(sam_mask_path[0])
### load pre-defined K and poses, borrowed from Syncdreamer
K, _, _, _, poses = read_pickle(f'meta_info/camera-16.pkl')
h, w = 256, 256
default_size = 256
K = np.diag([w/default_size,h/default_size,1.0]) @ K
K_ = torch.from_numpy(K.astype(np.float32)).unsqueeze(0) # [1,3,3]
coords = torch.stack(torch.meshgrid(torch.arange(h), torch.arange(w)), -1)[:, :, (1, 0)] # h,w,2
coords_np = coords.clone().numpy()
coords = coords.float().reshape(h * w, 2) # h*w,2
coords = torch.cat([coords, torch.ones(h * w, 1, dtype=torch.float32)], 1) # h*w,3
# preprocess SAM masks
for idx in range(16):
alp_ = np.copy(alpha_maps[:, idx*w:(idx+1)*w])
sam_ = np.copy(sam_masks[:, idx*w:(idx+1)*w])
sam_new = sam_mask_preprocess(sam_, alp_, 0.02)
sam_masks[:, idx*w:(idx+1)*w] = sam_new
# common information of each sam mask
part_dicts = {} # key: (imgid, image_partid), value: vertex id
count_part = 0
parts_upbd, parts_lobd = 0, 100