text stringlengths 1 93.6k |
|---|
pts_linspace(p4, p1),
|
]
|
# wall-wall
|
# loss_wallwall = 0
|
# walls_idx = [
|
# wallidx(p1, 1024, 512, -1, pc_height),
|
# wallidx(p2, 1024, 512, -1, pc_height),
|
# wallidx(p3, 1024, 512, -1, pc_height),
|
# wallidx(p4, 1024, 512, -1, pc_height),
|
# ]
|
# for wall_idx in walls_idx:
|
# wall_coordinates = torch.stack([wall_idx[:, 1], wall_idx[:, 0]])
|
# loss_wallwall -= map_coordinates(scoreedg[..., 0], wall_coordinates).mean() / len(walls_idx)
|
# ceil-wall
|
loss_ceilwall = 0
|
for seg in segs:
|
ceil_uv = xyz2uv(seg, z=-1)
|
ceil_idx = uv2idx(ceil_uv, 1024, 512)
|
ceil_coordinates = torch.stack([ceil_idx[:, 1], ceil_idx[:, 0]])
|
loss_ceilwall -= map_coordinates(scoreedg[..., 1], ceil_coordinates).mean() / len(segs)
|
# floor-wall
|
loss_floorwall = 0
|
for seg in segs:
|
floor_uv = xyz2uv(seg, z=pc_height)
|
floor_idx = uv2idx(floor_uv, 1024, 512)
|
floor_coordinates = torch.stack([floor_idx[:, 1], floor_idx[:, 0]])
|
loss_floorwall -= map_coordinates(scoreedg[..., 2], floor_coordinates).mean() / len(segs)
|
#losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall
|
losses = 1.0 * loss_cor + 1.0 * loss_ceilwall + 1.0 * loss_floorwall
|
if i_step is not None:
|
with torch.no_grad():
|
print('step %d: %.3f (cor %.3f, wall %.3f, ceil %.3f, floor %.3f)' % (
|
i_step, losses,
|
loss_cor, loss_wallwall,
|
loss_ceilwall, loss_floorwall))
|
return losses
|
def optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=100, verbose=False):
    """Refine a layout-corner estimate by gradient descent on the score maps.

    The 8 corners (4 ceiling + 4 floor, interleaved) are re-parameterized as a
    compact room model — ceiling centroid ``pc``, offset to the first corner
    ``pc_vec``, angle to the second corner ``pc_theta``, and floor height
    ``pc_height`` — which is then optimized with SGD so that the projected
    wall/ceiling/floor boundaries land on high-scoring pixels of ``scoreedg``
    and ``scorecor``.

    Args:
        cor_id: (8, 2) array of corner image coordinates, interleaved as
            [ceil0, floor0, ceil1, floor1, ...].
        scoreedg: (512, 1024, 3) edge score map (wall / ceiling / floor).
        scorecor: (512, 1024) corner score map.
        num_iters: number of SGD steps.
        verbose: if True, per-step losses are printed by the scoring function.

    Returns:
        (8, 2) refined corner coordinates in the same interleaved order.
    """
    assert scoreedg.shape == (512, 1024, 3)
    assert scorecor.shape == (512, 1024)

    Z = -1
    ceil_cor_id = cor_id[0::2]
    floor_cor_id = cor_id[1::2]
    # Snap all ceiling corners onto a common plane z = Z.
    ceil_cor_id, ceil_cor_id_xy = pano.constraint_cor_id_same_z(ceil_cor_id, scorecor, Z)
    ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z])

    # Initial parameterization from the constrained ceiling polygon.
    pc = np.mean(ceil_cor_id_xy, axis=0)
    pc_vec = ceil_cor_id_xy[0] - pc
    pc_theta = vecang(pc_vec, ceil_cor_id_xy[1] - pc)
    pc_height = pano.fit_avg_z(floor_cor_id, ceil_cor_id_xy, scorecor)

    # Move everything into torch tensors; parameters are optimized leaves.
    scoreedg = torch.FloatTensor(scoreedg)
    scorecor = torch.FloatTensor(scorecor)
    pc = torch.FloatTensor(pc)
    pc_vec = torch.FloatTensor(pc_vec)
    pc_theta = torch.FloatTensor([pc_theta])
    pc_height = torch.FloatTensor([pc_height])
    params = [pc, pc_vec, pc_theta, pc_height]
    for param in params:
        param.requires_grad = True

    optimizer = optim.SGD(params, lr=1e-3, momentum=0.9)

    # Track the best parameter snapshot seen during optimization; SGD with
    # momentum may overshoot, so the final iterate is not necessarily best.
    best = {'score': 1e9}
    for i_step in range(num_iters):
        optimizer.zero_grad()
        score = project2sphere_score(
            pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor,
            i_step if verbose else None)
        if score.item() < best['score']:
            best.update(
                score=score.item(),
                pc=pc.clone(),
                pc_vec=pc_vec.clone(),
                pc_theta=pc_theta.clone(),
                pc_height=pc_height.clone())
        score.backward()
        optimizer.step()

    # Decode the best snapshot back into image-space corner coordinates.
    opt_cor_id = pc2cor_id(
        best['pc'], best['pc_vec'], best['pc_theta'], best['pc_height'])
    opt_cor_id = opt_cor_id.detach().numpy()
    # Re-interleave ceiling/floor rows into [ceil0, floor0, ceil1, ...].
    opt_cor_id = np.stack([opt_cor_id[:4], opt_cor_id[4:]], axis=1).reshape(8, 2)
    return opt_cor_id
|
# <FILESEP>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.