"""
for scale in self.opt.scales:
disp = outputs[("disp", scale)]
if self.opt.v1_multiscale:
source_scale = scale
else:
disp = F.interpolate(
disp, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False)
source_scale = 0
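
        # disp_to_depth (from monodepth2's layers.py) maps the network's
        # sigmoid output into the [min_depth, max_depth] range, roughly:
        #   min_disp = 1 / max_depth; max_disp = 1 / min_depth
        #   depth = 1 / (min_disp + (max_disp - min_disp) * disp)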
        _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)

        outputs[("depth", 0, scale)] = depth

        for frame_id in self.opt.frame_ids[1:]:

            if frame_id == "s":
                T = inputs["stereo_T"]
            else:
                T = outputs[("cam_T_cam", 0, frame_id)]

            # from the authors of https://arxiv.org/abs/1712.00175
            if self.opt.pose_model_type == "posecnn":
                axisangle = outputs[("axisangle", 0, frame_id)]
                translation = outputs[("translation", 0, frame_id)]

                inv_depth = 1 / depth
                mean_inv_depth = inv_depth.mean(3, True).mean(2, True)
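                # scaling the translation by the mean inverse depth ties the
                # pose scale to the depth scale (the trick proposed in the
                # paper linked above)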
                T = transformation_from_parameters(
                    axisangle[:, 0],
                    translation[:, 0] * mean_inv_depth[:, 0],
                    invert=(frame_id < 0))

            if self.opt.use_learnable_K:
                # assemble a 4x4 intrinsics matrix from the learned normalised
                # parameters (fx, fy, cx, cy) and scale it to the resolution
                # of the source scale
                K = torch.eye(4, device=self.device, dtype=torch.float32)
                K[0, 0] = self.K[0]
                K[1, 1] = self.K[1]
                K[0, 2] = self.K[2]
                K[1, 2] = self.K[3]
                K[0, :] *= self.opt.width // (2 ** source_scale)
                K[1, :] *= self.opt.height // (2 ** source_scale)
                inv_K = torch.linalg.pinv(K)

                cam_points = self.backproject_depth[source_scale](
                    depth, inv_K.unsqueeze(0))
                pix_coords = self.project_3d[source_scale](
                    cam_points, K.unsqueeze(0), T)
            else:
                # backproject the predicted depth into a point cloud, then
                # project it into the adjacent view to get per-pixel sampling
                # coordinates
                cam_points = self.backproject_depth[source_scale](
                    depth, inputs[("inv_K", source_scale)])
                pix_coords = self.project_3d[source_scale](
                    cam_points, inputs[("K", source_scale)], T)
outputs[("sample", frame_id, scale)] = pix_coords
outputs[("color", frame_id, scale)] = F.grid_sample(
inputs[("color", frame_id, source_scale)],
outputs[("sample", frame_id, scale)],
padding_mode="border", align_corners=True)

            if not self.opt.disable_automasking:
                outputs[("color_identity", frame_id, scale)] = \
                    inputs[("color", frame_id, source_scale)]

            if self.opt.use_depth_hints:
                if self.opt.v1_multiscale:
                    raise NotImplementedError(
                        "Depth hints are currently not implemented for "
                        "v1 multiscale; please remove the --v1_multiscale flag")
                elif frame_id == 's' and scale == 0:
                    # generate the depth hint warped image (highest scale and
                    # stereo frame only); use a separate variable so the
                    # network's own `depth` prediction is not overwritten
                    depth_hint = inputs['depth_hint']
                    cam_points = self.backproject_depth[source_scale](
                        depth_hint, inputs[("inv_K", source_scale)])
                    pix_coords = self.project_3d[source_scale](
                        cam_points, inputs[("K", source_scale)], T)

                    outputs[("color_depth_hint", frame_id, scale)] = F.grid_sample(
                        inputs[("color", frame_id, source_scale)], pix_coords,
                        padding_mode="border", align_corners=True)


def compute_reprojection_loss(self, pred, target):
    """Computes reprojection loss between a batch of predicted and target images
    """
    abs_diff = torch.abs(target - pred)
    l1_loss = abs_diff.mean(1, True)

    if self.opt.no_ssim:
        reprojection_loss = l1_loss
    else:
        ssim_loss = self.ssim(pred, target).mean(1, True)
        # standard SSIM + L1 photometric mix from the monodepth2 paper
        reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss

    return reprojection_loss
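
# Usage sketch (hypothetical tensors, following the standard monodepth2
# training loop; not verbatim source):
#   target = inputs[("color", 0, 0)]                    # real target image
#   pred = outputs[("color", frame_id, scale)]          # warped source image
#   loss = self.compute_reprojection_loss(pred, target) # shape [B, 1, H, W]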


@staticmethod
def compute_proxy_supervised_loss(pred, target, valid_pixels, loss_mask):
    """Compute proxy supervised loss (depth hint loss) for prediction.

    - valid_pixels is a mask of valid depth hint pixels (i.e. non-zero depth values)
    - loss_mask is a mask of where to apply the proxy supervision (i.e. where
      the depth hint gave the smallest reprojection error)
    """
    # first compute the proxy supervised (log-L1) loss for all valid pixels
    depth_hint_loss = torch.log(torch.abs(target - pred) + 1) * valid_pixels

    # only keep pixels where the depth hint's reprojection loss is smallest
    depth_hint_loss = depth_hint_loss * loss_mask

    return depth_hint_loss
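
# End-to-end sketch of how the two losses combine (assumption: this mirrors
# the DepthHints recipe but is not verbatim source; names are illustrative):
#   target = inputs[("color", 0, 0)]
#   valid_pixels = (inputs['depth_hint'] > 0).float()
#   reproj = self.compute_reprojection_loss(outputs[("color", 's', 0)], target)
#   hint_reproj = self.compute_reprojection_loss(
#       outputs[("color_depth_hint", 's', 0)], target)
#   loss_mask = (hint_reproj < reproj).float()
#   hint_loss = self.compute_proxy_supervised_loss(
#       outputs[("depth", 0, 0)], inputs['depth_hint'],
#       valid_pixels, loss_mask)
#   total = reproj.mean() + hint_loss.sum() / (valid_pixels * loss_mask).sum()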