            if self.opt.avg_reprojection:
                reprojection_loss = reprojection_losses.mean(1, keepdim=True)
            else:
                # unlike Monodepth2, compute the mins as we go: take the per-pixel
                # minimum over the source images' reprojection losses
                reprojection_loss, _ = torch.min(reprojection_losses, dim=1, keepdim=True)
            if not self.opt.disable_automasking:
                # add random numbers to break ties, so that for static pixels (where the
                # identity and reprojection losses are near identical) the per-pixel
                # minimum is not systematically biased towards either loss
                identity_reprojection_loss += torch.randn(
                    identity_reprojection_loss.shape).cuda() * 0.00001
            # find minimum losses from [reprojection, identity, depth hints reprojection]
            reprojection_loss_mask, depth_hint_loss_mask = \
                self.compute_loss_masks(reprojection_loss,
                                        identity_reprojection_loss,
                                        depth_hint_reproj_loss)
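            # the masks are assumed to be binary and per-pixel: reprojection_loss_mask
            # selects pixels where the standard reprojection loss beats the identity loss
            # (automasking), and depth_hint_loss_mask selects pixels where the depth hint
            # reprojection error is the smallest of the three; each loss below is then
            # averaged only over the pixels its mask selects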
            # standard reprojection loss
            reprojection_loss = reprojection_loss * reprojection_loss_mask
            reprojection_loss = reprojection_loss.sum() / (reprojection_loss_mask.sum() + 1e-7)

            outputs["identity_selection/{}".format(scale)] = (1 - reprojection_loss_mask).float()
            losses['reproj_loss/{}'.format(scale)] = reprojection_loss
            # proxy supervision loss
            depth_hint_loss = 0
            if self.opt.use_depth_hints:
                target = inputs['depth_hint']
                pred = outputs[('depth', 0, scale)]
                valid_pixels = inputs['depth_hint_mask']
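                # compute_proxy_supervised_loss is expected to penalise pred against the
                # depth hint (a log L1 loss in the Depth Hints paper), restricted to
                # valid_pixels and zeroed wherever the hint did not win the per-pixel minimum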
                depth_hint_loss = self.compute_proxy_supervised_loss(pred, target, valid_pixels,
                                                                     depth_hint_loss_mask)
                depth_hint_loss = depth_hint_loss.sum() / (depth_hint_loss_mask.sum() + 1e-7)
                # save for logging
                outputs["depth_hint_pixels/{}".format(scale)] = depth_hint_loss_mask
                losses['depth_hint_loss/{}'.format(scale)] = depth_hint_loss
            loss += reprojection_loss + depth_hint_loss
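
            # smoothness regulariser: normalise disparity by its mean so the penalty is
            # invariant to the overall disparity scale, then penalise disparity gradients
            # away from image edges (edge-aware smoothness, as in Monodepth2)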
            mean_disp = disp.mean(2, True).mean(3, True)
            norm_disp = disp / (mean_disp + 1e-7)
            smooth_loss = get_smooth_loss(norm_disp, color)
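            # weight the smoothness term down at coarser scales (divide by 2 ** scale)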
            loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale)
            total_loss += loss
            losses["loss/{}".format(scale)] = loss

        total_loss /= self.num_scales
        losses["loss"] = total_loss
        return losses
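
    # ------------------------------------------------------------------
    # Illustrative sketches (not part of the original file) of the two
    # helpers called from compute_losses above. They assume the masks come
    # from a per-pixel argmin over the candidate losses, and that the proxy
    # loss is a log L1 penalty as described in the Depth Hints paper; the
    # repository's actual compute_loss_masks / compute_proxy_supervised_loss
    # may differ in detail.
    # ------------------------------------------------------------------
    def _sketch_compute_loss_masks(self, reprojection_loss,
                                   identity_reprojection_loss,
                                   depth_hint_reproj_loss):
        """Sketch: build binary per-pixel masks via an argmin over the
        stacked [reprojection, identity, depth hint] losses
        """
        if identity_reprojection_loss is None:
            # automasking disabled: apply the reprojection loss everywhere
            reprojection_loss_mask = torch.ones_like(reprojection_loss)
        else:
            all_losses = torch.cat([reprojection_loss, identity_reprojection_loss], dim=1)
            idxs = torch.argmin(all_losses, dim=1, keepdim=True)
            reprojection_loss_mask = (idxs == 0).float()

        # assumes automasking is enabled whenever depth hints are used
        depth_hint_loss_mask = None
        if depth_hint_reproj_loss is not None:
            all_losses = torch.cat([reprojection_loss, identity_reprojection_loss,
                                    depth_hint_reproj_loss], dim=1)
            idxs = torch.argmin(all_losses, dim=1, keepdim=True)
            depth_hint_loss_mask = (idxs == 2).float()

        return reprojection_loss_mask, depth_hint_loss_mask

    @staticmethod
    def _sketch_compute_proxy_supervised_loss(pred, target, valid_pixels, loss_mask):
        """Sketch: log L1 loss between predicted depth and the depth hint,
        kept only on valid pixels where the hint won the per-pixel argmin
        """
        loss = torch.log(torch.abs(target - pred) + 1) * valid_pixels
        return loss * loss_mask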

    def compute_depth_losses(self, inputs, outputs, losses):
        """Compute depth metrics, to allow monitoring during training

        This isn't particularly accurate as it averages over the entire batch,
        so is only used to give an indication of validation performance
        """
        depth_pred = outputs[("depth", 0, 0)]
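        # upsample to the full KITTI ground-truth resolution (375 x 1242) and clamp
        # to the standard evaluation range of 1e-3 to 80 metres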
        depth_pred = torch.clamp(F.interpolate(
            depth_pred, [375, 1242], mode="bilinear", align_corners=False), 1e-3, 80)
        depth_pred = depth_pred.detach()

        depth_gt = inputs["depth_gt"]
        mask = depth_gt > 0
        # garg/eigen crop: restrict evaluation to the standard KITTI crop region
        crop_mask = torch.zeros_like(mask)
        crop_mask[:, :, 153:371, 44:1197] = 1
        mask = mask * crop_mask

        depth_gt = depth_gt[mask]
        depth_pred = depth_pred[mask]
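        # monocular predictions are only defined up to scale, so align the medians
        # with the ground truth before computing errors (median scaling)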
        depth_pred *= torch.median(depth_gt) / torch.median(depth_pred)
        depth_pred = torch.clamp(depth_pred, min=1e-3, max=80)

        depth_errors = compute_depth_errors(depth_gt, depth_pred)

        for i, metric in enumerate(self.depth_metric_names):
            losses[metric] = np.array(depth_errors[i].cpu())

    def log_time(self, batch_idx, duration, loss):
        """Print a logging statement to the terminal
        """
        samples_per_sec = self.opt.batch_size / duration
        time_sofar = time.time() - self.start_time
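        # estimate remaining time as steps_left * seconds_per_step:
        #   (num_total_steps - step) * (time_sofar / step)
        #     = (num_total_steps / step - 1.0) * time_sofar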
        training_time_left = (
            self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0
        print_string = "epoch {:>3} | batch {:>6} | examples/s: {:5.1f}" + \
            " | loss: {:.5f} | time elapsed: {} | time left: {}"
        print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss,
                                  sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left)))

    def log(self, mode, inputs, outputs, losses):
        """Write an event to the tensorboard events file
        """
        writer = self.writers[mode]
        for l, v in losses.items():