            outputs = self.models["depth"](features[0])
        else:
            # Otherwise, we only feed the image with frame_id 0 through the depth encoder
            features = self.models["encoder"](inputs["color_aug", 0, 0])
            outputs = self.models["depth"](features)
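
        # Optional explainability-style mask (cf. Zhou et al., SfMLearner),
        # enabled with the --predictive_mask training flag.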
        if self.opt.predictive_mask:
            outputs["predictive_mask"] = self.models["predictive_mask"](features)

        if self.use_pose_net:
            outputs.update(self.predict_poses(inputs, features))

        self.generate_images_pred(inputs, outputs)
        losses = self.compute_losses(inputs, outputs)

        return outputs, losses

    def predict_poses(self, inputs, features):
        """Predict poses between input frames for monocular sequences.
        """
        outputs = {}
        if self.num_pose_frames == 2:
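            # num_pose_frames is 2 in the default "pairs" mode; with
            # --pose_model_input all, every frame is instead fed to the pose
            # network in a single pass (the else branch below).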
            # In this setting, we compute the pose to each source frame via a
            # separate forward pass through the pose network.

            # select what features the pose network takes as input
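            # (the augmented color images themselves, or the depth encoder's
            # per-frame features when the encoder is shared)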
            if self.opt.pose_model_type == "shared":
                pose_feats = {f_i: features[f_i] for f_i in self.opt.frame_ids}
            else:
                pose_feats = {f_i: inputs["color_aug", f_i, 0] for f_i in self.opt.frame_ids}

            for f_i in self.opt.frame_ids[1:]:
                if f_i != "s":
                    # To maintain ordering we always pass frames in temporal order
                    if f_i < 0:
                        pose_inputs = [pose_feats[f_i], pose_feats[0]]
                    else:
                        pose_inputs = [pose_feats[0], pose_feats[f_i]]
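
                    # separate_resnet encodes the concatenated pair and hands the
                    # resulting feature list to the pose decoder; posecnn consumes
                    # the stacked raw images directly.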
                    if self.opt.pose_model_type == "separate_resnet":
                        pose_inputs = [self.models["pose_encoder"](torch.cat(pose_inputs, 1))]
                    elif self.opt.pose_model_type == "posecnn":
                        pose_inputs = torch.cat(pose_inputs, 1)

                    axisangle, translation = self.models["pose"](pose_inputs)
                    outputs[("axisangle", 0, f_i)] = axisangle
                    outputs[("translation", 0, f_i)] = translation

                    # Invert the matrix if the frame id is negative
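                    # (frames enter the network in temporal order, so it predicts
                    # the transform from the earlier to the later frame; inverting
                    # when f_i < 0 keeps cam_T_cam mapping frame 0 to frame f_i)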
                    outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters(
                        axisangle[:, 0], translation[:, 0], invert=(f_i < 0))

        else:
            # Here we input all frames to the pose net (and predict all poses) together
            if self.opt.pose_model_type in ["separate_resnet", "posecnn"]:
                pose_inputs = torch.cat(
                    [inputs[("color_aug", i, 0)] for i in self.opt.frame_ids if i != "s"], 1)

                if self.opt.pose_model_type == "separate_resnet":
                    pose_inputs = [self.models["pose_encoder"](pose_inputs)]

            elif self.opt.pose_model_type == "shared":
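                # reuse the per-frame features already computed by the depth
                # encoder in process_batch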
                pose_inputs = [features[i] for i in self.opt.frame_ids if i != "s"]

            axisangle, translation = self.models["pose"](pose_inputs)

            for i, f_i in enumerate(self.opt.frame_ids[1:]):
                if f_i != "s":
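                    # a single forward pass predicts every pose at once; index i
                    # selects frame f_i's slice along dimension 1 of the outputs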
                    outputs[("axisangle", 0, f_i)] = axisangle
                    outputs[("translation", 0, f_i)] = translation
                    outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters(
                        axisangle[:, i], translation[:, i])

        return outputs

    def val(self):
        """Validate the model on a single minibatch
        """
        self.set_eval()
        try:
            inputs = next(self.val_iter)
        except StopIteration:
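            # rewind the exhausted validation iterator so val() can be called
            # indefinitely during training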
            self.val_iter = iter(self.val_loader)
            inputs = next(self.val_iter)

        with torch.no_grad():
            outputs, losses = self.process_batch(inputs)

            if "depth_gt" in inputs:
                self.compute_depth_losses(inputs, outputs, losses)

            self.log("val", inputs, outputs, losses)
            del inputs, outputs, losses

        self.set_train()

    def generate_images_pred(self, inputs, outputs):
        """Generate the warped (reprojected) color images for a minibatch.
        Generated images are saved into the `outputs` dictionary.
        """
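        # In the full implementation this backprojects each predicted depth map
        # to a point cloud (BackprojectDepth), transforms it by the predicted
        # cam_T_cam pose, reprojects it into each source view (Project3D), and
        # samples the source color image at those points with F.grid_sample.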