text
stringlengths 1
93.6k
|
|---|
if not self.opt.no_ssim:
|
self.ssim = SSIM()
|
self.ssim.to(self.device)
|
self.backproject_depth = {}
|
self.project_3d = {}
|
for scale in self.opt.scales:
|
h = self.opt.height // (2 ** scale)
|
w = self.opt.width // (2 ** scale)
|
self.backproject_depth[scale] = BackprojectDepth(self.opt.batch_size, h, w)
|
self.backproject_depth[scale].to(self.device)
|
self.project_3d[scale] = Project3D(self.opt.batch_size, h, w)
|
self.project_3d[scale].to(self.device)
|
self.depth_metric_names = [
|
"de/abs_rel", "de/sq_rel", "de/rms", "de/log_rms", "da/a1", "da/a2", "da/a3"]
|
print("Using split:\n ", self.opt.split)
|
print("There are {:d} training items and {:d} validation items\n".format(
|
len(train_dataset), len(val_dataset)))
|
self.save_opts()
|
def set_train(self):
    """Switch every sub-network in self.models into training mode."""
    networks = self.models.values()
    for network in networks:
        network.train()
|
def set_eval(self):
    """Switch every sub-network in self.models into evaluation mode."""
    networks = self.models.values()
    for network in networks:
        network.eval()
|
def train(self):
    """Run the entire training pipeline.

    Resets the epoch/step counters, records the wall-clock start time,
    then runs one epoch at a time, checkpointing every
    ``opt.save_frequency`` epochs.
    """
    self.epoch = 0
    self.step = 0
    self.start_time = time.time()

    for epoch in range(self.opt.num_epochs):
        self.epoch = epoch
        self.run_epoch()
        # checkpoint on a fixed epoch cadence (1-based count)
        if (epoch + 1) % self.opt.save_frequency == 0:
            self.save_model()
|
def run_epoch(self):
    """Run a single epoch of training and validation.

    Iterates the training loader once, performing one optimizer step per
    batch with periodic logging/validation, and advances the learning-rate
    scheduler once at the end of the epoch.
    """
    print("Training")
    self.set_train()

    for batch_idx, inputs in enumerate(self.train_loader):

        before_op_time = time.time()

        outputs, losses = self.process_batch(inputs)

        self.model_optimizer.zero_grad()
        losses["loss"].backward()
        self.model_optimizer.step()

        duration = time.time() - before_op_time

        # log less frequently after the first 2000 steps to save time & disk space
        early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000
        late_phase = self.step % 2000 == 0

        if early_phase or late_phase:
            self.log_time(batch_idx, duration, losses["loss"].cpu().data)

            if "depth_gt" in inputs:
                self.compute_depth_losses(inputs, outputs, losses)

            self.log("train", inputs, outputs, losses)
            self.val()

        self.step += 1

    # PyTorch >= 1.1 requires scheduler.step() AFTER optimizer.step();
    # stepping the scheduler at the start of the epoch (as before) skips
    # the first learning-rate value of the schedule entirely.
    self.model_lr_scheduler.step()
|
def process_batch(self, inputs):
|
"""Pass a minibatch through the network and generate images and losses
|
"""
|
for key, ipt in inputs.items():
|
inputs[key] = ipt.to(self.device)
|
if self.opt.pose_model_type == "shared":
|
# If we are using a shared encoder for both depth and pose (as advocated
|
# in monodepthv1), then all images are fed separately through the depth encoder.
|
all_color_aug = torch.cat([inputs[("color_aug", i, 0)] for i in self.opt.frame_ids])
|
all_features = self.models["encoder"](all_color_aug)
|
all_features = [torch.split(f, self.opt.batch_size) for f in all_features]
|
features = {}
|
for i, k in enumerate(self.opt.frame_ids):
|
features[k] = [f[i] for f in all_features]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.