            writer.add_scalar("{}".format(l), v, self.step)

        for j in range(min(4, self.opt.batch_size)):  # write a maximum of four images
            for s in self.opt.scales:
                for frame_id in self.opt.frame_ids:
                    writer.add_image(
                        "color_{}_{}/{}".format(frame_id, s, j),
                        inputs[("color", frame_id, s)][j].data, self.step)
                    if s == 0 and frame_id != 0:
                        writer.add_image(
                            "color_pred_{}_{}/{}".format(frame_id, s, j),
                            outputs[("color", frame_id, s)][j].data, self.step)

                writer.add_image(
                    "disp_{}/{}".format(s, j),
                    normalize_image(outputs[("disp", s)][j]), self.step)

                if self.opt.predictive_mask:
                    for f_idx, frame_id in enumerate(self.opt.frame_ids[1:]):
                        writer.add_image(
                            "predictive_mask_{}_{}/{}".format(frame_id, s, j),
                            outputs["predictive_mask"][("disp", s)][j, f_idx][None, ...],
                            self.step)

                elif not self.opt.disable_automasking:
                    writer.add_image(
                        "automask_{}/{}".format(s, j),
                        outputs["identity_selection/{}".format(s)][j][None, ...], self.step)

                # depth hint logging
                if self.opt.use_depth_hints:
                    if s == 0:
                        # convert the depth hint to disparity for visualisation and
                        # zero out pixels where no hint is available
                        disp = 1 / (inputs['depth_hint'] + 1e-7) * inputs['depth_hint_mask']
                        writer.add_image(
                            "depth_hint/{}".format(j),
                            normalize_image(disp[j]), self.step)

                    writer.add_image(
                        "depth_hint_pixels_{}/{}".format(s, j),
                        outputs["depth_hint_pixels/{}".format(s)][j][None, ...], self.step)

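    # (Illustrative note, not part of the original code) With the default
    # frame_ids [0, -1, 1] the loops above write TensorBoard image tags such as
    # color_0_0/0, color_-1_0/0, color_1_0/0 (input frames),
    # color_pred_-1_0/0, color_pred_1_0/0 (synthesised views), disp_0/0
    # (normalised disparity), and depth_hint/0, depth_hint_pixels_0/0 when
    # depth hints are enabled; the trailing /j indexes the batch element.
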
    def save_opts(self):
        """Save options to disk so we know what we ran this experiment with
        """
        models_dir = os.path.join(self.log_path, "models")
        if not os.path.exists(models_dir):
            os.makedirs(models_dir)
        to_save = self.opt.__dict__.copy()

        with open(os.path.join(models_dir, 'opt.json'), 'w') as f:
            json.dump(to_save, f, indent=2)

    def save_model(self):
        """Save model weights to disk
        """
        save_folder = os.path.join(self.log_path, "models", "weights_{}".format(self.epoch))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        for model_name, model in self.models.items():
            save_path = os.path.join(save_folder, "{}.pth".format(model_name))
            to_save = model.state_dict()
            if model_name == 'encoder':
                # save the sizes - these are needed at prediction time
                to_save['height'] = self.opt.height
                to_save['width'] = self.opt.width
                to_save['use_stereo'] = self.opt.use_stereo
            torch.save(to_save, save_path)

        save_path = os.path.join(save_folder, "{}.pth".format("adam"))
        torch.save(self.model_optimizer.state_dict(), save_path)

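    # (Illustrative) checkpoint layout produced by save_model(); the actual
    # file names come from the keys of self.models, these are typical for
    # monodepth2-style trainers:
    #   <log_path>/models/weights_<epoch>/
    #       encoder.pth   # state_dict plus 'height', 'width', 'use_stereo'
    #       depth.pth
    #       pose.pth
    #       adam.pth      # optimizer state, reloaded by load_model() below
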
    def load_model(self):
        """Load model(s) from disk"""
        self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder)

        assert os.path.isdir(self.opt.load_weights_folder), \
            f"Cannot find folder {self.opt.load_weights_folder}"
        print("loading model from folder {}".format(self.opt.load_weights_folder))

        for n in self.opt.models_to_load:
            print("Loading {} weights...".format(n))
            path = os.path.join(self.opt.load_weights_folder, "{}.pth".format(n))
            model_dict = self.models[n].state_dict()
            pretrained_dict = torch.load(path)
            # keep only keys that exist in the current model (drops e.g. the
            # 'height'/'width'/'use_stereo' metadata saved with the encoder)
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            model_dict.update(pretrained_dict)
            self.models[n].load_state_dict(model_dict)
            print(f"Loaded {len(pretrained_dict)} of {len(model_dict)} tensors for {n}.")

        # loading adam state
        optimizer_load_path = os.path.join(self.opt.load_weights_folder, "adam.pth")
        if os.path.isfile(optimizer_load_path):
            print("Loading Adam weights")
            optimizer_dict = torch.load(optimizer_load_path)
            self.model_optimizer.load_state_dict(optimizer_dict)
        else:
            print("Cannot find Adam weights so Adam is randomly initialized")
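
# --- Illustrative sketch, not part of the original trainer -----------------
# How the encoder checkpoint written by save_model() might be consumed at
# prediction time: the 'height'/'width' entries give the training resolution,
# and keys that are not real parameters are filtered out before loading.
# `networks.ResnetEncoder` and its (num_layers, pretrained) signature are
# assumptions here, as found in monodepth2-style repositories.
def load_encoder_for_prediction(weights_folder, device="cpu"):
    encoder_path = os.path.join(weights_folder, "encoder.pth")
    loaded_dict = torch.load(encoder_path, map_location=device)

    # training resolution stored alongside the weights by save_model()
    feed_height = loaded_dict["height"]
    feed_width = loaded_dict["width"]

    encoder = networks.ResnetEncoder(18, False)  # assumed encoder class
    # keep only the keys that belong to the encoder's state_dict
    encoder.load_state_dict(
        {k: v for k, v in loaded_dict.items() if k in encoder.state_dict()})
    encoder.to(device).eval()
    return encoder, feed_height, feed_width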