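        # Reconstruction losses for this stage: UCD (unidirectional Chamfer) matches
        # every point of the partial input to its nearest generated point; the full
        # two-sided CD against self.gt is computed only when ground truth exists.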
        # dist1, dist2, _, _ = distChamfer(x_map, self.target)
        dist1, dist2, _, _ = distChamfer(self.partial, x)
        directed_cd_loss = dist1.mean()
        if bool_gt:
            dist1, dist2, _, _ = distChamfer(x, self.gt)
            cd_loss = dist1.mean() + dist2.mean()
        # nll corresponds to a negative log-likelihood loss
        # nll = self.z ** 2 / 2
        # nll = hidden_z ** 2 / 2
        # nll = nll.mean()
        ### loss
        # loss = ftr_loss * self.w_D_loss[0] + nll * self.args.w_nll \
        #        + cd_loss * 1
        ucd_loss = directed_cd_loss
        uhd_loss = self.directed_hausdorff(self.partial.permute([0, 2, 1]), x.permute([0, 2, 1]))
        # uhd_loss = dist1.max()
        # loss += directed_hausdorff_loss * self.args.w_directed_hausdorff_loss * 0.001
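        # UHD takes the max (worst-case) partial-to-completion distance, while UCD
        # averages it; permute([0, 2, 1]) feeds directed_hausdorff channel-first
        # (B, 3, N) clouds, presumably its expected layout.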
        # save checkpoint for each stage
        self.checkpoint_flags.append('s_' + str(stage) + ' x')
        self.checkpoint_pcd.append(x)
        # self.checkpoint_flags.append('s_' + str(stage) + ' x_map')
        # self.checkpoint_pcd.append(x_map)
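        # checkpoint_flags and checkpoint_pcd are appended in lockstep, so flag i
        # always labels point cloud i.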
        # test only for each stage
        # if self.gt is not None:
        #     loss_dict = {
        #         'ftr_loss': np.asscalar(ftr_loss.detach().cpu().numpy()),
        #         'nll': np.asscalar(nll.detach().cpu().numpy()),
        #         'cd': np.asscalar(test_cd.detach().cpu().numpy()),
        #     }
        #     self.loss_log.append(loss_dict)
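        # (If this logging block is revived: np.asscalar was removed in NumPy 1.23;
        # tensor.item() is the drop-in replacement.)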
        ### save point clouds
        self.x = x
        if not osp.isdir(self.args.save_inversion_path):
            os.mkdir(self.args.save_inversion_path)
        x_np = x[0].detach().cpu().numpy()
        # x_map_np = x_map[0].detach().cpu().numpy()
        partial_np = self.partial[0].detach().cpu().numpy()
        if ith == -1:
            basename = str(self.pcd_id)
        else:
            basename = str(self.pcd_id) + '_' + str(ith)
        if self.gt is not None:
            gt_np = self.gt[0].detach().cpu().numpy()
            np.savetxt(osp.join(self.args.save_inversion_path, basename + '_gt.txt'), gt_np, fmt="%f;%f;%f")
        np.savetxt(osp.join(self.args.save_inversion_path, basename + '_x.txt'), x_np, fmt="%f;%f;%f")
        # np.savetxt(osp.join(self.args.save_inversion_path, basename + '_xmap.txt'), x_map_np, fmt="%f;%f;%f")
        np.savetxt(osp.join(self.args.save_inversion_path, basename + '_partial.txt'), partial_np, fmt="%f;%f;%f")
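        # Each output .txt stores one point per row as "x;y;z" (see fmt="%f;%f;%f"),
        # with suffixes _gt, _x, and _partial for ground truth, completion, and input.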
        if bool_gt:
            return ucd_loss.item(), uhd_loss.item(), cd_loss.item()
        else:
            return ucd_loss.item(), uhd_loss.item()
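    # The returned values are plain Python floats (via .item()), so callers can log
    # UCD/UHD (and CD when bool_gt is set) without keeping graph tensors alive.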
    def train_consistency_one_batch(self, curr_step, cons_feature, return_generated=False, ith=-1):
        loss_dict = {}
        count = 0
        stage = 0
        consistency_loss_value = 0
        # setup learning rate
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.schedulers[model_name].update(curr_step, self.args.G_lrs[0], ratio=0.99998)
        # set eval mode and clear gradients before the forward pass
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].eval()
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].optim.zero_grad()
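        # Consistency cycle: decode cons_feature into a cloud, re-encode it, and
        # require the re-disentangled (DI | MS | DS) features to reconstruct the
        # original feature.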
        x = self.Decoder(cons_feature)
        tree = [x]
        hidden_z = self.Encoder(tree)
        f_di = self.DI_Disentangler(hidden_z)
        f_ms = self.MS_Disentangler(hidden_z)
        f_ds = self.DS_Disentangler(hidden_z)
        f_combine = torch.cat([f_di, f_ms, f_ds], 1)
        consistency_loss = self.consistency_criterion(f_combine, cons_feature)
        consistency_loss_value += consistency_loss.item()
        consistency_loss *= 0.06  # fixed weight, applied after logging the raw value
        consistency_loss.backward()
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].optim.step()
        if return_generated:
            return x
        else:
            return consistency_loss_value
    def train_domain_one_batch(self, curr_step, alpha, switch_idx_default=None, ith=-1):
        loss_dict = {}
        count = 0
        stage = 0
        di_loss_value = 0
        ds_loss_value = 0
        vp_loss_value = 0
        # setup learning rate
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS", "DIC", "DSC", "VP"]:
            self.schedulers[model_name].update(curr_step, self.args.G_lrs[0], ratio=0.99998)