text
stringlengths
1
93.6k
self.partial = self.pre_process(self.gt, stage=-1)
# for visualization
self.checkpoint_flags.append('partial')
self.checkpoint_pcd.append(self.partial)
def run(self, ith=-1):
    """Execute one optimization step for this batch.

    Thin wrapper: all of the actual work happens in ``train_one_batch``.

    Parameters
    ----------
    ith : int, optional
        Iteration index forwarded unchanged to ``train_one_batch``;
        -1 means "no explicit index".
    """
    self.train_one_batch(ith)
def test_one_batch(self, use_ema=False, ith=-1):
    """Run one evaluation forward pass on the current partial point cloud.

    Puts all sub-networks in eval mode, encodes ``self.partial``,
    splits the latent code into DI / MS / DS factors (the MS factor is
    zeroed before decoding), decodes a completed cloud ``x``, logs the
    feature and Chamfer losses against ``self.gt``, and records the
    result in the checkpoint lists.

    Parameters
    ----------
    use_ema : bool, optional
        Unused here; kept for interface compatibility with callers.
    ith : int, optional
        Iteration index appended to the saved basename; -1 means none.

    Returns
    -------
    float or None
        Chamfer distance between the reconstruction and ``self.gt``,
        or ``None`` when no ground truth is available (the original
        raised ``NameError`` in that case).
    """
    stage = 0
    # forward: every sub-network in eval mode for this pass
    tree = [self.partial]
    for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
        self.models[model_name].eval()
    hidden_z = self.Encoder(tree)
    f_di = self.DI_Disentangler(hidden_z)
    #f_di_c = self.Z_Mapper(f_di)
    f_ms = self.MS_Disentangler(hidden_z)
    f_ds = self.DS_Disentangler(hidden_z)
    # Zero the MS factor before decoding.
    # NOTE(review): DI/MS/DS semantics inferred from names only -- confirm.
    f_combine_c = torch.cat([f_di, f_ms * 0., f_ds], 1)
    x = self.Decoder(f_combine_c)
    ### compute losses
    ftr_loss = self.criterion(self.ftr_net, x, self.gt)
    dist1, dist2, _, _ = distChamfer(x, self.gt)
    cd_loss = dist1.mean() + dist2.mean()
    # save checkpoint for each stage
    self.checkpoint_flags.append('s_' + str(stage) + ' x')
    self.checkpoint_pcd.append(x)
    # test metrics, logged only when ground truth exists
    # (the two identical `if self.gt is not None:` blocks were merged)
    test_cd = None
    if self.gt is not None:
        dist1, dist2, _, _ = distChamfer(x, self.gt)
        test_cd = dist1.mean() + dist2.mean()
        loss_dict = {
            # .item() replaces np.asscalar, which was removed in NumPy >= 1.23
            'ftr_loss': ftr_loss.detach().cpu().item(),
            #'nll': nll.detach().cpu().item(),
            'cd': test_cd.detach().cpu().item(),
        }
        self.loss_log.append(loss_dict)
    ### save point clouds
    self.x = x
    # makedirs(exist_ok=True) avoids the isdir/mkdir race of the original
    # and also creates missing parent directories
    os.makedirs(self.args.save_inversion_path, exist_ok=True)
    x_np = x[0].detach().cpu().numpy()
    partial_np = self.partial[0].detach().cpu().numpy()
    if ith == -1:
        basename = str(self.pcd_id)
    else:
        basename = str(self.pcd_id) + '_' + str(ith)
    if self.gt is not None:
        gt_np = self.gt[0].detach().cpu().numpy()
        #np.savetxt(osp.join(self.args.save_inversion_path, basename + '_gt.txt'), gt_np, fmt="%f;%f;%f")
    #np.savetxt(osp.join(self.args.save_inversion_path, basename + '_x.txt'), x_np, fmt="%f;%f;%f")
    #np.savetxt(osp.join(self.args.save_inversion_path, basename + '_partial.txt'), partial_np, fmt="%f;%f;%f")
    return test_cd.item() if test_cd is not None else None
def test_real_one_batch(self, bool_gt=False, use_ema=False, ith=-1):
loss_dict = {}
count = 0
stage = 0
# forward
tree = [self.partial]
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
self.models[model_name].eval()
hidden_z = self.Encoder(tree)
f_di = self.DI_Disentangler(hidden_z)
#f_di_c = self.Z_Mapper(f_di)
f_ms = self.MS_Disentangler(hidden_z)
f_ds = self.DS_Disentangler(hidden_z)
f_combine_c = torch.cat([f_di, f_ms*0., f_ds], 1)
x = self.Decoder(f_combine_c)
### compute losses
#ftr_loss = self.criterion(self.ftr_net, x_map, self.target)
#ftr_loss = self.criterion(self.ftr_net, x, self.gt)