# NOTE: dataset-extraction residue removed here ("text / stringlengths / 1 / 93.6k");
# it was metadata from the export tool, not part of the source.
x_map = self.pre_process(x,stage=stage)
### compute losses
#ftr_loss = self.criterion(self.ftr_net, x_map, self.target)
#ftr_loss = self.criterion(self.ftr_net, x, self.gt)
dist1, dist2 , _, _ = distChamfer(self.partial, x_rec)
rec_cd_loss = dist1.mean() + dist2.mean()
dist1, dist2 , _, _ = distChamfer(self.partial, x)
#cd_loss = dist1.mean() + dist2.mean()
directed_cd_loss = dist1.mean()
#
dist1, dist2, _, _ = distChamfer(x_map, self.partial)
mask_cd_loss = dist1.mean()
# nll corresponds to a negative log-likelihood loss
#nll = self.z**2 / 2
nll = f_combine**2 / 2
nll = nll.mean()
### loss
#loss = directed_cd_loss + rec_cd_loss * 0.15 + mask_cd_loss * 0.01
if epoch < 160:
loss = rec_cd_loss * 0.15 + nll * self.args.w_nll
else:
loss = directed_cd_loss + rec_cd_loss * 0.15 + nll * self.args.w_nll
# optional to use directed_hausdorff
#if self.args.directed_hausdorff:
#directed_hausdorff_loss = self.directed_hausdorff(self.partial, x)
#loss += directed_hausdorff_loss*self.args.w_directed_hausdorff_loss * 0.001
# backward
loss.backward()
#print(self.G.optim.state_dict()['param_groups'][0]['lr'])
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
self.models[model_name].optim.step()
# save checkpoint for each stage
self.checkpoint_flags.append('s_'+str(stage)+' x')
self.checkpoint_pcd.append(x)
#self.checkpoint_flags.append('s_'+str(stage)+' x_map')
#self.checkpoint_pcd.append(x_map)
# test only for each stage
#directed_hausdorff_loss = self.directed_hausdorff(self.partial, x)
return loss.item()
def select_z(self, select_y=False):
    """Initialize the latent code before per-shape optimization.

    In this configuration ``select_num`` is hard-wired to 0, meaning no
    candidate-z search is performed: ``self.z`` is simply zero-initialized
    in place (under ``no_grad`` so the in-place write is not tracked).

    Args:
        select_y: unused placeholder, kept for interface compatibility.
    """
    with torch.no_grad():
        # The original code timed this step and guarded on
        # `if self.select_num == 0` right after assigning 0 to it — both
        # the unused timer and the always-true branch are removed here.
        self.select_num = 0
        self.z.zero_()
    return
def save_checkpoint(self, ckpt_path_name):
    """Serialize the weights of every sub-network into one checkpoint file.

    Args:
        ckpt_path_name: destination path passed straight to ``torch.save``.
    """
    # Collect each module's state dict under its checkpoint key, then
    # write them all out in a single torch.save call.
    state = {
        'Encoder_state_dict': self.Encoder.state_dict(),
        'Decoder_state_dict': self.Decoder.state_dict(),
        'DI': self.DI_Disentangler.state_dict(),
        'DS': self.DS_Disentangler.state_dict(),
        'MS': self.MS_Disentangler.state_dict(),
        'DIC': self.DI_Classifier.state_dict(),
        'DSC': self.DS_Classifier.state_dict(),
    }
    torch.save(state, ckpt_path_name)
    return
def pre_process(self,pcd,stage=-1):
"""
transfer a pcd in the observation space:
with the following mask_type:
none: for ['reconstruction', 'jittering', 'morphing']
ball_hole, knn_hole: randomly create the holes from complete pcd, similar to PF-Net
voxel_mask: baseline in ShapeInversion
tau_mask: baseline in ShapeInversion
k_mask: proposed component by ShapeInversion
"""
if self.mask_type == 'none':
return pcd
elif self.mask_type in ['ball_hole', 'knn_hole']:
### set static mask for each new partial pcd
if self.to_reset_mask:
# either ball hole or knn_hole, hence there might be unused configs
self.hole_k = self.args.hole_k
self.hole_radius = self.args.hole_radius
self.hole_n = self.args.hole_n
seeds = farthest_point_sample(pcd, self.hole_n) # shape (B,hole_n)
self.hole_centers = torch.stack([img[seed] for img, seed in zip(pcd,seeds)]) # (B, hole_n, 3)
# turn off mask after set mask, until next partial pcd
self.to_reset_mask = False
### preprocess
flag_map = torch.ones(1,2048,1).cuda()
pcd_new = pcd.unsqueeze(2).repeat(1,1,self.hole_n,1)
seeds_new = self.hole_centers.unsqueeze(1).repeat(1,2048,1,1)
delta = pcd_new.add(-seeds_new) # (B, 2048, hole_n, 3)