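        # Forward pass: encode the input tree, then split the hidden code with the
        # three disentangler heads. The abbreviations are not defined in this
        # snippet; DI/DS plausibly stand for domain-invariant/domain-specific and
        # MS for the missing-structure factor, but that is an assumption. Decoding
        # the full concatenation (f_combine) reconstructs the partial input, while
        # zeroing the MS factor (f_combine_c) decodes a candidate complete shape.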
        hidden_z = self.Encoder(tree)
        f_di = self.DI_Disentangler(hidden_z)
        f_ms = self.MS_Disentangler(hidden_z)
        #f_di_c = self.Z_Mapper(f_di)
        f_ds = self.DS_Disentangler(hidden_z)
        f_combine = torch.cat([f_di, f_ms, f_ds], 1)
        f_combine_c = torch.cat([f_di, f_ms*0., f_ds], 1)
        x_rec = self.Decoder(f_combine)
        x = self.Decoder(f_combine_c)
        # masking
        #x_map = self.pre_process(x, stage=stage)
        ### compute losses
        #ftr_loss = self.criterion(self.ftr_net, x_map, self.target)
        ftr_loss = self.criterion(self.ftr_net, x, self.gt)
        #dist1, dist2, _, _ = distChamfer(x_map, self.target)
        dist1, dist2, _, _ = distChamfer(x_rec, self.partial)
        rec_cd_loss = dist1.mean() + dist2.mean()
        rec_cd_loss *= 0.5
        dist1, dist2, _, _ = distChamfer(x, self.gt)
        cd_loss = dist1.mean() + dist2.mean()
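        # distChamfer is assumed to return, for each point in one cloud, the
        # squared distance to its nearest neighbour in the other (dist1: first
        # argument -> second, dist2: the reverse), plus nearest-neighbour indices;
        # summing the two means gives the symmetric Chamfer distance used above.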
        # nll corresponds to a negative log-likelihood loss
        #nll = self.z**2 / 2
        nll_ms = f_ms**2 / 2
        nll_ms = nll_ms.mean()
        nll = f_combine**2 / 2
        nll = nll.mean()
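        # Up to an additive constant, f**2 / 2 is the negative log-density of a
        # unit Gaussian, so these terms pull the latent codes toward N(0, I).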
        ### loss
        if complete_train:
            loss = ftr_loss * self.w_D_loss[0] + nll_ms * self.args.w_nll \
                + cd_loss * 1 + rec_cd_loss
        else:
            loss = ftr_loss * self.w_D_loss[0] + nll * self.args.w_nll \
                + cd_loss * 1 + rec_cd_loss
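        # The two branches differ only in the prior term: with complete_train
        # (presumably training on complete shapes) the Gaussian prior is applied
        # only to the missing-structure code f_ms; otherwise it regularises the
        # whole concatenated latent f_combine.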
        # optional to use directed_hausdorff
        #if self.args.directed_hausdorff:
        #    directed_hausdorff_loss = self.directed_hausdorff(self.target, x)
        #    loss += directed_hausdorff_loss * self.args.w_directed_hausdorff_loss
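        # The directed Hausdorff distance from target to x is the maximum, over
        # target points, of the distance to the nearest point in x; it penalises
        # regions of the target left uncovered by the prediction.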
        # backward
        loss.backward()
        #print(self.G.optim.state_dict()['param_groups'][0]['lr'])
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].optim.step()
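        # Each sub-network keeps its own optimizer, so the joint loss is stepped
        # separately per module after the single backward pass above.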
        # save checkpoint for each stage
        self.checkpoint_flags.append('s_' + str(stage) + ' x')
        self.checkpoint_pcd.append(x)
        #self.checkpoint_flags.append('s_' + str(stage) + ' x_map')
        #self.checkpoint_pcd.append(x_map)
        # test only for each stage
        if self.gt is not None:
            dist1, dist2, _, _ = distChamfer(x, self.gt)
            test_cd = dist1.mean() + dist2.mean()
            loss_dict = {
                'ftr_loss': ftr_loss.item(),
                'nll': nll.item(),
                'cd': test_cd.item(),
            }
            self.loss_log.append(loss_dict)
        return test_cd.item()  # assumes self.gt was provided above
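
    # For reference: distChamfer is assumed to be imported from a Chamfer-distance
    # utility with the (dist1, dist2, idx1, idx2) contract used above. A minimal
    # pure-PyTorch sketch with that contract (an assumption, not the version used
    # here) could look like:
    #
    #   def dist_chamfer(a, b):              # a: (B, N, 3), b: (B, M, 3)
    #       d = torch.cdist(a, b) ** 2       # (B, N, M) pairwise squared distances
    #       dist1, idx1 = d.min(dim=2)       # a -> b nearest neighbours
    #       dist2, idx2 = d.min(dim=1)       # b -> a nearest neighbours
    #       return dist1, dist2, idx1, idx2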
    def train_real_one_batch(self, curr_step, epoch, ith=-1):
        loss_dict = {}
        count = 0
        stage = 0
        # setup learning rate
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.schedulers[model_name].update(curr_step, self.args.G_lrs[0], ratio=0.99998)
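        # The scheduler API is assumed from the surrounding codebase; `ratio` is
        # presumably a per-step multiplicative decay applied to G_lrs[0].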
        # forward
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].eval()
        for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
            self.models[model_name].optim.zero_grad()
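        # Note: the modules are put in eval() mode (fixing BatchNorm/Dropout
        # behaviour) while their gradients are still zeroed here and presumably
        # stepped later, so optimisation runs with inference-mode statistics.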
        #tree = [self.z]
        tree = [self.partial]
        hidden_z = self.Encoder(tree)
        f_di = self.DI_Disentangler(hidden_z)
        #f_di_c = self.Z_Mapper(f_di)
        f_ms = self.MS_Disentangler(hidden_z)
        f_ds = self.DS_Disentangler(hidden_z)
        f_combine = torch.cat([f_di, f_ms, f_ds], 1)
        f_combine_c = torch.cat([f_di, f_ms*0., f_ds], 1)
        x_rec = self.Decoder(f_combine)
        x = self.Decoder(f_combine_c)
        # masking
        self.args.masking_option = "indexing"
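        # "indexing" presumably selects the masking strategy used by
        # self.pre_process (see the commented-out call in the previous method);
        # note this hard-coded assignment overrides any value passed via args.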