# NOTE(review): the four lines that were here ("text", "stringlengths", "1",
# "93.6k") were dataset-viewer column headers leaked in by an extraction
# pipeline — they are not part of the original source file.
# --- tail of __init__ (the `def __init__` line is above this view) ---
# for visualization
self.checkpoint_pcd = [] # to save the staged checkpoints
self.checkpoint_flags = [] # plot subtitle
# Broadcast a single discriminator-loss weight across all optimization
# stages so that w_D_loss always has one entry per stage
# (len(args.G_lrs) is presumably the number of stages — TODO confirm).
if len(args.w_D_loss) == 1:
self.w_D_loss = args.w_D_loss * len(args.G_lrs)
else:
self.w_D_loss = args.w_D_loss
def finetune(self, bool_gt=False, save_curve=False, ith=-1):
    """Optimize the latent code ``self.z`` (and, on flagged stages, the
    Decoder weights) so the decoded shape matches ``self.partial``.

    The routine first encodes the partial input and disentangles it into
    three feature components (DI / MS / DS), initializes ``self.z`` from
    them (with the MS component zeroed), then runs a multi-stage
    optimization: each stage sets a Decoder learning rate (``G_lrs``), a
    ``z`` learning rate (``z_lrs``) and an iteration budget, and minimizes
    a feature loss + bidirectional Chamfer loss + Gaussian NLL prior.

    Args:
        bool_gt: if True, use the short ground-truth finetuning schedule
            (iterations ``[0, 0, 2, 1]``); otherwise the default
            ``[1, 4, 4, 1]`` schedule.
        save_curve: if True, record the Chamfer distance to ``self.gt``
            after every step in a local ``cd_curve_list``.
        ith: sample index; kept for interface compatibility — not read
            inside this routine.

    Side effects:
        mutates ``self.args.G_lrs`` / ``self.args.z_lrs``,
        ``self.iterations``, ``self.k_mask_k``, ``self.z`` (in-place copy),
        the Decoder weights (on stages where ``self.update_G_stages`` is
        True), and stores the last decoded point cloud in ``self.x``.
    """
    # --- per-call optimization schedule -----------------------------------
    if bool_gt:
        # short schedule used when finetuning against ground truth
        self.args.G_lrs = [2e-7, 1e-6, 1e-6, 2e-7]
        self.args.z_lrs = [9e-3, 2e-3, 1e-3, 1e-6]
        self.iterations = [0, 0, 2, 1]
        self.k_mask_k = [1, 1, 1, 1]
    else:
        self.args.G_lrs = [2e-7, 1e-6, 1e-6, 2e-7]
        self.args.z_lrs = [9e-3, 2e-3, 1e-3, 1e-6]
        self.iterations = [1, 4, 4, 1]
        self.k_mask_k = [1, 1, 1, 1]

    # --- initialize z from the partial input (no gradients needed) --------
    tree = [self.partial]
    for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
        self.models[model_name].eval()
    with torch.no_grad():
        hidden_z = self.Encoder(tree)
        f_di = self.DI_Disentangler(hidden_z)
        f_ms = self.MS_Disentangler(hidden_z)
        f_ds = self.DS_Disentangler(hidden_z)
        # the MS component is zeroed before concatenation, so only the
        # DI and DS features seed the latent code
        f_combine_c = torch.cat([f_di, f_ms * 0., f_ds], 1)
        self.z.copy_(f_combine_c)

    loss_dict = {}  # kept for parity; presumably filled after this view
    curr_step = 0
    count = 0       # retained; not used within this routine
    if save_curve:
        cd_curve_list = []

    for stage, iteration in enumerate(self.iterations):
        for i in range(iteration):
            curr_step += 1
            # per-stage learning rates (warm-up/decay handled by schedulers)
            self.schedulers['Decoder'].update(curr_step, self.args.G_lrs[stage])
            self.z_scheduler.update(curr_step, self.args.z_lrs[stage])

            # forward
            self.z_optim.zero_grad()
            if self.update_G_stages[stage]:
                self.Decoder.optim.zero_grad()
            tree = self.z
            x = self.Decoder(tree)
            # mask the prediction so it is comparable to the partial input
            x_map = self.pre_process(x, stage=stage)

            ### compute losses
            ftr_loss = self.criterion(self.ftr_net, x_map, self.partial)
            # unidirectional CD (partial -> x); monitored, not part of `loss`
            dist1, _, _, _ = distChamfer(self.partial, x)
            ucd_loss = dist1.mean()
            dist1, dist2, _, _ = distChamfer(x_map, self.partial)
            cd_loss = dist1.mean() + dist2.mean()
            if self.gt is not None:
                # CD against ground truth — monitoring only, no gradient use
                dist1, dist2, _, _ = distChamfer(x, self.gt)
                gt_cd_loss = dist1.mean() + dist2.mean()

            # optional early stopping: exits the current stage only;
            # the remaining stages still run
            if self.args.early_stopping:
                if cd_loss.item() < self.args.stop_cd:
                    break

            # Gaussian prior on z: negative log-likelihood up to a constant
            nll = (self.z ** 2 / 2).mean()

            ### total loss
            loss = ftr_loss * self.w_D_loss[0] + nll * self.args.w_nll \
                + cd_loss * 1

            # optional directed Hausdorff term — computed only when enabled
            # (previously it was computed every iteration regardless)
            if self.args.directed_hausdorff:
                print("Using Hausdorff")
                directed_hausdorff_loss = self.directed_hausdorff(
                    self.partial.permute([0, 2, 1]), x.permute([0, 2, 1]))
                loss += directed_hausdorff_loss * self.args.w_directed_hausdorff_loss

            # backward
            loss.backward()
            self.z_optim.step()
            if self.update_G_stages[stage]:
                self.Decoder.optim.step()

            if save_curve and self.gt is not None:
                dist1, dist2, _, _ = distChamfer(x, self.gt)
                test_cd = dist1.mean() + dist2.mean()
                cd_curve_list.append(test_cd.item())

            # keep the most recent decoded shape
            self.x = x