# NOTE(review): the four lines originally here ("text", "stringlengths",
# "1", "93.6k") were dataset-viewer column-header residue from extraction,
# not part of the source code. They have been replaced by this comment.
# forward
# NOTE(review): indentation has been stripped from this fragment and the
# enclosing `def` line is outside this excerpt, so loop/if bodies and the
# torch.no_grad() scope below are ambiguous — confirm block structure
# against the upstream source before relying on it.
# Put every sub-model into eval mode, then zero all optimizer gradients.
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS", "DIC", "DSC", "VP"]:
self.models[model_name].eval()
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS", "DIC", "DSC", "VP"]:
self.models[model_name].optim.zero_grad()
#tree = [self.z]
# ---- virtual (synthetic/source-domain) branch ----
# Encode the virtual partial point cloud and disentangle the latent code into
# three factors: DI (domain-invariant), DS (domain-specific), and MS
# (presumably model/shape-specific — confirm naming against the paper/repo).
virtual_tree = [self.virtual_partial]
virtual_hidden_z = self.Encoder(virtual_tree)
virtual_f_di = self.DI_Disentangler(virtual_hidden_z)
# Gradient-reversal layer (DANN-style, judging by the name ReverseLayerF):
# forward pass is identity; backward scales gradients by -alpha so the DI
# feature is trained adversarially against the DI classifier — confirm
# against ReverseLayerF's definition, which is not visible here.
virtual_f_di = ReverseLayerF.apply(virtual_f_di, alpha)
virtual_f_ds = self.DS_Disentangler(virtual_hidden_z)
virtual_f_ms = self.MS_Disentangler(virtual_hidden_z)
virtual_f_di_cl = self.DI_Classifier(virtual_f_di)
virtual_f_ds_cl = self.DS_Classifier(virtual_f_ds)
# Viewpoint prediction is computed only from the MS factor.
virtual_f_vp = self.V_Predictor(virtual_f_ms)
# Domain labels: virtual/source samples are class 0 (real samples get class 1
# further down).
di_s_label = torch.zeros(virtual_f_di_cl.shape[0]).long().cuda()
ds_s_label = torch.zeros(virtual_f_ds_cl.shape[0]).long().cuda()
virtual_di_loss = self.di_criterion(virtual_f_di_cl, di_s_label)
virtual_ds_loss = self.ds_criterion(virtual_f_ds_cl, ds_s_label)
# Viewpoint loss: either regress a rotation matrix (decoded from the
# predictor output) against self.rotmat, or azimuth/elevation angles
# against self.azel, depending on args.vp_mode.
if self.args.vp_mode == 'matrix':
virtual_f_vp = self.rotmatdecoder(virtual_f_vp)
virtual_vp_loss = self.vp_criterion(virtual_f_vp, self.rotmat)
elif self.args.vp_mode == 'angle':
virtual_vp_loss = self.vp_criterion(virtual_f_vp, self.azel)
else:
raise NotImplementedError
# Accumulate scalar logs; di_loss_value / ds_loss_value / vp_loss_value are
# initialized earlier, outside this visible fragment.
di_loss_value += virtual_di_loss.item()
ds_loss_value += virtual_ds_loss.item()
vp_loss_value += virtual_vp_loss.item()
# Hard-coded loss weights (0.01 on the adversarial DI term, 0.004 overall).
virtual_loss = (virtual_di_loss * 0.01 + virtual_ds_loss + virtual_vp_loss) * 0.004
virtual_loss.backward()
# ---- real (target-domain) branch: same pipeline, but no viewpoint loss —
# presumably because real scans lack pose ground truth (confirm). ----
real_tree = [self.real_partial]
real_hidden_z = self.Encoder(real_tree)
real_f_di = self.DI_Disentangler(real_hidden_z)
real_f_di = ReverseLayerF.apply(real_f_di, alpha)
real_f_ds = self.DS_Disentangler(real_hidden_z)
real_f_ms = self.MS_Disentangler(real_hidden_z)
real_f_di_cl = self.DI_Classifier(real_f_di)
real_f_ds_cl = self.DS_Classifier(real_f_ds)
# Real/target samples are domain class 1.
di_t_label = torch.ones(real_f_di_cl.shape[0]).long().cuda()
ds_t_label = torch.ones(real_f_ds_cl.shape[0]).long().cuda()
real_di_loss = self.di_criterion(real_f_di_cl, di_t_label)
real_ds_loss = self.ds_criterion(real_f_ds_cl, ds_t_label)
di_loss_value += real_di_loss.item()
ds_loss_value += real_ds_loss.item()
real_loss = (real_di_loss * 0.01 + real_ds_loss) * 0.004
real_loss.backward()
# ---- factor-swap ("switch") feature construction, detached from autograd.
# NOTE(review): with indentation lost it is unclear how much of the code
# below lies inside this no_grad block — confirm its extent. ----
with torch.no_grad():
# Stack virtual (first half) and real (second half) features per factor.
cons_feature_di = torch.cat([virtual_f_di, real_f_di], 0)
cons_feature_ds = torch.cat([virtual_f_ds, real_f_ds], 0)
cons_feature_ms = torch.cat([virtual_f_ms, real_f_ms], 0)
# Choose which factor to swap across domains (0 = DI, 1 = MS, 2 = DS)
# unless the caller pinned it via switch_idx_default.
if switch_idx_default is None:
switch_idx = np.random.randint(3)
else:
switch_idx = switch_idx_default
# NOTE(review): this random permutation is dead code — it is
# unconditionally overwritten two lines below by a deterministic
# half-rotation that pairs each virtual sample with a real one
# (assuming equal virtual/real batch sizes — confirm).
switch_perm = np.random.permutation(cons_feature_di.shape[0])
batch_size = cons_feature_di.shape[0]
switch_perm = np.arange(batch_size)
switch_perm = np.concatenate([switch_perm[batch_size//2:],switch_perm[:batch_size//2]], axis=0)
# NOTE(review): with indentation lost it is ambiguous whether the
# assignment after `pass` belonged inside the `switch_idx == 0` branch;
# as written, the `pass` looks like a no-op leftover. Confirm upstream.
if switch_idx == 0:
pass
cons_feature_di = cons_feature_di[switch_perm]
elif switch_idx == 1:
cons_feature_ms = cons_feature_ms[switch_perm]
elif switch_idx == 2:
cons_feature_ds = cons_feature_ds[switch_perm]
# Recombine the (one swapped, two original) factors into a single feature
# per sample, ordered DI | MS | DS along dim 1.
cons_feature = torch.cat([cons_feature_di,cons_feature_ms, cons_feature_ds], 1)
#print(self.G.optim.state_dict()['param_groups'][0]['lr'])
# Apply the accumulated gradients. NOTE(review): "Decoder" is zeroed and
# eval'd above but intentionally(?) absent from this step list — it
# receives no loss in this fragment; confirm that is deliberate.
for model_name in ["Encoder", "DI", "DS", "MS", "DIC", "DSC", "VP"]:
self.models[model_name].optim.step()
# save checkpoint for each stage
#self.checkpoint_flags.append('s_'+str(stage)+' x')
#self.checkpoint_pcd.append(x)
#self.checkpoint_flags.append('s_'+str(stage)+' x_map')
#self.checkpoint_pcd.append(x_map)
# test only for each stage
# Returns accumulated scalar losses plus the swapped consistency feature.
return di_loss_value, ds_loss_value, vp_loss_value, cons_feature
def train_one_batch(self, curr_step, ith=-1, complete_train=False):
"""Run one training step on the current batch (fragment).

NOTE(review): this method's body continues beyond the visible excerpt,
and indentation has been stripped from the file; only the visible setup
is documented here. Semantics of `ith` and `complete_train` are not
determinable from this fragment — confirm against the full source.
"""
loss_dict = {}
count = 0
stage = 0
# setup learning rate
# Update each model's LR scheduler toward args.G_lrs[0]; the meaning of
# ratio=0.99998 (presumably an exponential decay factor) is defined by
# the scheduler's update() implementation, which is not visible here.
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
self.schedulers[model_name].update(curr_step, self.args.G_lrs[0], ratio=0.99998)
# forward
# Switch sub-models to eval mode and clear any stale gradients before the
# forward/backward passes that follow (beyond this excerpt).
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
self.models[model_name].eval()
for model_name in ["Encoder", "Decoder", "DI", "DS", "MS"]:
self.models[model_name].optim.zero_grad()
#tree = [self.z]
tree = [self.partial]