import os
import time

import torch

# Set up output directories (create_dir and the other helpers used below are
# project-local utilities)
create_dir(base_dir, prompt=True)
create_dir(loss_dir)
create_dir(sample_B_dir)
create_dir(sample_F_dir)
create_dir(ckpt_dir)

# Evaluate initial PSNR / SSIM / LPIPS and visualize X1 samples
t0 = disc.get_ts(disc_steps)[0]
t1 = disc.get_ts(disc_steps)[-1]
curr_PSNR_G, curr_SSIM_G, curr_LPIPS_G = eval_inverse(sampler, t1, t0, net_ema, n_FID, tweedie=False, verbose=True)
curr_PSNR_g, curr_SSIM_g, curr_LPIPS_g = eval_inverse(sampler, t1, t1, net_ema, n_FID, tweedie=True, verbose=True)
viz_img(X1_eval, t1, t1, net_ema, sample_B_dir, 0)
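
# 'G' metrics presumably measure the full jump t1 -> t0 through the EMA
# network, while 'g' metrics use a single Tweedie (denoiser) estimate at t1
# (tweedie=True with start time == end time).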

# Training loop
seed = int(time.time())
while True:
    if double_iter is not None:
        sub_steps = min(disc_steps, init_steps * 2 ** (avgmeter.idx // double_iter))
    else:
        sub_steps = init_steps
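
    # Step-doubling curriculum: sub_steps doubles every double_iter iterations
    # and is capped at disc_steps, e.g. (hypothetical numbers) init_steps = 4
    # with double_iter = 5000 yields 4, 8, 16, ... as training progresses.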

    opt_CTM.zero_grad()
    for accum_idx in range(n_grad_accum):
        # Sample data
        X0, X1 = sampler.sample_joint()
        t_sm_idx, t_sm = disc.sample_sm_times(bs, disc_steps)
        Xt_sm = (1 - t_sm).reshape(-1, 1, 1, 1) * X0 + t_sm.reshape(-1, 1, 1, 1) * X1
        t_idx, s_idx, u_idx, v_idx, t, s, u, v = disc.sample_ctm_times(bs, sub_steps)
        Xt = (1 - t).reshape(-1, 1, 1, 1) * X0 + t.reshape(-1, 1, 1, 1) * X1
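
        # Both Xt_sm and Xt lie on the linear bridge X_t = (1 - t) * X0 + t * X1,
        # so t = 0 recovers X0 and t = 1 recovers X1; reshape(-1, 1, 1, 1)
        # broadcasts the per-sample times over (channel, height, width).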

        # Calculate CTM loss
        with torch.no_grad():
            # Target branch: integrate the ODE from t to u with the teacher,
            # then jump u -> s with the network
            if offline:
                net_ema.eval()
                Xu_real = solver.solve(Xt, t_idx, u_idx, net_ema, sub_steps, ODE_N)
            else:
                net.train()
                Xu_real = solver.solve(Xt, t_idx, u_idx, net, sub_steps, ODE_N, seed)
            net.train()
            torch.manual_seed(seed)
            Xs_real = net(Xu_real, u, s)[0]
            net_ema.eval()
            X_real = net_ema(Xs_real, s, torch.zeros_like(s)) if compare_zero else Xs_real
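
        # The shared manual seed keeps stochastic layers (e.g. dropout) aligned
        # between the target branch above and the student branch below; with
        # compare_zero set, both branches are first mapped down to time zero by
        # the frozen EMA network before being compared.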
        # Student branch: direct jump t -> s
        net.train()
        net_ema.eval()
        torch.manual_seed(seed)
        Xs_fake, cout = net(Xt, t, s)
        X_fake = net_ema(Xs_fake, s, torch.zeros_like(s)) if compare_zero else Xs_fake
        loss_CTM = ctm_dist(X_fake, X_real, cout * (1 - s / t)) / (n_grad_accum * bs)
        (lmda_CTM * loss_CTM).backward()
        seed += 1
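
        # Dividing every micro-batch loss by n_grad_accum * bs makes the
        # accumulated gradient match a single batch of size n_grad_accum * bs
        # (assuming ctm_dist sums over the batch); the cout * (1 - s / t)
        # weight vanishes as s -> t, where the jump becomes the identity.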

        # Calculate DSM loss: with equal start and end times and return_g=True,
        # the call presumably returns the raw denoiser estimate g of X0 together
        # with its output scale cout
        net.train()
        X0_fake, cout = net(Xt_sm, t_sm, t_sm, return_g=True)
        loss_DSM = l2_loss(X0_fake, X0, cout) / (n_grad_accum * bs)
        loss_DSM.backward()

        if accum_idx == n_grad_accum - 1:
            opt_CTM.step()
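
    # One optimizer step per n_grad_accum micro-batches: CTM and DSM gradients
    # accumulate in-place, and opt_CTM.step() fires only on the final pass.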

    # EMA update
    if double_iter is not None:
        ema_list = [0.999, 0.9999, 0.99995]
        ema_decay_curr = ema_list[min(int(avgmeter.idx // double_iter), 2)]
    else:
        ema_decay_curr = ema_decay
    with torch.no_grad():
        for p, p_ema in zip(net.parameters(), net_ema.parameters()):
            p_ema.data = ema_decay_curr * p_ema + (1 - ema_decay_curr) * p
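
    # Exponential moving average p_ema <- d * p_ema + (1 - d) * p; under the
    # step-doubling schedule the decay d tightens from 0.999 to 0.99995, so the
    # EMA averages over a longer horizon late in training.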
# Loss tracker update
|
avgmeter.update({'DSM Loss' : loss_DSM.item()*n_grad_accum,
|
'CTM Loss' : loss_CTM.item()*n_grad_accum,
|
'PSNR G' : curr_PSNR_G,
|
'PSNR g' : curr_PSNR_g,
|
'SSIM G' : curr_SSIM_G,
|
'SSIM g' : curr_SSIM_g,
|
'LPIPS G' : curr_LPIPS_G,
|
'LPIPS g' : curr_LPIPS_g})
|
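
    # The * n_grad_accum factor undoes the accumulation scaling, so the logged
    # losses correspond to the last micro-batch normalized by bs.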

    # Loss and sample visualization
    if avgmeter.idx % v_iter == 0:
        print(avgmeter)
        avgmeter.plot_losses(os.path.join(loss_dir, 'losses.jpg'), nrows=2)
        viz_img(X1_eval, t1, t0, net_ema, sample_B_dir, None)

    # Saving checkpoint
    if avgmeter.idx % s_iter == 0:
        print('\nSaving checkpoint at [{}], Best PSNR : {:.2f}\n'.format(ckpt_dir, best_PSNR))
        save_ckpt(X0_eval, X1_eval, net, net_ema, opt_DSM, opt_CTM, avgmeter, best_PSNR, ckpt_dir, avgmeter.idx)
        delete_all_but_N_files(ckpt_dir, lambda x: int(x.split('_')[1]), n_save, 'best')
        viz_img(X1_eval, t1, t0, net_ema, sample_B_dir, avgmeter.idx)
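
    # delete_all_but_N_files keeps only the n_save newest checkpoints, ranked
    # by the iteration index the lambda parses from each filename; the 'best'
    # argument presumably protects the best-PSNR checkpoint from deletion.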

    # Saving backup checkpoint
    if avgmeter.idx % b_iter == 0: