# When analyzing the websocket responses to identify exceptions and other
# errors, ignore the errors listed here, since they are common for the
# application under test.
ignore_errors = []
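# A hypothetical illustration (not part of the original template): the list
# could be filled with substrings of known, benign error messages so that the
# response analysis does not report them, e.g.
#   ignore_errors = ["java.lang.IllegalStateException",
#                    "connection reset by peer"]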
#
# Do not touch these lines
#
send_payloads_in_websocket(ws_address,
                           messages,
                           session_active_message,
                           ignore_errors,
                           0,
                           log_path,
                           http_proxy_host,
                           http_proxy_port)
# <FILESEP>
import argparse
import copy
import math
import os
import pprint
import time

import lpips
import numpy as np
import torch
import torch.optim as optim

from averagemeter import AverageMeter
from data import Sampler
from discretizations import get_discretization
from distances import get_distance
from networks import SongUNet
from solvers import get_solver
from utils import create_dir, eval_inverse, viz_img, delete_all_but_N_files

def save_ckpt(X0_eval, X1_eval, net, net_ema, opt_DSM, opt_CTM, avgmeter, best_PSNR, ckpt_dir, idx, best=False):
    # Bundle the fixed evaluation batches, network, EMA network, optimizer and
    # meter state into a single checkpoint file.
    ckpt = {
        'X0_eval': X0_eval,
        'X1_eval': X1_eval,
        'net': net.state_dict(),
        'net_ema': net_ema.state_dict(),
        'opt_DSM': opt_DSM.state_dict(),
        'opt_CTM': opt_CTM.state_dict(),
        'avgmeter': avgmeter.state_dict(),
        'best_PSNR': best_PSNR
    }
    if best:
        torch.save(ckpt, os.path.join(ckpt_dir, 'idx_0_best.pt'))
    else:
        torch.save(ckpt, os.path.join(ckpt_dir, 'idx_{}_curr.pt'.format(idx)))
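
# A hypothetical usage sketch (not from the original file): inside the training
# loop, save_ckpt could be called every n_save iterations with the current
# index, and again with best=True whenever the tracked PSNR improves, e.g.
#
#   if PSNR > best_PSNR:
#       best_PSNR = PSNR
#       save_ckpt(X0_eval, X1_eval, net, net_ema, opt_DSM, opt_CTM,
#                 avgmeter, best_PSNR, ckpt_dir, idx, best=True)
#   if idx % n_save == 0:
#       save_ckpt(X0_eval, X1_eval, net, net_ema, opt_DSM, opt_CTM,
#                 avgmeter, best_PSNR, ckpt_dir, idx)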

def train(datasets, data_roots, X1_eps_std, vars, coupling, lmda_CTM, solver, ctm_distance, compare_zero, size, rho, discretization, smin, smax, edm_rho,
          t_sm_dists, disc_steps, init_steps, ODE_N, bs, coupling_bs, lr, use_pcgrad, ema_decay, n_grad_accum, offline, double_iter, t_ctm_dists,
          nc, model_channels, num_blocks, dropout, param, v_iter, s_iter, b_iter, FID_iter, FID_bs, n_FID, n_viz, n_save, base_dir, ckpt_name):
    # Data sampler, time discretization, distance functions and ODE solver.
    size = max(size, 32)
    sampler = Sampler(datasets, data_roots, nc, size, X1_eps_std, coupling, coupling_bs, bs)
    disc = get_discretization(discretization, disc_steps, smin=smin, smax=smax, rho=edm_rho, t_sm_dists=t_sm_dists, t_ctm_dists=t_ctm_dists)
    ctm_dist, l2_loss = get_distance(ctm_distance), get_distance('l2')
    solver = get_solver(solver, disc)
    vars[1] += X1_eps_std**2

    # Network, its EMA copy, and separate optimizers for the DSM and CTM objectives.
    net = SongUNet(vars=vars, param=param, discretization=disc, img_resolution=size, in_channels=nc, out_channels=nc,
                   num_blocks=num_blocks, dropout=dropout, model_channels=model_channels).cuda()
    opt_DSM = optim.Adam(net.parameters(), lr=lr/(lmda_CTM+1))
    opt_CTM = optim.Adam(net.parameters(), lr=lr)
    net_ema = copy.deepcopy(net)
    avgmeter = AverageMeter(window=125,
                            loss_names=['DSM Loss', 'CTM Loss', 'PSNR G', 'PSNR g', 'SSIM G', 'SSIM g', 'LPIPS G', 'LPIPS g'],
                            yscales=['log', 'log', 'linear', 'linear', 'linear', 'linear', 'linear', 'linear'])

    # Output directories for losses, samples and checkpoints.
    loss_dir = os.path.join(base_dir, 'losses')
    sample_B_dir = os.path.join(base_dir, 'samples_B')
    sample_F_dir = os.path.join(base_dir, 'samples_F')
    ckpt_dir = os.path.join(base_dir, 'ckpts')

    if ckpt_name:
        # Resume all state from a previously saved checkpoint.
        print('\nLoading state from [{}]\n'.format(ckpt_name))
        ckpt = torch.load(os.path.join(ckpt_dir, ckpt_name))
        X0_eval = ckpt['X0_eval'].cuda()
        X1_eval = ckpt['X1_eval'].cuda()
        net.load_state_dict(ckpt['net'])
        opt_DSM.load_state_dict(ckpt['opt_DSM'])
        opt_CTM.load_state_dict(ckpt['opt_CTM'])
        net_ema.load_state_dict(ckpt['net_ema'])
        avgmeter.load_state_dict(ckpt['avgmeter'])
        loss_DSM = avgmeter.losses['DSM Loss'][-1]
        loss_CTM = avgmeter.losses['CTM Loss'][-1]
        best_PSNR = ckpt['best_PSNR']
    else:
        # Draw fixed evaluation batches for visualization.
        X0_eval = torch.cat([sampler.sample_X0() for _ in range(math.ceil(n_viz/bs))], dim=0)[:n_viz]
        X1_eval = torch.cat([sampler.sample_X1() for _ in range(math.ceil(n_viz/bs))], dim=0)[:n_viz]
        best_PSNR = 0