import argparse
import os
import sys
import time

import numpy as np
import torch
import torch.utils.data

torch.backends.cudnn.benchmark = True # gotta go fast!
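# (benchmark mode autotunes cuDNN convolution algorithms; it pays off when
# input shapes stay fixed across iterations, as with the fixed-size batches
# used below)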

from utils import utils

if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(description='Train an autoencoder')
    parser.add_argument('experconfig', type=str, help='experiment config file')
    parser.add_argument('--profile', type=str, default='Train', help='config profile')
    parser.add_argument('--devices', type=int, nargs='+', default=[0], help='devices')
    parser.add_argument('--resume', action='store_true', help='resume training')
    parser.add_argument('--noprogress', action='store_true', help='don\'t output training progress images')
    parser.add_argument('--nostab', action='store_true', help='don\'t check loss stability')
    parser.add_argument('--scripting', action='store_true', help='use torch.jit.script')
    parsed, unknown = parser.parse_known_args()
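    # any extra "--key value" options are declared on the fly and forwarded to
    # the config profile below; type=eval parses each value as a Python
    # expression, so hypothetical overrides like --batchsize 16 or --lr 1e-3
    # both work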
    for arg in unknown:
        if arg.startswith(("-", "--")):
            parser.add_argument(arg, type=eval)
    args = parser.parse_args()
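    # example invocation (hypothetical paths):
    #   python train.py experiments/example/config.py --profile Train --devices 0 1 --resume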

    outpath = os.path.dirname(args.experconfig)
    if args.resume:
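        # recover the last iteration number recorded in the existing log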
        iternum = utils.findmaxiters("{}/log.txt".format(outpath))
    else:
        iternum = 0
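    # utils.Logger presumably tees stdout into log.txt; append ("a") when
    # resuming so the history that findmaxiters reads is preserved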
    log = utils.Logger("{}/log.txt".format(outpath), "a" if args.resume else "w")
    print("Python", sys.version)
    print("PyTorch", torch.__version__)
    print(" ".join(sys.argv))
    print("Output path:", outpath)

    # load config
    starttime = time.time()
    experconfig = utils.import_module(args.experconfig, "config")
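    # instantiate the requested profile, passing through only the dynamic
    # overrides (anything absent from the first parse_known_args namespace)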
    profile = getattr(experconfig, args.profile)(**{k: v for k, v in vars(args).items() if k not in parsed})
    if not args.noprogress:
        progressprof = experconfig.Progress()
    print("Config loaded ({:.2f} s)".format(time.time() - starttime))

    # build dataset & testing dataset
    starttime = time.time()
    if not args.noprogress:
        testdataset = progressprof.get_dataset()
        dataloader = torch.utils.data.DataLoader(testdataset,
                batch_size=progressprof.batchsize, shuffle=False,
                drop_last=True, num_workers=0)
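        # pull a single fixed batch to render progress images from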
        for testbatch in dataloader:
            break

    dataset = profile.get_dataset()
    print("len(dataset)=", len(dataset))
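    # a profile-supplied sampler takes precedence over plain shuffling (the
    # two are mutually exclusive in DataLoader); persistent_workers keeps the
    # 8 worker processes alive between epochs instead of respawning them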
    if hasattr(profile, "get_dataset_sampler"):
        dataloader = torch.utils.data.DataLoader(dataset,
                batch_size=profile.batchsize,
                sampler=profile.get_dataset_sampler(), drop_last=True,
                num_workers=8, persistent_workers=True)
    else:
        dataloader = torch.utils.data.DataLoader(dataset,
                batch_size=profile.batchsize, shuffle=True, drop_last=True,
                num_workers=8, persistent_workers=True)
    print("Dataset instantiated ({:.2f} s)".format(time.time() - starttime))

    # data writer
    starttime = time.time()
    if not args.noprogress:
        writer = progressprof.get_writer()
    print("Writer instantiated ({:.2f} s)".format(time.time() - starttime))

    # build autoencoder
    starttime = time.time()
    ae = profile.get_autoencoder(dataset)
    ae = ae.to("cuda").train()
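    # strict=False tolerates checkpoints whose parameter names don't exactly
    # match the current model (e.g. after minor architecture changes)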
    if args.resume:
        ae.load_state_dict(torch.load("{}/aeparams.pt".format(outpath)), strict=False)
    print("Autoencoder instantiated ({:.2f} s)".format(time.time() - starttime))

    # compile with jit
    if args.scripting:
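        # script encoder and decoder separately; the top-level module itself
        # stays eager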
        ae.encoder = torch.jit.script(ae.encoder)
        ae.decoder = torch.jit.script(ae.decoder)

    # build optimizer
    starttime = time.time()
    optim = profile.get_optimizer(ae)
    if args.resume:
        optim.load_state_dict(torch.load("{}/optimparams.pt".format(outpath)))
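    # per-term scalar loss weights, presumably combined into the total loss
    # further down in the training loop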
    lossweights = profile.get_loss_weights()
    print("Optimizer instantiated ({:.2f} s)".format(time.time() - starttime))

    # train
    starttime = time.time()
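    # 100 evaluation checkpoints spaced geometrically between iteration 1 and
    # maxiter, so early training (where the loss moves fastest) is sampled
    # most densely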
    evalpoints = np.geomspace(1., profile.maxiter, 100).astype(np.int32)
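    # last observed loss, used by the stability check (disabled via --nostab)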
    prevloss = np.inf

    for epoch in range(10000):
        for data in dataloader:
            # forward
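            # utils.tocuda presumably moves every tensor in the batch dict
            # onto the GPU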
            cudadata = utils.tocuda(data)
            output, losses = ae(
                trainiter=iternum,