        # NOTE: this excerpt begins mid-file; it assumes `import time`,
        # `import numpy as np`, `import torch`, and a project `utils` module
        # earlier in the script, and runs inside a data-loading loop nested
        # in an epoch loop (hence the two `maxiter` checks below).

        # forward pass; the assignment and `trainiter` line are reconstructed
        # from the evaluation call further down, which uses the same signature
        output, losses = ae(
            trainiter=iternum,
            outputlist=profile.get_outputlist(),
            losslist=lossweights.keys(),
            **cudadata,
            **(profile.get_ae_args() if hasattr(profile, "get_ae_args") else {}))

        # compute final loss as a weighted sum of the individual terms;
        # (numerator, denominator) tuples are reduced as a ratio of sums,
        # plain tensors by their mean
        loss = sum([
            lossweights[k] * (torch.sum(v[0]) / torch.sum(v[1]) if isinstance(v, tuple) else torch.mean(v))
            for k, v in losses.items()])

        # print current information
        print("Iteration {}: loss = {:.5f}, ".format(iternum, float(loss.item())) +
                ", ".join(["{} = {:.5f}".format(k,
                    float(torch.sum(v[0]) / torch.sum(v[1]) if isinstance(v, tuple) else torch.mean(v)))
                    for k, v in losses.items()]), end="")
        if iternum % 10 == 0:
            # report throughput averaged over the last 10 iterations
            endtime = time.time()
            ips = 10. / (endtime - starttime)
            print(", iter/sec = {:.2f}".format(ips))
            starttime = time.time()
        else:
            print()

        # update parameters
        optim.zero_grad()
        loss.backward()
        optim.step()

        # compute evaluation output
        if not args.noprogress and iternum in evalpoints:
            with torch.no_grad():
                testoutput, _ = ae(
                    trainiter=iternum,
                    outputlist=progressprof.get_outputlist() + ["rmtime"],
                    losslist=[],
                    **utils.tocuda(testbatch),
                    **progressprof.get_ae_args())

            print("Iteration {}: rmtime = {:.5f}".format(iternum, testoutput["rmtime"] * 1000.))
            # give each test sample a unique index; the source read
            # torch.arange(0) (an empty tensor), so the batch size is
            # assumed to be the intended argument
            writer.batch(iternum, iternum * profile.batchsize + torch.arange(profile.batchsize),
                    **testbatch, **testoutput)

        # if the loss blows up or becomes non-finite, restore the last
        # checkpoint and rebuild the optimizer instead of diverging
        if not args.nostab and (loss.item() > 400 * prevloss or not np.isfinite(loss.item())):
            print("unstable loss function; resetting")

            ae.load_state_dict(torch.load("{}/aeparams.pt".format(outpath)), strict=False)
            optim = profile.get_optimizer(ae)

        prevloss = loss.item()

        # save intermediate results
        if iternum % 1000 == 0:
            torch.save(ae.state_dict(), "{}/aeparams.pt".format(outpath))
            torch.save(optim.state_dict(), "{}/optimparams.pt".format(outpath))

        if iternum >= profile.maxiter:
            break

        iternum += 1

    # repeated check: breaks out of the enclosing epoch loop
    if iternum >= profile.maxiter:
        break

# cleanup
writer.finalize()
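
# ---------------------------------------------------------------------------
# Illustration (not part of the original script): the weighted loss reduction
# used above, shown standalone. Loss terms may be plain tensors (reduced by
# their mean) or (numerator, denominator) pairs (reduced as a ratio of sums),
# which is how masked losses avoid averaging over invalid pixels. All names
# in this sketch are made up for the example.
# ---------------------------------------------------------------------------
import torch

def reduce_losses(losses, lossweights):
    """Reduce a dict of per-element loss tensors to a single scalar."""
    total = 0.
    for name, value in losses.items():
        if isinstance(value, tuple):
            # masked loss: sum of errors divided by sum of valid weights
            term = torch.sum(value[0]) / torch.sum(value[1])
        else:
            term = torch.mean(value)
        total = total + lossweights[name] * term
    return total

# example: an unmasked image term and a masked depth term
example_losses = {
    "irgbmse": torch.rand(4, 3, 8, 8),                    # per-pixel error
    "depth": (torch.rand(4, 8, 8), torch.ones(4, 8, 8)),  # (error, mask)
}
example_weights = {"irgbmse": 1.0, "depth": 0.1}
print(float(reduce_losses(example_losses, example_weights)))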

# === second file ===========================================================
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This script is a simplified version of the training script in detectron2/tools.
"""
import copy
import itertools
import logging
import os
from collections import OrderedDict
from functools import partial
from typing import Any, Dict, List, Set

import torch

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesSemSegEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
from detectron2.utils.logger import setup_logger
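
# ---------------------------------------------------------------------------
# The excerpt ends at the import block above. What follows is a hypothetical
# sketch of how such a detectron2 script is typically wired together, based
# on detectron2's documented API (get_cfg, default_setup, DefaultTrainer,
# launch); it is NOT the original file body, and the plain DefaultTrainer
# stands in for whatever Trainer subclass the real script defines.
# ---------------------------------------------------------------------------
def setup(args):
    # build and freeze the config, then apply detectron2's standard setup
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank())
    return cfg


def main(args):
    cfg = setup(args)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )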