defaults["lr"] = cfg.SOLVER.BASE_LR
|
defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY
|
norm_module_types = (
|
torch.nn.BatchNorm1d,
|
torch.nn.BatchNorm2d,
|
torch.nn.BatchNorm3d,
|
torch.nn.SyncBatchNorm,
|
# NaiveSyncBatchNorm inherits from BatchNorm2d
|
torch.nn.GroupNorm,
|
torch.nn.InstanceNorm1d,
|
torch.nn.InstanceNorm2d,
|
torch.nn.InstanceNorm3d,
|
torch.nn.LayerNorm,
|
torch.nn.LocalResponseNorm,
|
)
|
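        # Build one optimizer param group per parameter so that the learning
        # rate and weight decay can be overridden per module. Each appended
        # dict (e.g. {"params": [p], "lr": ..., "weight_decay": 0.0}) becomes
        # its own param group in torch.optim, and group values take precedence
        # over the optimizer-level defaults.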
        params: List[Dict[str, Any]] = []
        memo: Set[torch.nn.parameter.Parameter] = set()
        for module_name, module in model.named_modules():
            for module_param_name, value in module.named_parameters(recurse=False):
                if not value.requires_grad:
                    continue
                # Avoid duplicating parameters
                if value in memo:
                    continue
                memo.add(value)

                hyperparams = copy.copy(defaults)
                if "backbone" in module_name:
                    hyperparams["lr"] = (
                        hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER
                    )
                if (
                    "relative_position_bias_table" in module_param_name
                    or "absolute_pos_embed" in module_param_name
                ):
                    # print(module_param_name)
                    hyperparams["weight_decay"] = 0.0
                if isinstance(module, norm_module_types):
                    hyperparams["weight_decay"] = weight_decay_norm
                if isinstance(module, torch.nn.Embedding):
                    hyperparams["weight_decay"] = weight_decay_embed
                params.append({"params": [value], **hyperparams})

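        # Optional wrapper that clips the gradient norm over the full model
        # (all param groups at once) before each optimizer step, rather than
        # detectron2's built-in per-parameter clipping.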
        def maybe_add_full_model_gradient_clipping(optim):
            # detectron2 doesn't have full model gradient clipping now
            clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
            enable = (
                cfg.SOLVER.CLIP_GRADIENTS.ENABLED
                and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
                and clip_norm_val > 0.0
            )

            class FullModelGradientClippingOptimizer(optim):
                def step(self, closure=None):
                    all_params = itertools.chain(
                        *[x["params"] for x in self.param_groups]
                    )
                    torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                    super().step(closure=closure)

            return FullModelGradientClippingOptimizer if enable else optim

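        # Instantiate the optimizer chosen in the config, wrapped with
        # full-model clipping when that mode is enabled.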
        optimizer_type = cfg.SOLVER.OPTIMIZER
        if optimizer_type == "SGD":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
                params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
            )
        elif optimizer_type == "ADAMW":
            optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
                params, cfg.SOLVER.BASE_LR
            )
        else:
            raise NotImplementedError(f"no optimizer type {optimizer_type}")
        if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE != "full_model":
            optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer

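    # Evaluate with test-time augmentation: wrap the model in
    # SemanticSegmentorWithTTA and suffix each metric key with "_TTA".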
    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # At the end of training, run an evaluation with TTA.
        logger.info("Running inference with test-time augmentation ...")
        model = SemanticSegmentorWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res

def setup(args):
    """
    Create configs and perform basic setups.
    """