| | import copy |
| | import torch |
| | import torch.nn as nn |
| | from torch.nn.modules.batchnorm import _BatchNorm |
| |
|
| | from mmengine.dataset import Compose |
| | from mmengine.registry import MODELS as MM_BACKBONES |
| | from mmengine.runner import load_checkpoint |
| |
|
# Alias mmengine's model registry; backbone models are instantiated via
# BACKBONES.build(cfg) in BackboneWrapper below.
BACKBONES = MM_BACKBONES
| |
|
| |
|
class BackboneWrapper(nn.Module):
    """Wrap an mmengine-built recognizer so it acts as a frame-feature extractor.

    The incoming ``cfg`` must carry a ``custom`` section with wrapper-specific
    options; everything else in ``cfg`` is handed to ``BACKBONES.build``.

    Recognized ``custom`` keys (all optional unless noted):
        pretrain (str | None): checkpoint path loaded into the built model
            (map_location="cpu"). If absent/None, a warning is printed.
        pre_processing_pipeline (list | None): mmengine ``Compose`` steps
            applied to the preprocessed frames before the backbone.
        post_processing_pipeline (list | None): ``Compose`` steps applied to
            the unflattened backbone features.
        norm_eval (bool): keep LayerNorm/GroupNorm/BatchNorm layers in eval
            mode and freeze their parameters on every forward. Default: True.
        freeze_backbone (bool): run the backbone under ``torch.no_grad()``.
            Default: False.
    """

    def __init__(self, cfg):
        super().__init__()
        custom_cfg = cfg.custom
        # Build the model from a copy so the caller's cfg is not mutated by pop().
        model_cfg = copy.deepcopy(cfg)
        model_cfg.pop("custom")

        self.model = BACKBONES.build(model_cfg)

        pretrain = getattr(custom_cfg, "pretrain", None)
        if pretrain is not None:
            load_checkpoint(self.model, pretrain, map_location="cpu")
        else:
            # FIX: the original message used a backslash continuation inside the
            # string literal, which embedded source-code indentation into the
            # printed text. Implicit concatenation keeps the message clean.
            print(
                "Warning: no pretrain path is provided, the backbone will be "
                "randomly initialized, unless you have initialized the weights "
                "in the model.py."
            )

        # Optional Compose pipelines; guard against a key that exists but is None
        # (the original hasattr() check would have passed None into Compose).
        pre_cfg = getattr(custom_cfg, "pre_processing_pipeline", None)
        self.pre_processing_pipeline = Compose(pre_cfg) if pre_cfg is not None else None

        post_cfg = getattr(custom_cfg, "post_processing_pipeline", None)
        self.post_processing_pipeline = Compose(post_cfg) if post_cfg is not None else None

        self.norm_eval = getattr(custom_cfg, "norm_eval", True)
        self.freeze_backbone = getattr(custom_cfg, "freeze_backbone", False)

        print("freeze_backbone: {}, norm_eval: {}".format(self.freeze_backbone, self.norm_eval))

    def forward(self, frames, masks=None):
        """Extract per-segment features from batched frames.

        Args:
            frames (Tensor): batched video frames; assumed layout
                ``(batch, num_segs, ...)`` — the two leading dims are flattened
                before the backbone. TODO(review): confirm exact layout.
            masks (Tensor | None): optional mask multiplied onto the features,
                applied only when the pooled features are 3-D.

        Returns:
            Tensor: features cast to float32.
        """
        # Re-assert eval mode on norm layers every call; a parent trainer may
        # have switched the whole module back to train() between steps.
        self.set_norm_layer()

        # mmengine's data_preprocessor expects a list of per-sample tensors.
        frames, _ = self.model.data_preprocessor.preprocess(
            self.tensor_to_list(frames),
            data_samples=None,
            training=False,
        )

        if self.pre_processing_pipeline is not None:
            frames = self.pre_processing_pipeline(dict(frames=frames))["frames"]

        # Fold (batch, num_segs) into a single leading dim for the backbone.
        batches, num_segs = frames.shape[:2]
        frames = frames.flatten(0, 1)

        if self.freeze_backbone:
            with torch.no_grad():
                features = self.model.backbone(frames)
        else:
            features = self.model.backbone(frames)

        # Multi-stage backbones return a tuple/list; pool each stage and
        # concatenate along dim=1 (matches the single-output unflattened layout).
        if isinstance(features, (tuple, list)):
            features = torch.cat(
                [self.unflatten_and_pool_features(f, batches, num_segs) for f in features],
                dim=1,
            )
        else:
            features = self.unflatten_and_pool_features(features, batches, num_segs)

        # NOTE(review): masking is intentionally skipped for non-3-D outputs —
        # confirm this matches the caller's expectation.
        if masks is not None and features.dim() == 3:
            features = features * masks.unsqueeze(1).detach().float()

        return features.to(torch.float32)

    def tensor_to_list(self, tensor):
        """Split a batched tensor into a list of per-sample tensors."""
        # list(tensor) iterates dim 0, identical to the hand-rolled comprehension.
        return list(tensor)

    def unflatten_and_pool_features(self, features, batches, num_segs):
        """Restore the (batch, num_segs) leading dims, then run post-processing."""
        features = features.unflatten(dim=0, sizes=(batches, num_segs))
        if self.post_processing_pipeline is not None:
            features = self.post_processing_pipeline(dict(feats=features))["feats"]
        return features

    def set_norm_layer(self):
        """When norm_eval, put all norm layers in eval mode and freeze their params."""
        if self.norm_eval:
            for m in self.modules():
                if isinstance(m, (nn.LayerNorm, nn.GroupNorm, _BatchNorm)):
                    m.eval()
                    # Freeze affine weights/biases so the optimizer cannot
                    # update frozen statistics-tracking layers.
                    for param in m.parameters():
                        param.requires_grad = False
| |
|