Dataset schema: id (int64, 0–190k); prompt (string, lengths 21–13.4M); docstring (string, lengths 1–12k).
3,569
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

The provided code snippet includes necessary dependencies for implementing the `annotations_to_instances` function. Write a Python function `def annotations_to_instances(annos, image_size, mask_format="polygon")` to solve the following problem:

Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict.

Args:
    annos (list[dict]): a list of instance annotations in one image, each element for one instance.
    image_size (tuple): height, width

Returns:
    Instances: It will contain fields "gt_boxes", "gt_classes", "gt_masks", "gt_keypoints", if they can be obtained from `annos`. This is the format that builtin models expect.

Here is the function:

def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes", "gt_masks",
            "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    boxes = (
        np.stack(
            [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
        )
        if len(annos)
        else np.zeros((0, 4))
    )
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks

    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)

    return target
Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Args: annos (list[dict]): a list of instance annotations in one image, each element for one instance. image_size (tuple): height, width Returns: Instances: It will contain fields "gt_boxes", "gt_classes", "gt_masks", "gt_keypoints", if they can be obtained from `annos`. This is the format that builtin models expect.
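A minimal usage sketch for `annotations_to_instances`; the single annotation below (bbox, category id, polygon values) is hypothetical, and detectron2 is assumed to be importable:

import numpy as np
from detectron2.structures import BoxMode

annos = [
    {
        "bbox": [10.0, 20.0, 50.0, 80.0],  # XYWH_ABS: x0, y0, width, height
        "bbox_mode": BoxMode.XYWH_ABS,
        "category_id": 3,
        # One polygon per instance: flattened [x0, y0, x1, y1, ...] coordinates.
        "segmentation": [[10.0, 20.0, 60.0, 20.0, 60.0, 100.0, 10.0, 100.0]],
    }
]
instances = annotations_to_instances(annos, image_size=(480, 640))
print(instances.gt_boxes)    # Boxes, converted to XYXY_ABS
print(instances.gt_classes)  # tensor([3])
print(instances.gt_masks)    # PolygonMasks holding one instance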
3,570
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

The provided code snippet includes necessary dependencies for implementing the `annotations_to_instances_rotated` function. Write a Python function `def annotations_to_instances_rotated(annos, image_size)` to solve the following problem:

Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Compared to `annotations_to_instances`, this function is for rotated boxes only.

Args:
    annos (list[dict]): a list of instance annotations in one image, each element for one instance.
    image_size (tuple): height, width

Returns:
    Instances: Containing fields "gt_boxes", "gt_classes", if they can be obtained from `annos`. This is the format that builtin models expect.

Here is the function:

def annotations_to_instances_rotated(annos, image_size):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Compared to `annotations_to_instances`, this function is for rotated boxes only.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            Containing fields "gt_boxes", "gt_classes",
            if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    boxes = [obj["bbox"] for obj in annos]
    target = Instances(image_size)
    boxes = target.gt_boxes = RotatedBoxes(boxes)
    boxes.clip(image_size)

    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    return target
Create an :class:`Instances` object used by the models, from instance annotations in the dataset dict. Compared to `annotations_to_instances`, this function is for rotated boxes only Args: annos (list[dict]): a list of instance annotations in one image, each element for one instance. image_size (tuple): height, width Returns: Instances: Containing fields "gt_boxes", "gt_classes", if they can be obtained from `annos`. This is the format that builtin models expect.
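A short sketch with made-up annotation values. Note the function reads `bbox` directly without a mode conversion, so the boxes must already be in XYWHA_ABS (cx, cy, w, h, angle in degrees) format:

annos = [
    {"bbox": [100.0, 100.0, 60.0, 30.0, 45.0], "category_id": 0},
    {"bbox": [200.0, 150.0, 80.0, 40.0, -10.0], "category_id": 2},
]
instances = annotations_to_instances_rotated(annos, image_size=(480, 640))
print(instances.gt_boxes)    # RotatedBoxes, clipped to the 480x640 image
print(instances.gt_classes)  # tensor([0, 2])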
3,571
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

The provided code snippet includes necessary dependencies for implementing the `filter_empty_instances` function. Write a Python function `def filter_empty_instances( instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False )` to solve the following problem:

Filter out empty instances in an `Instances` object.

Args:
    instances (Instances):
    by_box (bool): whether to filter out instances with empty boxes
    by_mask (bool): whether to filter out instances with empty masks
    box_threshold (float): minimum width and height to be considered non-empty
    return_mask (bool): whether to return boolean mask of filtered instances

Returns:
    Instances: the filtered instances.
    tensor[bool], optional: boolean mask of filtered instances

Here is the function:

def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Filter out empty instances in an `Instances` object.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    r = []
    if by_box:
        r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if instances.has("gt_masks") and by_mask:
        r.append(instances.gt_masks.nonempty())

    # TODO: can also filter visible keypoints

    if not r:
        return instances
    m = r[0]
    for x in r[1:]:
        m = m & x
    if return_mask:
        return instances[m], m
    return instances[m]
Filter out empty instances in an `Instances` object. Args: instances (Instances): by_box (bool): whether to filter out instances with empty boxes by_mask (bool): whether to filter out instances with empty masks box_threshold (float): minimum width and height to be considered non-empty return_mask (bool): whether to return boolean mask of filtered instances Returns: Instances: the filtered instances. tensor[bool], optional: boolean mask of filtered instances
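A self-contained sketch; the boxes are hypothetical and chosen so the second one is degenerate (zero width):

import torch
from detectron2.structures import Boxes, Instances

target = Instances((480, 640))
target.gt_boxes = Boxes(torch.tensor([[10.0, 10.0, 50.0, 50.0],
                                      [30.0, 30.0, 30.0, 60.0]]))
target.gt_classes = torch.tensor([1, 2])

kept, mask = filter_empty_instances(target, return_mask=True)
print(len(kept))  # 1, the zero-width box is dropped
print(mask)       # tensor([ True, False])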
3,572
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

def check_metadata_consistency(key, dataset_names):
    """
    Check that the datasets have consistent metadata.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if len(dataset_names) == 0:
        return
    logger = logging.getLogger(__name__)
    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
    for idx, entry in enumerate(entries_per_dataset):
        if entry != entries_per_dataset[0]:
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
            )
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(
                    key, dataset_names[0], str(entries_per_dataset[0])
                )
            )
            raise ValueError("Datasets have different metadata '{}'!".format(key))

MetadataCatalog = _MetadataCatalog()
MetadataCatalog.__doc__ = (
    _MetadataCatalog.__doc__
    + """
    .. automethod:: detectron2.data.catalog.MetadataCatalog.get
"""
)

The provided code snippet includes necessary dependencies for implementing the `create_keypoint_hflip_indices` function. Write a Python function `def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]` to solve the following problem:

Args:
    dataset_names: list of dataset names

Returns:
    list[int]: a list of size=#keypoints, storing the horizontally-flipped keypoint indices.

Here is the function:

def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Args:
        dataset_names: list of dataset names

    Returns:
        list[int]: a list of size=#keypoints, storing the
        horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]

    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)

    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    flip_map = dict(meta.keypoint_flip_map)
    flip_map.update({v: k for k, v in flip_map.items()})
    flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
    flip_indices = [names.index(i) for i in flipped_names]
    return flip_indices
Args: dataset_names: list of dataset names Returns: list[int]: a list of size=#keypoints, storing the horizontally-flipped keypoint indices.
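A sketch using a made-up dataset name and a toy 3-keypoint skeleton registered on the fly; only the metadata keys the function reads are set:

from detectron2.data import MetadataCatalog

meta = MetadataCatalog.get("my_toy_dataset")  # hypothetical dataset name
meta.keypoint_names = ("nose", "left_eye", "right_eye")
meta.keypoint_flip_map = (("left_eye", "right_eye"),)

# "nose" maps to itself; the two eyes swap under a horizontal flip.
print(create_keypoint_hflip_indices("my_toy_dataset"))  # [0, 2, 1]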
3,573
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

The provided code snippet includes necessary dependencies for implementing the `gen_crop_transform_with_instance` function. Write a Python function `def gen_crop_transform_with_instance(crop_size, image_size, instance)` to solve the following problem:

Generate a CropTransform so that the cropping region contains the center of the given instance.

Args:
    crop_size (tuple): h, w in pixels
    image_size (tuple): h, w
    instance (dict): an annotation dict of one instance, in Detectron2's dataset format.

Here is the function:

def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"

    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))

    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format.
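A sketch with hypothetical values; the returned `CropTransform` is guaranteed to contain the instance's center:

from detectron2.structures import BoxMode

instance = {"bbox": [100.0, 120.0, 40.0, 60.0], "bbox_mode": BoxMode.XYWH_ABS}
tfm = gen_crop_transform_with_instance(
    crop_size=(200, 200), image_size=(480, 640), instance=instance
)
# tfm is a T.CropTransform; apply it to the image (and, via the usual
# transform API, to boxes and masks), e.g.:
# cropped = tfm.apply_image(image)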
3,574
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
    BitMasks, Boxes, BoxMode, Instances, Keypoints,
    PolygonMasks, RotatedBoxes, polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog

The provided code snippet includes necessary dependencies for implementing the `build_augmentation` function. Write a Python function `def build_augmentation(cfg, is_train)` to solve the following problem:

Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping.

Returns:
    list[Augmentation]

Here is the function:

def build_augmentation(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.

    Returns:
        list[Augmentation]
    """
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        sample_style = "choice"
    augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
    if is_train and cfg.INPUT.RANDOM_FLIP != "none":
        augmentation.append(
            T.RandomFlip(
                horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
                vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
            )
        )
    return augmentation
Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation]
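A sketch assuming detectron2's default config; the exact resize sizes printed depend on the config defaults of your version:

from detectron2.config import get_cfg

cfg = get_cfg()
augs = build_augmentation(cfg, is_train=True)
print(augs)
# With stock defaults this is a ResizeShortestEdge followed by a RandomFlip,
# since cfg.INPUT.RANDOM_FLIP defaults to "horizontal".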
3,575
import inspect
import numpy as np
import pprint
from typing import Any, List, Optional, Tuple, Union
from fvcore.transforms.transform import Transform, TransformList

def _check_img_dtype(img):
    assert isinstance(img, np.ndarray), "[Augmentation] Needs a numpy array, but got a {}!".format(
        type(img)
    )
    assert not isinstance(img.dtype, np.integer) or (
        img.dtype == np.uint8
    ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
        img.dtype
    )
    assert img.ndim in [2, 3], img.ndim
null
3,576
import inspect
import numpy as np
import pprint
from typing import Any, List, Optional, Tuple, Union
from fvcore.transforms.transform import Transform, TransformList

The provided code snippet includes necessary dependencies for implementing the `_get_aug_input_args` function. Write a Python function `def _get_aug_input_args(aug, aug_input) -> List[Any]` to solve the following problem:

Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.

Here is the function:

def _get_aug_input_args(aug, aug_input) -> List[Any]:
    """
    Get the arguments to be passed to ``aug.get_transform``
    from the input ``aug_input``.
    """
    if aug.input_args is None:
        # Decide what attributes are needed automatically
        prms = list(inspect.signature(aug.get_transform).parameters.items())
        # The default behavior is: if there is one parameter, then it is "image"
        # (this works automatically for the majority of use cases and avoids
        # breaking backward compatibility). Otherwise, use the argument names.
        if len(prms) == 1:
            names = ("image",)
        else:
            names = []
            for name, prm in prms:
                if prm.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD):
                    raise TypeError(
                        f"The default implementation of `{type(aug)}.__call__` does not allow "
                        f"`{type(aug)}.get_transform` to use variable-length arguments "
                        f"(*args, **kwargs)! If arguments are unknown, "
                        f"reimplement `__call__` instead."
                    )
                names.append(name)
        aug.input_args = tuple(names)

    args = []
    for f in aug.input_args:
        try:
            args.append(getattr(aug_input, f))
        except AttributeError as e:
            raise AttributeError(
                f"{type(aug)}.get_transform needs input attribute '{f}', "
                f"but it is not an attribute of {type(aug_input)}!"
            ) from e
    return args
Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
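A sketch of the attribute-name inference, using a hypothetical `MyAug` that declares two parameters; `_get_aug_input_args` should then collect `image` and `sem_seg` from the input:

import numpy as np
from detectron2.data import transforms as T

class MyAug(T.Augmentation):
    # Declares two parameters, so both names are looked up on the input.
    def get_transform(self, image, sem_seg):
        return T.NoOpTransform()

aug = MyAug()
aug_input = T.AugInput(
    np.zeros((4, 4, 3), dtype=np.uint8),
    sem_seg=np.zeros((4, 4), dtype=np.uint8),
)
args = _get_aug_input_args(aug, aug_input)
print(aug.input_args)  # ('image', 'sem_seg')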
3,577
import inspect import numpy as np import pprint from typing import Any, List, Optional, Tuple, Union from fvcore.transforms.transform import Transform, TransformList class Augmentation: """ Augmentation defines (often random) policies/strategies to generate :class:`Transform` from data. It is often used for pre-processing of input data. A "policy" that generates a :class:`Transform` may, in the most general case, need arbitrary information from input data in order to determine what transforms to apply. Therefore, each :class:`Augmentation` instance defines the arguments needed by its :meth:`get_transform` method. When called with the positional arguments, the :meth:`get_transform` method executes the policy. Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, but not how to execute the actual transform operations to those data. Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform. The returned `Transform` object is meant to describe deterministic transformation, which means it can be re-applied on associated data, e.g. the geometry of an image and its segmentation masks need to be transformed together. (If such re-application is not needed, then determinism is not a crucial requirement.) """ input_args: Optional[Tuple[str]] = None """ Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``. By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only contain "image". As long as the argument name convention is followed, there is no need for users to touch this attribute. """ def _init(self, params=None): if params: for k, v in params.items(): if k != "self" and not k.startswith("_"): setattr(self, k, v) def get_transform(self, *args) -> Transform: """ Execute the policy based on input data, and decide what transform to apply to inputs. Args: args: Any fixed-length positional arguments. By default, the name of the arguments should exist in the :class:`AugInput` to be used. Returns: Transform: Returns the deterministic transform to apply to the input. Examples: :: class MyAug: # if a policy needs to know both image and semantic segmentation def get_transform(image, sem_seg) -> T.Transform: pass tfm: Transform = MyAug().get_transform(image, sem_seg) new_image = tfm.apply_image(image) Notes: Users can freely use arbitrary new argument names in custom :meth:`get_transform` method, as long as they are available in the input data. In detectron2 we use the following convention: * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes of N instances. Each is in XYXY format in unit of absolute coordinates. * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. We do not specify convention for other types and do not include builtin :class:`Augmentation` that uses other types in detectron2. """ raise NotImplementedError def __call__(self, aug_input) -> Transform: """ Augment the given `aug_input` **in-place**, and return the transform that's used. This method will be called to apply the augmentation. In most augmentation, it is enough to use the default implementation, which calls :meth:`get_transform` using the inputs. But a subclass can overwrite it to have more complicated logic. Args: aug_input (AugInput): an object that has attributes needed by this augmentation (defined by ``self.get_transform``). 
Its ``transform`` method will be called to in-place transform it. Returns: Transform: the transform that is applied on the input. """ args = _get_aug_input_args(self, aug_input) tfm = self.get_transform(*args) assert isinstance(tfm, (Transform, TransformList)), ( f"{type(self)}.get_transform must return an instance of Transform! " f"Got {type(tfm)} instead." ) aug_input.transform(tfm) return tfm def _rand_range(self, low=1.0, high=None, size=None): """ Uniform float random number between low and high. """ if high is None: low, high = 0, low if size is None: size = [] return np.random.uniform(low, high, size) def __repr__(self): """ Produce something like: "MyAugmentation(field1={self.field1}, field2={self.field2})" """ try: sig = inspect.signature(self.__init__) classname = type(self).__name__ argstr = [] for name, param in sig.parameters.items(): assert ( param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD ), "The default __repr__ doesn't support *args or **kwargs" assert hasattr(self, name), ( "Attribute {} not found! " "Default __repr__ only works if attributes match the constructor.".format(name) ) attr = getattr(self, name) default = param.default if default is attr: continue attr_str = pprint.pformat(attr) if "\n" in attr_str: # don't show it if pformat decides to use >1 lines attr_str = "..." argstr.append("{}={}".format(name, attr_str)) return "{}({})".format(classname, ", ".join(argstr)) except AssertionError: return super().__repr__() __str__ = __repr__ The provided code snippet includes necessary dependencies for implementing the `_transform_to_aug` function. Write a Python function `def _transform_to_aug(tfm_or_aug)` to solve the following problem: Wrap Transform into Augmentation. Private, used internally to implement augmentations. Here is the function: def _transform_to_aug(tfm_or_aug): """ Wrap Transform into Augmentation. Private, used internally to implement augmentations. """ assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug if isinstance(tfm_or_aug, Augmentation): return tfm_or_aug else: class _TransformToAug(Augmentation): def __init__(self, tfm: Transform): self.tfm = tfm def get_transform(self, *args): return self.tfm def __repr__(self): return repr(self.tfm) __str__ = __repr__ return _TransformToAug(tfm_or_aug)
Wrap Transform into Augmentation. Private, used internally to implement augmentations.
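A small sketch: wrapping a deterministic `HFlipTransform` so it can be mixed into a list of `Augmentation`s, which is what `AugmentationList` does internally with raw transforms:

from fvcore.transforms.transform import HFlipTransform

aug = _transform_to_aug(HFlipTransform(width=640))
print(isinstance(aug, Augmentation))  # True
print(aug.get_transform())            # always returns the wrapped HFlipTransform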
3,578
import inspect import numpy as np import pprint from typing import Any, List, Optional, Tuple, Union from fvcore.transforms.transform import Transform, TransformList class Augmentation: """ Augmentation defines (often random) policies/strategies to generate :class:`Transform` from data. It is often used for pre-processing of input data. A "policy" that generates a :class:`Transform` may, in the most general case, need arbitrary information from input data in order to determine what transforms to apply. Therefore, each :class:`Augmentation` instance defines the arguments needed by its :meth:`get_transform` method. When called with the positional arguments, the :meth:`get_transform` method executes the policy. Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, but not how to execute the actual transform operations to those data. Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform. The returned `Transform` object is meant to describe deterministic transformation, which means it can be re-applied on associated data, e.g. the geometry of an image and its segmentation masks need to be transformed together. (If such re-application is not needed, then determinism is not a crucial requirement.) """ input_args: Optional[Tuple[str]] = None """ Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``. By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only contain "image". As long as the argument name convention is followed, there is no need for users to touch this attribute. """ def _init(self, params=None): if params: for k, v in params.items(): if k != "self" and not k.startswith("_"): setattr(self, k, v) def get_transform(self, *args) -> Transform: """ Execute the policy based on input data, and decide what transform to apply to inputs. Args: args: Any fixed-length positional arguments. By default, the name of the arguments should exist in the :class:`AugInput` to be used. Returns: Transform: Returns the deterministic transform to apply to the input. Examples: :: class MyAug: # if a policy needs to know both image and semantic segmentation def get_transform(image, sem_seg) -> T.Transform: pass tfm: Transform = MyAug().get_transform(image, sem_seg) new_image = tfm.apply_image(image) Notes: Users can freely use arbitrary new argument names in custom :meth:`get_transform` method, as long as they are available in the input data. In detectron2 we use the following convention: * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes of N instances. Each is in XYXY format in unit of absolute coordinates. * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. We do not specify convention for other types and do not include builtin :class:`Augmentation` that uses other types in detectron2. """ raise NotImplementedError def __call__(self, aug_input) -> Transform: """ Augment the given `aug_input` **in-place**, and return the transform that's used. This method will be called to apply the augmentation. In most augmentation, it is enough to use the default implementation, which calls :meth:`get_transform` using the inputs. But a subclass can overwrite it to have more complicated logic. Args: aug_input (AugInput): an object that has attributes needed by this augmentation (defined by ``self.get_transform``). 
Its ``transform`` method will be called to in-place transform it. Returns: Transform: the transform that is applied on the input. """ args = _get_aug_input_args(self, aug_input) tfm = self.get_transform(*args) assert isinstance(tfm, (Transform, TransformList)), ( f"{type(self)}.get_transform must return an instance of Transform! " f"Got {type(tfm)} instead." ) aug_input.transform(tfm) return tfm def _rand_range(self, low=1.0, high=None, size=None): """ Uniform float random number between low and high. """ if high is None: low, high = 0, low if size is None: size = [] return np.random.uniform(low, high, size) def __repr__(self): """ Produce something like: "MyAugmentation(field1={self.field1}, field2={self.field2})" """ try: sig = inspect.signature(self.__init__) classname = type(self).__name__ argstr = [] for name, param in sig.parameters.items(): assert ( param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD ), "The default __repr__ doesn't support *args or **kwargs" assert hasattr(self, name), ( "Attribute {} not found! " "Default __repr__ only works if attributes match the constructor.".format(name) ) attr = getattr(self, name) default = param.default if default is attr: continue attr_str = pprint.pformat(attr) if "\n" in attr_str: # don't show it if pformat decides to use >1 lines attr_str = "..." argstr.append("{}={}".format(name, attr_str)) return "{}({})".format(classname, ", ".join(argstr)) except AssertionError: return super().__repr__() __str__ = __repr__ class AugInput: """ Input that can be used with :meth:`Augmentation.__call__`. This is a standard implementation for the majority of use cases. This class provides the standard attributes **"image", "boxes", "sem_seg"** defined in :meth:`__init__` and they may be needed by different augmentations. Most augmentation policies do not need attributes beyond these three. After applying augmentations to these attributes (using :meth:`AugInput.transform`), the returned transforms can then be used to transform other data structures that users have. Examples: :: input = AugInput(image, boxes=boxes) tfms = augmentation(input) transformed_image = input.image transformed_boxes = input.boxes transformed_other_data = tfms.apply_other(other_data) An extended project that works with new data types may implement augmentation policies that need other inputs. An algorithm may need to transform inputs in a way different from the standard approach defined in this class. In those rare situations, users can implement a class similar to this class, that satify the following condition: * The input must provide access to these data in the form of attribute access (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image" and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg". * The input must have a ``transform(tfm: Transform) -> None`` method which in-place transforms all its attributes. """ # TODO maybe should support more builtin data types here def __init__( self, image: np.ndarray, *, boxes: Optional[np.ndarray] = None, sem_seg: Optional[np.ndarray] = None, ): """ Args: image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. The meaning of C is up to users. boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element is an integer label of pixel. 
""" _check_img_dtype(image) self.image = image self.boxes = boxes self.sem_seg = sem_seg def transform(self, tfm: Transform) -> None: """ In-place transform all attributes of this class. By "in-place", it means after calling this method, accessing an attribute such as ``self.image`` will return transformed data. """ self.image = tfm.apply_image(self.image) if self.boxes is not None: self.boxes = tfm.apply_box(self.boxes) if self.sem_seg is not None: self.sem_seg = tfm.apply_segmentation(self.sem_seg) def apply_augmentations( self, augmentations: List[Union[Augmentation, Transform]] ) -> TransformList: """ Equivalent of ``AugmentationList(augmentations)(self)`` """ return AugmentationList(augmentations)(self) The provided code snippet includes necessary dependencies for implementing the `apply_augmentations` function. Write a Python function `def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs)` to solve the following problem: Use ``T.AugmentationList(augmentations)(inputs)`` instead. Here is the function: def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs): """ Use ``T.AugmentationList(augmentations)(inputs)`` instead. """ if isinstance(inputs, np.ndarray): # handle the common case of image-only Augmentation, also for backward compatibility image_only = True inputs = AugInput(inputs) else: image_only = False tfms = inputs.apply_augmentations(augmentations) return inputs.image if image_only else inputs, tfms
Use ``T.AugmentationList(augmentations)(inputs)`` instead.
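A sketch of the image-only path, which this helper keeps for backward compatibility; `prob=1.0` makes the flip deterministic:

import numpy as np
from detectron2.data import transforms as T

image = np.zeros((480, 640, 3), dtype=np.uint8)
new_image, tfms = apply_augmentations([T.RandomFlip(prob=1.0)], image)
print(new_image.shape)  # (480, 640, 3)
print(tfms)             # TransformList containing the applied HFlipTransform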
3,579
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
    CropTransform, HFlipTransform, NoOpTransform, Transform, TransformList,
)
from PIL import Image

The provided code snippet includes necessary dependencies for implementing the `HFlip_rotated_box` function. Write a Python function `def HFlip_rotated_box(transform, rotated_boxes)` to solve the following problem:

Apply the horizontal flip transform on rotated boxes.

Args:
    rotated_boxes (ndarray): Nx5 floating point array of (x_center, y_center, width, height, angle_degrees) format in absolute coordinates.

Here is the function:

def HFlip_rotated_box(transform, rotated_boxes):
    """
    Apply the horizontal flip transform on rotated boxes.

    Args:
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.
    """
    # Transform x_center
    rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
    # Transform angle
    rotated_boxes[:, 4] = -rotated_boxes[:, 4]
    return rotated_boxes
Apply the horizontal flip transform on rotated boxes. Args: rotated_boxes (ndarray): Nx5 floating point array of (x_center, y_center, width, height, angle_degrees) format in absolute coordinates.
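A quick numeric check with one hypothetical box and an image width of 640; note the function modifies the array in place as well as returning it:

import numpy as np
from fvcore.transforms.transform import HFlipTransform

boxes = np.array([[100.0, 50.0, 40.0, 20.0, 30.0]])  # (cx, cy, w, h, angle)
flipped = HFlip_rotated_box(HFlipTransform(width=640), boxes)
print(flipped)  # [[540.  50.  40.  20. -30.]]  -- cx mirrored, angle negated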
3,580
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
    CropTransform, HFlipTransform, NoOpTransform, Transform, TransformList,
)
from PIL import Image

The provided code snippet includes necessary dependencies for implementing the `Resize_rotated_box` function. Write a Python function `def Resize_rotated_box(transform, rotated_boxes)` to solve the following problem:

Apply the resizing transform on rotated boxes. For details of how these (approximation) formulas are derived, please refer to :meth:`RotatedBoxes.scale`.

Args:
    rotated_boxes (ndarray): Nx5 floating point array of (x_center, y_center, width, height, angle_degrees) format in absolute coordinates.

Here is the function:

def Resize_rotated_box(transform, rotated_boxes):
    """
    Apply the resizing transform on rotated boxes. For details of how these
    (approximation) formulas are derived, please refer to
    :meth:`RotatedBoxes.scale`.

    Args:
        rotated_boxes (ndarray): Nx5 floating point array of
            (x_center, y_center, width, height, angle_degrees) format
            in absolute coordinates.
    """
    scale_factor_x = transform.new_w * 1.0 / transform.w
    scale_factor_y = transform.new_h * 1.0 / transform.h
    rotated_boxes[:, 0] *= scale_factor_x
    rotated_boxes[:, 1] *= scale_factor_y
    theta = rotated_boxes[:, 4] * np.pi / 180.0
    c = np.cos(theta)
    s = np.sin(theta)
    rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
    rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
    rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi

    return rotated_boxes
Apply the resizing transform on rotated boxes. For details of how these (approximation) formulas are derived, please refer to :meth:`RotatedBoxes.scale`. Args: rotated_boxes (ndarray): Nx5 floating point array of (x_center, y_center, width, height, angle_degrees) format in absolute coordinates.
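A numeric sketch using detectron2's `ResizeTransform`, which carries the `w`, `h`, `new_w`, `new_h` attributes the function reads; at uniform 0.5 scale and zero angle the formulas reduce to plain scaling:

import numpy as np
from detectron2.data.transforms import ResizeTransform

boxes = np.array([[100.0, 50.0, 40.0, 20.0, 0.0]])
# Resize a 480x640 image to 240x320 (half scale on both axes).
tfm = ResizeTransform(h=480, w=640, new_h=240, new_w=320)
print(Resize_rotated_box(tfm, boxes))  # [[50. 25. 20. 10.  0.]]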
3,581
import contextlib import datetime import io import json import logging import numpy as np import os import shutil import pycocotools.mask as mask_util from fvcore.common.timer import Timer from iopath.common.file_io import file_lock from PIL import Image from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes from detectron2.utils.file_io import PathManager from .. import DatasetCatalog, MetadataCatalog logger = logging.getLogger(__name__) def convert_to_coco_dict(dataset_name): """ Convert an instance detection/segmentation or keypoint detection dataset in detectron2's standard format into COCO json format. Generic dataset description can be found here: https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset COCO data format description can be found here: http://cocodataset.org/#format-data Args: dataset_name (str): name of the source dataset Must be registered in DatastCatalog and in detectron2's standard format. Must have corresponding metadata "thing_classes" Returns: coco_dict: serializable dict in COCO json format """ dataset_dicts = DatasetCatalog.get(dataset_name) metadata = MetadataCatalog.get(dataset_name) # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa categories = [ {"id": reverse_id_mapper(id), "name": name} for id, name in enumerate(metadata.thing_classes) ] logger.info("Converting dataset dicts into COCO format") coco_images = [] coco_annotations = [] for image_id, image_dict in enumerate(dataset_dicts): coco_image = { "id": image_dict.get("image_id", image_id), "width": int(image_dict["width"]), "height": int(image_dict["height"]), "file_name": str(image_dict["file_name"]), } coco_images.append(coco_image) anns_per_image = image_dict.get("annotations", []) for annotation in anns_per_image: # create a new dict with only COCO fields coco_annotation = {} # COCO requirement: XYWH box format for axis-align and XYWHA for rotated bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to has length 4 or 5. 
Got {bbox}.") from_bbox_mode = annotation["bbox_mode"] to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) # COCO requirement: instance area if "segmentation" in annotation: # Computing areas for instances by counting the pixels segmentation = annotation["segmentation"] # TODO: check segmentation type: RLE, BinaryMask or Polygon if isinstance(segmentation, list): polygons = PolygonMasks([segmentation]) area = polygons.area()[0].item() elif isinstance(segmentation, dict): # RLE area = mask_util.area(segmentation).item() else: raise TypeError(f"Unknown segmentation type {type(segmentation)}!") else: # Computing areas using bounding boxes if to_bbox_mode == BoxMode.XYWH_ABS: bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) area = Boxes([bbox_xy]).area()[0].item() else: area = RotatedBoxes([bbox]).area()[0].item() if "keypoints" in annotation: keypoints = annotation["keypoints"] # list[int] for idx, v in enumerate(keypoints): if idx % 3 != 2: # COCO's segmentation coordinates are floating points in [0, H or W], # but keypoint coordinates are integers in [0, H-1 or W-1] # For COCO format consistency we substract 0.5 # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163 keypoints[idx] = v - 0.5 if "num_keypoints" in annotation: num_keypoints = annotation["num_keypoints"] else: num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) # COCO requirement: # linking annotations to images # "id" field must start with 1 coco_annotation["id"] = len(coco_annotations) + 1 coco_annotation["image_id"] = coco_image["id"] coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] coco_annotation["area"] = float(area) coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"])) # Add optional fields if "keypoints" in annotation: coco_annotation["keypoints"] = keypoints coco_annotation["num_keypoints"] = num_keypoints if "segmentation" in annotation: seg = coco_annotation["segmentation"] = annotation["segmentation"] if isinstance(seg, dict): # RLE counts = seg["counts"] if not isinstance(counts, str): # make it json-serializable seg["counts"] = counts.decode("ascii") coco_annotations.append(coco_annotation) logger.info( "Conversion finished, " f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" ) info = { "date_created": str(datetime.datetime.now()), "description": "Automatically generated COCO json file for Detectron2.", } coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None} if len(coco_annotations) > 0: coco_dict["annotations"] = coco_annotations return coco_dict PathManager = PathManagerBase() PathManager.register_handler(HTTPURLHandler()) PathManager.register_handler(OneDrivePathHandler()) PathManager.register_handler(Detectron2Handler()) The provided code snippet includes necessary dependencies for implementing the `convert_to_coco_json` function. Write a Python function `def convert_to_coco_json(dataset_name, output_file, allow_cached=True)` to solve the following problem: Converts dataset into COCO format and saves it to a json file. dataset_name must be registered in DatasetCatalog and in detectron2's standard format. 
Args: dataset_name: reference from the config file to the catalogs must be registered in DatasetCatalog and in detectron2's standard format output_file: path of json file that will be saved to allow_cached: if json file is already present then skip conversion Here is the function: def convert_to_coco_json(dataset_name, output_file, allow_cached=True): """ Converts dataset into COCO format and saves it to a json file. dataset_name must be registered in DatasetCatalog and in detectron2's standard format. Args: dataset_name: reference from the config file to the catalogs must be registered in DatasetCatalog and in detectron2's standard format output_file: path of json file that will be saved to allow_cached: if json file is already present then skip conversion """ # TODO: The dataset or the conversion script *may* change, # a checksum would be useful for validating the cached data PathManager.mkdirs(os.path.dirname(output_file)) with file_lock(output_file): if PathManager.exists(output_file) and allow_cached: logger.warning( f"Using previously cached COCO format annotations at '{output_file}'. " "You need to clear the cache file if your dataset has been modified." ) else: logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") coco_dict = convert_to_coco_dict(dataset_name) logger.info(f"Caching COCO format annotations at '{output_file}' ...") tmp_file = output_file + ".tmp" with PathManager.open(tmp_file, "w") as f: json.dump(coco_dict, f) shutil.move(tmp_file, output_file)
Converts dataset into COCO format and saves it to a json file. dataset_name must be registered in DatasetCatalog and in detectron2's standard format. Args: dataset_name: reference from the config file to the catalogs must be registered in DatasetCatalog and in detectron2's standard format output_file: path of json file that will be saved to allow_cached: if json file is already present then skip conversion
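A minimal call sketch; the dataset name and output path are hypothetical, and the dataset must already be registered in `DatasetCatalog` with `thing_classes` metadata:

convert_to_coco_json(
    "my_dataset_train",                               # hypothetical registered name
    output_file="output/my_dataset_train_coco.json",  # arbitrary destination
    allow_cached=False,                               # force re-conversion
)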
3,582
import os from detectron2.data import DatasetCatalog, MetadataCatalog from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic from .cityscapes_panoptic import register_all_cityscapes_panoptic from .coco import load_sem_seg, register_coco_instances from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated from .lvis import get_lvis_instances_meta, register_lvis_instances from .pascal_voc import register_pascal_voc _PREDEFINED_SPLITS_COCO = {} _PREDEFINED_SPLITS_COCO["coco"] = { "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"), "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"), "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"), "coco_2014_valminusminival": ( "coco/val2014", "coco/annotations/instances_valminusminival2014.json", ), "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"), "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"), "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"), "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"), "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"), } _PREDEFINED_SPLITS_COCO["coco_person"] = { "keypoints_coco_2014_train": ( "coco/train2014", "coco/annotations/person_keypoints_train2014.json", ), "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"), "keypoints_coco_2014_minival": ( "coco/val2014", "coco/annotations/person_keypoints_minival2014.json", ), "keypoints_coco_2014_valminusminival": ( "coco/val2014", "coco/annotations/person_keypoints_valminusminival2014.json", ), "keypoints_coco_2017_train": ( "coco/train2017", "coco/annotations/person_keypoints_train2017.json", ), "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"), "keypoints_coco_2017_val_100": ( "coco/val2017", "coco/annotations/person_keypoints_val2017_100.json", ), } _PREDEFINED_SPLITS_COCO_PANOPTIC = { "coco_2017_train_panoptic": ( # This is the original panoptic annotation directory "coco/panoptic_train2017", "coco/annotations/panoptic_train2017.json", # This directory contains semantic annotations that are # converted from panoptic annotations. # It is used by PanopticFPN. # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py # to create these directories. "coco/panoptic_stuff_train2017", ), "coco_2017_val_panoptic": ( "coco/panoptic_val2017", "coco/annotations/panoptic_val2017.json", "coco/panoptic_stuff_val2017", ), "coco_2017_val_100_panoptic": ( "coco/panoptic_val2017_100", "coco/annotations/panoptic_val2017_100.json", "coco/panoptic_stuff_val2017_100", ), } def _get_builtin_metadata(dataset_name): if dataset_name == "coco": return _get_coco_instances_meta() if dataset_name == "coco_panoptic_separated": return _get_coco_panoptic_separated_meta() elif dataset_name == "coco_panoptic_standard": meta = {} # The following metadata maps contiguous id from [0, #thing categories + # #stuff categories) to their names and colors. We have to replica of the # same name and color under "thing_*" and "stuff_*" because the current # visualization function in D2 handles thing and class classes differently # due to some heuristic used in Panoptic FPN. We keep the same naming to # enable reusing existing visualization functions. 
thing_classes = [k["name"] for k in COCO_CATEGORIES] thing_colors = [k["color"] for k in COCO_CATEGORIES] stuff_classes = [k["name"] for k in COCO_CATEGORIES] stuff_colors = [k["color"] for k in COCO_CATEGORIES] meta["thing_classes"] = thing_classes meta["thing_colors"] = thing_colors meta["stuff_classes"] = stuff_classes meta["stuff_colors"] = stuff_colors # Convert category id for training: # category id: like semantic segmentation, it is the class id for each # pixel. Since there are some classes not used in evaluation, the category # id is not always contiguous and thus we have two set of category ids: # - original category id: category id in the original dataset, mainly # used for evaluation. # - contiguous category id: [0, #classes), in order to train the linear # softmax classifier. thing_dataset_id_to_contiguous_id = {} stuff_dataset_id_to_contiguous_id = {} for i, cat in enumerate(COCO_CATEGORIES): if cat["isthing"]: thing_dataset_id_to_contiguous_id[cat["id"]] = i else: stuff_dataset_id_to_contiguous_id[cat["id"]] = i meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id return meta elif dataset_name == "coco_person": return { "thing_classes": ["person"], "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, } elif dataset_name == "cityscapes": # fmt: off CITYSCAPES_THING_CLASSES = [ "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle", ] CITYSCAPES_STUFF_CLASSES = [ "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle", ] # fmt: on return { "thing_classes": CITYSCAPES_THING_CLASSES, "stuff_classes": CITYSCAPES_STUFF_CLASSES, } raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) def register_coco_instances(name, metadata, json_file, image_root): """ Register a dataset in COCO's json annotation format for instance detection, instance segmentation and keypoint detection. (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. `instances*.json` and `person_keypoints*.json` in the dataset). This is an example of how to register a new dataset. You can do something similar to this function, to register new datasets. Args: name (str): the name that identifies a dataset, e.g. "coco_2014_train". metadata (dict): extra metadata associated with this dataset. You can leave it as an empty dict. json_file (str): path to the json instance annotation file. image_root (str or path-like): directory which contains all the images. """ assert isinstance(name, str), name assert isinstance(json_file, (str, os.PathLike)), json_file assert isinstance(image_root, (str, os.PathLike)), image_root # 1. register a function which returns dicts DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) # 2. Optionally, add metadata about this dataset, # since they might be useful in evaluation, visualization or logging MetadataCatalog.get(name).set( json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata ) def register_coco_panoptic( name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None ): """ Register a "standard" version of COCO panoptic segmentation dataset named `name`. The dictionaries in this registered dataset follows detectron2's standard format. 
Hence it's called "standard". Args: name (str): the name that identifies a dataset, e.g. "coco_2017_train_panoptic" metadata (dict): extra metadata associated with this dataset. image_root (str): directory which contains all the images panoptic_root (str): directory which contains panoptic annotation images in COCO format panoptic_json (str): path to the json panoptic annotation file in COCO format sem_seg_root (none): not used, to be consistent with `register_coco_panoptic_separated`. instances_json (str): path to the json instance annotation file """ panoptic_name = name DatasetCatalog.register( panoptic_name, lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata), ) MetadataCatalog.get(panoptic_name).set( panoptic_root=panoptic_root, image_root=image_root, panoptic_json=panoptic_json, json_file=instances_json, evaluator_type="coco_panoptic_seg", ignore_label=255, label_divisor=1000, **metadata, ) def register_coco_panoptic_separated( name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json ): """ Register a "separated" version of COCO panoptic segmentation dataset named `name`. The annotations in this registered dataset will contain both instance annotations and semantic annotations, each with its own contiguous ids. Hence it's called "separated". It follows the setting used by the PanopticFPN paper: 1. The instance annotations directly come from polygons in the COCO instances annotation task, rather than from the masks in the COCO panoptic annotations. The two format have small differences: Polygons in the instance annotations may have overlaps. The mask annotations are produced by labeling the overlapped polygons with depth ordering. 2. The semantic annotations are converted from panoptic annotations, where all "things" are assigned a semantic id of 0. All semantic categories will therefore have ids in contiguous range [1, #stuff_categories]. This function will also register a pure semantic segmentation dataset named ``name + '_stuffonly'``. Args: name (str): the name that identifies a dataset, e.g. "coco_2017_train_panoptic" metadata (dict): extra metadata associated with this dataset. image_root (str): directory which contains all the images panoptic_root (str): directory which contains panoptic annotation images panoptic_json (str): path to the json panoptic annotation file sem_seg_root (str): directory which contains all the ground truth segmentation annotations. instances_json (str): path to the json instance annotation file """ panoptic_name = name + "_separated" DatasetCatalog.register( panoptic_name, lambda: merge_to_panoptic( load_coco_json(instances_json, image_root, panoptic_name), load_sem_seg(sem_seg_root, image_root), ), ) MetadataCatalog.get(panoptic_name).set( panoptic_root=panoptic_root, image_root=image_root, panoptic_json=panoptic_json, sem_seg_root=sem_seg_root, json_file=instances_json, # TODO rename evaluator_type="coco_panoptic_seg", ignore_label=255, **metadata, ) semantic_name = name + "_stuffonly" DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) MetadataCatalog.get(semantic_name).set( sem_seg_root=sem_seg_root, image_root=image_root, evaluator_type="sem_seg", ignore_label=255, **metadata, ) def register_all_coco(root): for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): for key, (image_root, json_file) in splits_per_dataset.items(): # Assume pre-defined datasets live in `./datasets`. 
register_coco_instances( key, _get_builtin_metadata(dataset_name), os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) for ( prefix, (panoptic_root, panoptic_json, semantic_root), ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): prefix_instances = prefix[: -len("_panoptic")] instances_meta = MetadataCatalog.get(prefix_instances) image_root, instances_json = instances_meta.image_root, instances_meta.json_file # The "separated" version of COCO panoptic segmentation dataset, # e.g. used by Panoptic FPN register_coco_panoptic_separated( prefix, _get_builtin_metadata("coco_panoptic_separated"), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), os.path.join(root, semantic_root), instances_json, ) # The "standard" version of COCO panoptic segmentation dataset, # e.g. used by Panoptic-DeepLab register_coco_panoptic( prefix, _get_builtin_metadata("coco_panoptic_standard"), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), instances_json, )
null
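For context, detectron2 invokes these registration helpers at import time, rooted at the `DETECTRON2_DATASETS` environment variable (default `./datasets`); a manual sketch:

import os

_root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
register_all_coco(_root)

from detectron2.data import DatasetCatalog
# dicts = DatasetCatalog.get("coco_2017_val")  # loads once the files exist on disk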
3,583
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc

_PREDEFINED_SPLITS_LVIS = {
    "lvis_v1": {
        "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
        "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
        "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
        "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
    },
    "lvis_v0.5": {
        "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
        "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
        "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
        "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
    },
    "lvis_v0.5_cocofied": {
        "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
        "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
    },
}

def register_lvis_instances(name, metadata, json_file, image_root):
    """
    Register a dataset in LVIS's json annotation format for
    instance detection and segmentation.

    Args:
        name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
        metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
    """
    DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
    )

def get_lvis_instances_meta(dataset_name):
    """
    Load LVIS metadata.

    Args:
        dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").

    Returns:
        dict: LVIS metadata with keys: thing_classes
    """
    if "cocofied" in dataset_name:
        return _get_coco_instances_meta()
    if "v0.5" in dataset_name:
        return _get_lvis_instances_meta_v0_5()
    elif "v1" in dataset_name:
        return _get_lvis_instances_meta_v1()
    raise ValueError("No built-in metadata for dataset {}".format(dataset_name))

def register_all_lvis(root):
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            register_lvis_instances(
                key,
                get_lvis_instances_meta(dataset_name),
                os.path.join(root, json_file) if "://" not in json_file else json_file,
                os.path.join(root, image_root),
            )
null
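A hedged usage sketch of register_lvis_instances as defined above; the dataset name and annotation path are placeholders:

from detectron2.data import DatasetCatalog

register_lvis_instances(
    "my_lvis_train",
    get_lvis_instances_meta("lvis_v1"),
    "lvis/my_train.json",   # hypothetical LVIS-format annotation file
    "coco/",                # LVIS reuses the COCO image folders
)
dicts = DatasetCatalog.get("my_lvis_train")  # lazily invokes load_lvis_json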
3,584
import os

from detectron2.data import DatasetCatalog, MetadataCatalog

from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc

_RAW_CITYSCAPES_SPLITS = {
    "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
    "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
    "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
}

def _get_builtin_metadata(dataset_name):
    if dataset_name == "coco":
        return _get_coco_instances_meta()
    if dataset_name == "coco_panoptic_separated":
        return _get_coco_panoptic_separated_meta()
    elif dataset_name == "coco_panoptic_standard":
        meta = {}
        # The following metadata maps contiguous ids from [0, #thing categories +
        # #stuff categories) to their names and colors. We keep replicas of the
        # same name and color under "thing_*" and "stuff_*" because the current
        # visualization function in D2 handles thing and stuff classes differently
        # due to some heuristic used in Panoptic FPN. We keep the same naming to
        # enable reusing existing visualization functions.
        thing_classes = [k["name"] for k in COCO_CATEGORIES]
        thing_colors = [k["color"] for k in COCO_CATEGORIES]
        stuff_classes = [k["name"] for k in COCO_CATEGORIES]
        stuff_colors = [k["color"] for k in COCO_CATEGORIES]
        meta["thing_classes"] = thing_classes
        meta["thing_colors"] = thing_colors
        meta["stuff_classes"] = stuff_classes
        meta["stuff_colors"] = stuff_colors

        # Convert category ids for training:
        #   category id: like semantic segmentation, it is the class id for each
        #   pixel. Since some classes are not used in evaluation, the category
        #   id is not always contiguous and thus we have two sets of category ids:
        #     - original category id: category id in the original dataset, mainly
        #       used for evaluation.
        #     - contiguous category id: [0, #classes), in order to train the linear
        #       softmax classifier.
        thing_dataset_id_to_contiguous_id = {}
        stuff_dataset_id_to_contiguous_id = {}

        for i, cat in enumerate(COCO_CATEGORIES):
            if cat["isthing"]:
                thing_dataset_id_to_contiguous_id[cat["id"]] = i
            else:
                stuff_dataset_id_to_contiguous_id[cat["id"]] = i
        meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
        meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id

        return meta
    elif dataset_name == "coco_person":
        return {
            "thing_classes": ["person"],
            "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
            "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
            "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
        }
    elif dataset_name == "cityscapes":
        # fmt: off
        CITYSCAPES_THING_CLASSES = [
            "person", "rider", "car", "truck",
            "bus", "train", "motorcycle", "bicycle",
        ]
        CITYSCAPES_STUFF_CLASSES = [
            "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
            "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
            "truck", "bus", "train", "motorcycle", "bicycle",
        ]
        # fmt: on
        return {
            "thing_classes": CITYSCAPES_THING_CLASSES,
            "stuff_classes": CITYSCAPES_STUFF_CLASSES,
        }
    raise KeyError("No built-in metadata for dataset {}".format(dataset_name))

def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
        from_json (bool): whether to read annotations from the raw json file or the png files.
        to_polygons (bool): whether to represent the segmentation as polygons
            (COCO's format) instead of masks (cityscapes's format).

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """
    if from_json:
        assert to_polygons, (
            "Cityscapes's json annotations are in polygon format. "
            "Converting to mask format is not supported now."
        )
    files = _get_cityscapes_files(image_dir, gt_dir)

    logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicated work and it
    # can take up to 10 minutes on an 8-GPU server.
    pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))

    ret = pool.map(
        functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
        files,
    )
    logger.info("Loaded {} images from {}".format(len(ret), image_dir))

    # Map cityscapes ids to contiguous ids
    from cityscapesscripts.helpers.labels import labels

    labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
    dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
    for dict_per_image in ret:
        for anno in dict_per_image["annotations"]:
            anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
    return ret

def load_cityscapes_semantic(image_dir, gt_dir):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".

    Returns:
        list[dict]: a list of dict, each has "file_name" and "sem_seg_file_name".
    """
    ret = []
    # gt_dir is small and contains many small files; it makes sense to fetch it to local disk first.
    gt_dir = PathManager.get_local_path(gt_dir)
    for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
        label_file = label_file.replace("labelIds", "labelTrainIds")

        with PathManager.open(json_file, "r") as f:
            jsonobj = json.load(f)
        ret.append(
            {
                "file_name": image_file,
                "sem_seg_file_name": label_file,
                "height": jsonobj["imgHeight"],
                "width": jsonobj["imgWidth"],
            }
        )
    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    return ret

def register_all_cityscapes(root):
    for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
        meta = _get_builtin_metadata("cityscapes")
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)

        inst_key = key.format(task="instance_seg")
        DatasetCatalog.register(
            inst_key,
            lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
                x, y, from_json=True, to_polygons=True
            ),
        )
        MetadataCatalog.get(inst_key).set(
            image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
        )

        sem_key = key.format(task="sem_seg")
        DatasetCatalog.register(
            sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
        )
        MetadataCatalog.get(sem_key).set(
            image_dir=image_dir,
            gt_dir=gt_dir,
            evaluator_type="cityscapes_sem_seg",
            ignore_label=255,
            **meta,
        )
null
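To make the id-mapping logic above concrete, a small self-contained sketch using a made-up two-category list in place of COCO_CATEGORIES:

fake_categories = [
    {"id": 1, "isthing": 1, "name": "person"},   # thing -> contiguous id 0
    {"id": 92, "isthing": 0, "name": "banner"},  # stuff -> contiguous id 1
]
thing_map, stuff_map = {}, {}
for i, cat in enumerate(fake_categories):
    (thing_map if cat["isthing"] else stuff_map)[cat["id"]] = i
assert thing_map == {1: 0} and stuff_map == {92: 1}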
3,585
import os

from detectron2.data import DatasetCatalog, MetadataCatalog

from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
from .cityscapes_panoptic import register_all_cityscapes_panoptic
from .coco import load_sem_seg, register_coco_instances
from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
from .lvis import get_lvis_instances_meta, register_lvis_instances
from .pascal_voc import register_pascal_voc

def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )

def register_all_pascal_voc(root):
    SPLITS = [
        ("voc_2007_trainval", "VOC2007", "trainval"),
        ("voc_2007_train", "VOC2007", "train"),
        ("voc_2007_val", "VOC2007", "val"),
        ("voc_2007_test", "VOC2007", "test"),
        ("voc_2012_trainval", "VOC2012", "trainval"),
        ("voc_2012_train", "VOC2012", "train"),
        ("voc_2012_val", "VOC2012", "val"),
    ]
    for name, dirname, split in SPLITS:
        year = 2007 if "2007" in name else 2012
        register_pascal_voc(name, os.path.join(root, dirname), split, year)
        MetadataCatalog.get(name).evaluator_type = "pascal_voc"
null
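A hedged usage sketch for registering one extra VOC-style split; it assumes the usual layout (Annotations/, JPEGImages/, ImageSets/Main/<split>.txt) under a placeholder directory:

register_pascal_voc("my_voc_train", "datasets/MyVOC2012", "train", 2012)
MetadataCatalog.get("my_voc_train").evaluator_type = "pascal_voc"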
3,586
import os from detectron2.data import DatasetCatalog, MetadataCatalog from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic from .cityscapes_panoptic import register_all_cityscapes_panoptic from .coco import load_sem_seg, register_coco_instances from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated from .lvis import get_lvis_instances_meta, register_lvis_instances from .pascal_voc import register_pascal_voc ADE20K_SEM_SEG_CATEGORIES = [ "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa ] def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): """ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are treated as ground truth annotations and all files under "image_root" with "image_ext" extension as input images. Ground truth and input images are matched using file paths relative to "gt_root" and "image_root" respectively without taking into account file extensions. This works for COCO as well as some other datasets. Args: gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation annotations are stored as images with integer values in pixels that represent corresponding semantic labels. 
image_root (str): the directory where the input images are. gt_ext (str): file extension for ground truth annotations. image_ext (str): file extension for input images. Returns: list[dict]: a list of dicts in detectron2 standard format without instance-level annotation. Notes: 1. This function does not read the image and ground truth files. The results do not have the "image" and "sem_seg" fields. """ # We match input images with ground truth based on their relative filepaths (without file # extensions) starting from 'image_root' and 'gt_root' respectively. def file2id(folder_path, file_path): # extract relative path starting from `folder_path` image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) # remove file extension image_id = os.path.splitext(image_id)[0] return image_id input_files = sorted( (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=lambda file_path: file2id(image_root, file_path), ) gt_files = sorted( (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images if len(input_files) != len(gt_files): logger.warn( "Directory {} and {} has {} and {} files, respectively.".format( image_root, gt_root, len(input_files), len(gt_files) ) ) input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] intersect = list(set(input_basenames) & set(gt_basenames)) # sort, otherwise each worker may obtain a list[dict] in different order intersect = sorted(intersect) logger.warn("Will use their intersection of {} files.".format(len(intersect))) input_files = [os.path.join(image_root, f + image_ext) for f in intersect] gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] for (img_path, gt_path) in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path dataset_dicts.append(record) return dataset_dicts def register_all_ade20k(root): root = os.path.join(root, "ADEChallengeData2016") for name, dirname in [("train", "training"), ("val", "validation")]: image_dir = os.path.join(root, "images", dirname) gt_dir = os.path.join(root, "annotations_detectron2", dirname) name = f"ade20k_sem_seg_{name}" DatasetCatalog.register( name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg") ) MetadataCatalog.get(name).set( stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:], image_root=image_dir, sem_seg_root=gt_dir, evaluator_type="sem_seg", ignore_label=255, )
null
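A hedged usage sketch of load_sem_seg; the directories are placeholders and must contain png label maps and jpg images whose relative paths match:

dicts = load_sem_seg(
    gt_root="datasets/my_sem_seg/annotations/train",
    image_root="datasets/my_sem_seg/images/train",
    gt_ext="png",
    image_ext="jpg",
)
# each record: {"file_name": ".../x.jpg", "sem_seg_file_name": ".../x.png"}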
3,587
import json import logging import os from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES from detectron2.utils.file_io import PathManager def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta): """ Args: image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/cityscapes_panoptic_train". gt_json (str): path to the json file. e.g., "~/cityscapes/gtFine/cityscapes_panoptic_train.json". meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id" and "stuff_dataset_id_to_contiguous_id" to map category ids to contiguous ids for training. Returns: list[dict]: a list of dicts in Detectron2 standard format. (See `Using Custom Datasets </tutorials/datasets.html>`_ ) """ def _convert_category_id(segment_info, meta): if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]: segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][ segment_info["category_id"] ] else: segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][ segment_info["category_id"] ] return segment_info assert os.path.exists( gt_json ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa with open(gt_json) as f: json_info = json.load(f) files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info) ret = [] for image_file, label_file, segments_info in files: sem_label_file = ( image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png" ) segments_info = [_convert_category_id(x, meta) for x in segments_info] ret.append( { "file_name": image_file, "image_id": "_".join( os.path.splitext(os.path.basename(image_file))[0].split("_")[:3] ), "sem_seg_file_name": sem_label_file, "pan_seg_file_name": label_file, "segments_info": segments_info, } ) assert len(ret), f"No images found in {image_dir}!" 
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    assert PathManager.isfile(
        ret[0]["pan_seg_file_name"]
    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
    return ret

_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
    "cityscapes_fine_panoptic_train": (
        "cityscapes/leftImg8bit/train",
        "cityscapes/gtFine/cityscapes_panoptic_train",
        "cityscapes/gtFine/cityscapes_panoptic_train.json",
    ),
    "cityscapes_fine_panoptic_val": (
        "cityscapes/leftImg8bit/val",
        "cityscapes/gtFine/cityscapes_panoptic_val",
        "cityscapes/gtFine/cityscapes_panoptic_val.json",
    ),
    # "cityscapes_fine_panoptic_test": not supported yet
}

CITYSCAPES_CATEGORIES = [
    {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
    {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
    {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
    {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
    {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
    {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
    {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
    {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
    {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
    {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
    {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
    {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
    {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
    {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
    {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
    {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
    {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
    {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
    {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
]

def register_all_cityscapes_panoptic(root):
    meta = {}
    # The following metadata maps contiguous ids from [0, #thing categories +
    # #stuff categories) to their names and colors. We keep replicas of the
    # same name and color under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
    # due to some heuristic used in Panoptic FPN. We keep the same naming to
    # enable reusing existing visualization functions.
    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
    meta["thing_classes"] = thing_classes
    meta["thing_colors"] = thing_colors
    meta["stuff_classes"] = stuff_classes
    meta["stuff_colors"] = stuff_colors

    # There are three types of ids in cityscapes panoptic segmentation:
    # (1) category id: like semantic segmentation, it is the class id for each
    #     pixel. Since some classes are not used in evaluation, the category
    #     id is not always contiguous and thus we have two sets of category ids:
    #       - original category id: category id in the original dataset, mainly
    #         used for evaluation.
    #       - contiguous category id: [0, #classes), in order to train the classifier
    # (2) instance id: this id is used to differentiate different instances from
    #     the same category. For "stuff" classes, the instance id is always 0; for
    #     "thing" classes, the instance id starts from 1 and 0 is reserved for
    #     ignored instances (e.g. crowd annotation).
    # (3) panoptic id: this is the compact id that encodes both category and
    #     instance id by: category_id * 1000 + instance_id.
    thing_dataset_id_to_contiguous_id = {}
    stuff_dataset_id_to_contiguous_id = {}
    for k in CITYSCAPES_CATEGORIES:
        if k["isthing"] == 1:
            thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
        else:
            stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id

    for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        gt_json = os.path.join(root, gt_json)

        DatasetCatalog.register(
            key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
        )
        MetadataCatalog.get(key).set(
            panoptic_root=gt_dir,
            image_root=image_dir,
            panoptic_json=gt_json,
            gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
            evaluator_type="cityscapes_panoptic_seg",
            ignore_label=255,
            label_divisor=1000,
            **meta,
        )
null
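A small sketch of the panoptic id encoding described in the comments above (category_id * label_divisor + instance_id, with label_divisor=1000):

label_divisor = 1000
panoptic_id = 26 * label_divisor + 7        # "car" (id 26), instance 7 -> 26007
category_id, instance_id = divmod(panoptic_id, label_divisor)
assert (category_id, instance_id) == (26, 7)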
3,588
import itertools import logging import numpy as np import operator import pickle from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.utils.data as torchdata from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] if isinstance(dataset_dicts[0], torchdata.Dataset): return torchdata.ConcatDataset(dataset_dicts) dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def _log_api_usage(identifier: str): """ Internal function used to log the usage of different detectron2 components inside facebook's infra. """ torch._C._log_api_usage_once("detectron2." + identifier) class DatasetMapper: """ A callable which takes a dataset dict in Detectron2 Dataset format, and map it into a format used by the model. This is the default callable to be used to map your dataset dict into training data. 
You may need to follow it to implement your own one for customized logic, such as a different way to read or transform images. See :doc:`/tutorials/data_loading` for details. The callable currently does the following: 1. Read the image from "file_name" 2. Applies cropping/geometric transforms to the image and annotations 3. Prepare data and annotations to Tensor and :class:`Instances` """ def __init__( self, is_train: bool, *, augmentations: List[Union[T.Augmentation, T.Transform]], image_format: str, use_instance_mask: bool = False, use_keypoint: bool = False, instance_mask_format: str = "polygon", keypoint_hflip_indices: Optional[np.ndarray] = None, precomputed_proposal_topk: Optional[int] = None, recompute_boxes: bool = False, ): """ NOTE: this interface is experimental. Args: is_train: whether it's used in training or inference augmentations: a list of augmentations or deterministic transforms to apply image_format: an image format supported by :func:`detection_utils.read_image`. use_instance_mask: whether to process instance segmentation annotations, if available use_keypoint: whether to process keypoint annotations if available instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation masks into this format. keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` precomputed_proposal_topk: if given, will load pre-computed proposals from dataset_dict and keep the top k proposals for each image. recompute_boxes: whether to overwrite bounding box annotations by computing tight bounding boxes from instance mask annotations. """ if recompute_boxes: assert use_instance_mask, "recompute_boxes requires instance masks" # fmt: off self.is_train = is_train self.augmentations = T.AugmentationList(augmentations) self.image_format = image_format self.use_instance_mask = use_instance_mask self.instance_mask_format = instance_mask_format self.use_keypoint = use_keypoint self.keypoint_hflip_indices = keypoint_hflip_indices self.proposal_topk = precomputed_proposal_topk self.recompute_boxes = recompute_boxes # fmt: on logger = logging.getLogger(__name__) mode = "training" if is_train else "inference" logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") def from_config(cls, cfg, is_train: bool = True): augs = utils.build_augmentation(cfg, is_train) if cfg.INPUT.CROP.ENABLED and is_train: augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) recompute_boxes = cfg.MODEL.MASK_ON else: recompute_boxes = False ret = { "is_train": is_train, "augmentations": augs, "image_format": cfg.INPUT.FORMAT, "use_instance_mask": cfg.MODEL.MASK_ON, "instance_mask_format": cfg.INPUT.MASK_FORMAT, "use_keypoint": cfg.MODEL.KEYPOINT_ON, "recompute_boxes": recompute_boxes, } if cfg.MODEL.KEYPOINT_ON: ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) if cfg.MODEL.LOAD_PROPOSALS: ret["precomputed_proposal_topk"] = ( cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST ) return ret def _transform_annotations(self, dataset_dict, transforms, image_shape): # USER: Modify this if you want to keep them for some reason. 
for anno in dataset_dict["annotations"]: if not self.use_instance_mask: anno.pop("segmentation", None) if not self.use_keypoint: anno.pop("keypoints", None) # USER: Implement additional transformations if you have other types of data annos = [ utils.transform_instance_annotations( obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices ) for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances( annos, image_shape, mask_format=self.instance_mask_format ) # After transforms such as cropping are applied, the bounding box may no longer # tightly bound the object. As an example, imagine a triangle object # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to # the intersection of original bounding box and the cropping box. if self.recompute_boxes: instances.gt_boxes = instances.gt_masks.get_bounding_boxes() dataset_dict["instances"] = utils.filter_empty_instances(instances) def __call__(self, dataset_dict): """ Args: dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Returns: dict: a format that builtin models in detectron2 accept """ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below # USER: Write your own image loading if it's not from a file image = utils.read_image(dataset_dict["file_name"], format=self.image_format) utils.check_image_size(dataset_dict, image) # USER: Remove if you don't do semantic/panoptic segmentation. if "sem_seg_file_name" in dataset_dict: sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) else: sem_seg_gt = None aug_input = T.AugInput(image, sem_seg=sem_seg_gt) transforms = self.augmentations(aug_input) image, sem_seg_gt = aug_input.image, aug_input.sem_seg image_shape = image.shape[:2] # h, w # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, # but not efficient on large generic data structures due to the use of pickle & mp.Queue. # Therefore it's important to use torch.Tensor. dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) if sem_seg_gt is not None: dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) # USER: Remove if you don't use pre-computed proposals. # Most users would not need this feature. if self.proposal_topk is not None: utils.transform_proposals( dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk ) if not self.is_train: # USER: Modify this if you want to keep them for some reason. dataset_dict.pop("annotations", None) dataset_dict.pop("sem_seg_file_name", None) return dataset_dict if "annotations" in dataset_dict: self._transform_annotations(dataset_dict, transforms, image_shape) return dataset_dict def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): if dataset is None: dataset = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) _log_api_usage("dataset." 
+ cfg.DATASETS.TRAIN[0]) if mapper is None: mapper = DatasetMapper(cfg, True) if sampler is None: sampler_name = cfg.DATALOADER.SAMPLER_TRAIN logger = logging.getLogger(__name__) logger.info("Using training sampler {}".format(sampler_name)) if sampler_name == "TrainingSampler": sampler = TrainingSampler(len(dataset)) elif sampler_name == "RepeatFactorTrainingSampler": repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( dataset, cfg.DATALOADER.REPEAT_THRESHOLD ) sampler = RepeatFactorTrainingSampler(repeat_factors) elif sampler_name == "RandomSubsetTrainingSampler": sampler = RandomSubsetTrainingSampler(len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO) else: raise ValueError("Unknown training sampler: {}".format(sampler_name)) return { "dataset": dataset, "sampler": sampler, "mapper": mapper, "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING, "num_workers": cfg.DATALOADER.NUM_WORKERS, }
null
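For context, the config-driven entry point that consumes the dict built by _train_loader_from_config is build_detection_train_loader; a hedged sketch, assuming "coco_2017_train" is already registered:

from detectron2.config import get_cfg
from detectron2.data import build_detection_train_loader

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("coco_2017_train",)
train_loader = build_detection_train_loader(cfg)   # yields list[dict] batches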
3,589
import itertools import logging import numpy as np import operator import pickle from typing import Any, Callable, Dict, List, Optional, Union import torch import torch.utils.data as torchdata from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] if isinstance(dataset_dicts[0], torchdata.Dataset): return torchdata.ConcatDataset(dataset_dicts) dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts class DatasetMapper: """ A callable which takes a dataset dict in Detectron2 Dataset format, and map it into a format used by the model. This is the default callable to be used to map your dataset dict into training data. You may need to follow it to implement your own one for customized logic, such as a different way to read or transform images. See :doc:`/tutorials/data_loading` for details. The callable currently does the following: 1. Read the image from "file_name" 2. 
Applies cropping/geometric transforms to the image and annotations 3. Prepare data and annotations to Tensor and :class:`Instances` """ def __init__( self, is_train: bool, *, augmentations: List[Union[T.Augmentation, T.Transform]], image_format: str, use_instance_mask: bool = False, use_keypoint: bool = False, instance_mask_format: str = "polygon", keypoint_hflip_indices: Optional[np.ndarray] = None, precomputed_proposal_topk: Optional[int] = None, recompute_boxes: bool = False, ): """ NOTE: this interface is experimental. Args: is_train: whether it's used in training or inference augmentations: a list of augmentations or deterministic transforms to apply image_format: an image format supported by :func:`detection_utils.read_image`. use_instance_mask: whether to process instance segmentation annotations, if available use_keypoint: whether to process keypoint annotations if available instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation masks into this format. keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` precomputed_proposal_topk: if given, will load pre-computed proposals from dataset_dict and keep the top k proposals for each image. recompute_boxes: whether to overwrite bounding box annotations by computing tight bounding boxes from instance mask annotations. """ if recompute_boxes: assert use_instance_mask, "recompute_boxes requires instance masks" # fmt: off self.is_train = is_train self.augmentations = T.AugmentationList(augmentations) self.image_format = image_format self.use_instance_mask = use_instance_mask self.instance_mask_format = instance_mask_format self.use_keypoint = use_keypoint self.keypoint_hflip_indices = keypoint_hflip_indices self.proposal_topk = precomputed_proposal_topk self.recompute_boxes = recompute_boxes # fmt: on logger = logging.getLogger(__name__) mode = "training" if is_train else "inference" logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") def from_config(cls, cfg, is_train: bool = True): augs = utils.build_augmentation(cfg, is_train) if cfg.INPUT.CROP.ENABLED and is_train: augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) recompute_boxes = cfg.MODEL.MASK_ON else: recompute_boxes = False ret = { "is_train": is_train, "augmentations": augs, "image_format": cfg.INPUT.FORMAT, "use_instance_mask": cfg.MODEL.MASK_ON, "instance_mask_format": cfg.INPUT.MASK_FORMAT, "use_keypoint": cfg.MODEL.KEYPOINT_ON, "recompute_boxes": recompute_boxes, } if cfg.MODEL.KEYPOINT_ON: ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) if cfg.MODEL.LOAD_PROPOSALS: ret["precomputed_proposal_topk"] = ( cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST ) return ret def _transform_annotations(self, dataset_dict, transforms, image_shape): # USER: Modify this if you want to keep them for some reason. 
for anno in dataset_dict["annotations"]: if not self.use_instance_mask: anno.pop("segmentation", None) if not self.use_keypoint: anno.pop("keypoints", None) # USER: Implement additional transformations if you have other types of data annos = [ utils.transform_instance_annotations( obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices ) for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances( annos, image_shape, mask_format=self.instance_mask_format ) # After transforms such as cropping are applied, the bounding box may no longer # tightly bound the object. As an example, imagine a triangle object # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to # the intersection of original bounding box and the cropping box. if self.recompute_boxes: instances.gt_boxes = instances.gt_masks.get_bounding_boxes() dataset_dict["instances"] = utils.filter_empty_instances(instances) def __call__(self, dataset_dict): """ Args: dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Returns: dict: a format that builtin models in detectron2 accept """ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below # USER: Write your own image loading if it's not from a file image = utils.read_image(dataset_dict["file_name"], format=self.image_format) utils.check_image_size(dataset_dict, image) # USER: Remove if you don't do semantic/panoptic segmentation. if "sem_seg_file_name" in dataset_dict: sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) else: sem_seg_gt = None aug_input = T.AugInput(image, sem_seg=sem_seg_gt) transforms = self.augmentations(aug_input) image, sem_seg_gt = aug_input.image, aug_input.sem_seg image_shape = image.shape[:2] # h, w # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, # but not efficient on large generic data structures due to the use of pickle & mp.Queue. # Therefore it's important to use torch.Tensor. dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) if sem_seg_gt is not None: dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) # USER: Remove if you don't use pre-computed proposals. # Most users would not need this feature. if self.proposal_topk is not None: utils.transform_proposals( dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk ) if not self.is_train: # USER: Modify this if you want to keep them for some reason. dataset_dict.pop("annotations", None) dataset_dict.pop("sem_seg_file_name", None) return dataset_dict if "annotations" in dataset_dict: self._transform_annotations(dataset_dict, transforms, image_shape) return dataset_dict The provided code snippet includes necessary dependencies for implementing the `_test_loader_from_config` function. Write a Python function `def _test_loader_from_config(cfg, dataset_name, mapper=None)` to solve the following problem: Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them). Here is the function: def _test_loader_from_config(cfg, dataset_name, mapper=None): """ Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them). 
""" if isinstance(dataset_name, str): dataset_name = [dataset_name] dataset = get_detection_dataset_dicts( dataset_name, filter_empty=False, proposal_files=[ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name ] if cfg.MODEL.LOAD_PROPOSALS else None, ) if mapper is None: mapper = DatasetMapper(cfg, False) return { "dataset": dataset, "mapper": mapper, "num_workers": cfg.DATALOADER.NUM_WORKERS, "sampler": InferenceSampler(len(dataset)), }
Uses the given `dataset_name` argument (instead of the names in cfg), because the standard practice is to evaluate each test set individually (not combining them).
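A hedged usage sketch: the corresponding public entry point builds one loader per test set, matching the per-dataset evaluation convention described above:

from detectron2.data import build_detection_test_loader

val_loader = build_detection_test_loader(cfg, "coco_2017_val")  # one dataset at a time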
3,590
import itertools import logging from typing import Dict, List import torch from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms_rotated, cat from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated from detectron2.utils.memory import retry_if_cuda_oom from ..box_regression import Box2BoxTransformRotated from .build import PROPOSAL_GENERATOR_REGISTRY from .proposal_utils import _is_tracing from .rpn import RPN def _is_tracing(): # (fixed in TORCH_VERSION >= 1.9) if torch.jit.is_scripting(): # https://github.com/pytorch/pytorch/issues/47379 return False else: return torch.jit.is_tracing() The provided code snippet includes necessary dependencies for implementing the `find_top_rrpn_proposals` function. Write a Python function `def find_top_rrpn_proposals( proposals, pred_objectness_logits, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_size, training, )` to solve the following problem: For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps if `training` is True, otherwise, returns the highest `post_nms_topk` scoring proposals for each feature map. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size(float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: proposals (list[Instances]): list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i. Here is the function: def find_top_rrpn_proposals( proposals, pred_objectness_logits, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_size, training, ): """ For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps if `training` is True, otherwise, returns the highest `post_nms_topk` scoring proposals for each feature map. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. 
        min_box_size (float): minimum proposal box side length in pixels
            (absolute units wrt input images).
        training (bool): True if proposals are to be used in training,
            otherwise False. This arg exists only to support a legacy bug;
            look for the "NB: Legacy bug ..." comment.

    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i.
    """
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchors for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
        else:
            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)

        topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)  # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 5

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(
            torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)
        )

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = RotatedBoxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_size)
        lvl = level_ids
        if _is_tracing() or keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep])

        keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results
For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps if `training` is True, otherwise, returns the highest `post_nms_topk` scoring proposals for each feature map. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RRPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size(float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: proposals (list[Instances]): list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i.
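A hedged sketch of calling find_top_rrpn_proposals on random single-level predictions (2 images, 100 rotated anchors as (cx, cy, w, h, angle)); it assumes detectron2 is installed with its compiled rotated-NMS ops:

import torch

proposals = [torch.rand(2, 100, 5) * 50]   # one feature level, N x (Hi*Wi*A) x 5
logits = [torch.randn(2, 100)]             # objectness logits per anchor
out = find_top_rrpn_proposals(
    proposals, logits, image_sizes=[(64, 64), (64, 64)],
    nms_thresh=0.7, pre_nms_topk=50, post_nms_topk=10,
    min_box_size=0.0, training=False,
)
# out[i].proposal_boxes and out[i].objectness_logits hold the kept proposals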
3,591
from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, cat from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.memory import retry_if_cuda_oom from detectron2.utils.registry import Registry from ..anchor_generator import build_anchor_generator from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from ..sampling import subsample_labels from .build import PROPOSAL_GENERATOR_REGISTRY from .proposal_utils import find_top_rpn_proposals RPN_HEAD_REGISTRY = Registry("RPN_HEAD") RPN_HEAD_REGISTRY.__doc__ = """ Registry for RPN heads, which take feature maps and perform objectness classification and bounding box regression for anchors. The registered object will be called with `obj(cfg, input_shape)`. The call should return a `nn.Module` object. """ class RPN(nn.Module): """ Region Proposal Network, introduced by :paper:`Faster R-CNN`. """ def __init__( self, *, in_features: List[str], head: nn.Module, anchor_generator: nn.Module, anchor_matcher: Matcher, box2box_transform: Box2BoxTransform, batch_size_per_image: int, positive_fraction: float, pre_nms_topk: Tuple[float, float], post_nms_topk: Tuple[float, float], nms_thresh: float = 0.7, min_box_size: float = 0.0, anchor_boundary_thresh: float = -1.0, loss_weight: Union[float, Dict[str, float]] = 1.0, box_reg_loss_type: str = "smooth_l1", smooth_l1_beta: float = 0.0, ): """ NOTE: this interface is experimental. Args: in_features (list[str]): list of names of input features to use head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` anchor_matcher (Matcher): label the anchors by matching them with ground truth. box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes batch_size_per_image (int): number of anchors per image to sample for training positive_fraction (float): fraction of foreground anchors to sample for training pre_nms_topk (tuple[float]): (train, test) that represents the number of top k proposals to select before NMS, in training and testing. post_nms_topk (tuple[float]): (train, test) that represents the number of top k proposals to select after NMS, in training and testing. nms_thresh (float): NMS threshold used to de-duplicate the predicted proposals min_box_size (float): remove proposal boxes with any side smaller than this threshold, in the unit of input image pixels anchor_boundary_thresh (float): legacy option loss_weight (float|dict): weights to use for losses. Can be single float for weighting all rpn losses together, or a dict of individual weightings. Valid dict keys are: "loss_rpn_cls" - applied to classification loss "loss_rpn_loc" - applied to box regression loss box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. 
Only used when `box_reg_loss_type` is "smooth_l1" """ super().__init__() self.in_features = in_features self.rpn_head = head self.anchor_generator = anchor_generator self.anchor_matcher = anchor_matcher self.box2box_transform = box2box_transform self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction # Map from self.training state to train/test settings self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]} self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]} self.nms_thresh = nms_thresh self.min_box_size = float(min_box_size) self.anchor_boundary_thresh = anchor_boundary_thresh if isinstance(loss_weight, float): loss_weight = {"loss_rpn_cls": loss_weight, "loss_rpn_loc": loss_weight} self.loss_weight = loss_weight self.box_reg_loss_type = box_reg_loss_type self.smooth_l1_beta = smooth_l1_beta def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): in_features = cfg.MODEL.RPN.IN_FEATURES ret = { "in_features": in_features, "min_box_size": cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE, "nms_thresh": cfg.MODEL.RPN.NMS_THRESH, "batch_size_per_image": cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, "positive_fraction": cfg.MODEL.RPN.POSITIVE_FRACTION, "loss_weight": { "loss_rpn_cls": cfg.MODEL.RPN.LOSS_WEIGHT, "loss_rpn_loc": cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT, }, "anchor_boundary_thresh": cfg.MODEL.RPN.BOUNDARY_THRESH, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS), "box_reg_loss_type": cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE, "smooth_l1_beta": cfg.MODEL.RPN.SMOOTH_L1_BETA, } ret["pre_nms_topk"] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST) ret["post_nms_topk"] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST) ret["anchor_generator"] = build_anchor_generator(cfg, [input_shape[f] for f in in_features]) ret["anchor_matcher"] = Matcher( cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True ) ret["head"] = build_rpn_head(cfg, [input_shape[f] for f in in_features]) return ret def _subsample_labels(self, label): """ Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value (-1) for all elements that are not included in the sample. Args: labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. """ pos_idx, neg_idx = subsample_labels( label, self.batch_size_per_image, self.positive_fraction, 0 ) # Fill with the ignore label (-1), then set positive and negative labels label.fill_(-1) label.scatter_(0, pos_idx, 1) label.scatter_(0, neg_idx, 0) return label def label_and_sample_anchors( self, anchors: List[Boxes], gt_instances: List[Instances] ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: """ Args: anchors (list[Boxes]): anchors for each feature map. gt_instances: the ground-truth instances for each image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps R = sum(Hi * Wi * A). Label values are in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class. list[Tensor]: i-th element is a Rx4 tensor. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as 1. 
""" anchors = Boxes.cat(anchors) gt_boxes = [x.gt_boxes for x in gt_instances] image_sizes = [x.image_size for x in gt_instances] del gt_instances gt_labels = [] matched_gt_boxes = [] for image_size_i, gt_boxes_i in zip(image_sizes, gt_boxes): """ image_size_i: (h, w) for the i-th image gt_boxes_i: ground-truth boxes for i-th image """ match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors) matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix) # Matching is memory-expensive and may result in CPU tensors. But the result is small gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device) del match_quality_matrix if self.anchor_boundary_thresh >= 0: # Discard anchors that go out of the boundaries of the image # NOTE: This is legacy functionality that is turned off by default in Detectron2 anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh) gt_labels_i[~anchors_inside_image] = -1 # A vector of labels (-1, 0, 1) for each anchor gt_labels_i = self._subsample_labels(gt_labels_i) if len(gt_boxes_i) == 0: # These values won't be used anyway since the anchor is labeled as background matched_gt_boxes_i = torch.zeros_like(anchors.tensor) else: # TODO wasted indexing computation for ignored boxes matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor gt_labels.append(gt_labels_i) # N,AHW matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def losses( self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], gt_labels: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor], ) -> Dict[str, torch.Tensor]: """ Return the losses from a set of RPN predictions and their associated ground-truth. Args: anchors (list[Boxes or RotatedBoxes]): anchors for each feature map, each has shape (Hi*Wi*A, B), where B is box dimension (4 or 5). pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, Hi*Wi*A) representing the predicted objectness logits for all anchors. gt_labels (list[Tensor]): Output of :meth:`label_and_sample_anchors`. pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, Hi*Wi*A, 4 or 5) representing the predicted "deltas" used to transform anchors to proposals. gt_boxes (list[Tensor]): Output of :meth:`label_and_sample_anchors`. Returns: dict[loss name -> loss value]: A dict mapping from loss name to loss value. Loss names are: `loss_rpn_cls` for objectness classification and `loss_rpn_loc` for proposal localization. 
""" num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, sum(Hi*Wi*Ai)) # Log the number of positive/negative anchors per-image that's used in training pos_mask = gt_labels == 1 num_pos_anchors = pos_mask.sum().item() num_neg_anchors = (gt_labels == 0).sum().item() storage = get_event_storage() storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / num_images) storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / num_images) localization_loss = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) valid_mask = gt_labels >= 0 objectness_loss = F.binary_cross_entropy_with_logits( cat(pred_objectness_logits, dim=1)[valid_mask], gt_labels[valid_mask].to(torch.float32), reduction="sum", ) normalizer = self.batch_size_per_image * num_images losses = { "loss_rpn_cls": objectness_loss / normalizer, # The original Faster R-CNN paper uses a slightly different normalizer # for loc loss. But it doesn't matter in practice "loss_rpn_loc": localization_loss / normalizer, } losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()} return losses def forward( self, images: ImageList, features: Dict[str, torch.Tensor], gt_instances: Optional[List[Instances]] = None, ): """ Args: images (ImageList): input images of length `N` features (dict[str, Tensor]): input data as a mapping from feature map name to tensor. Axis 0 represents the number of images `N` in the input data; axes 1-3 are channels, height, and width, which may vary between feature maps (e.g., if a feature pyramid is used). gt_instances (list[Instances], optional): a length `N` list of `Instances`s. Each `Instances` stores ground-truth instances for the corresponding image. Returns: proposals: list[Instances]: contains fields "proposal_boxes", "objectness_logits" loss: dict[Tensor] or None """ features = [features[f] for f in self.in_features] anchors = self.anchor_generator(features) pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) # Transpose the Hi*Wi*A dimension to the middle: pred_objectness_logits = [ # (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) score.permute(0, 2, 3, 1).flatten(1) for score in pred_objectness_logits ] pred_anchor_deltas = [ # (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B) x.view(x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]) .permute(0, 3, 4, 1, 2) .flatten(1, -2) for x in pred_anchor_deltas ] if self.training: assert gt_instances is not None, "RPN requires gt_instances in training!" gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances) losses = self.losses( anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes ) else: losses = {} proposals = self.predict_proposals( anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes ) return proposals, losses def predict_proposals( self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], image_sizes: List[Tuple[int, int]], ): """ Decode all the predicted box regression deltas to proposals. Find the top proposals by applying NMS and removing boxes that are too small. Returns: proposals (list[Instances]): list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i, sorted by their objectness score in descending order. """ # The proposals are treated as fixed for joint training with roi heads. 
        # This approach ignores the derivative w.r.t. the proposal boxes' coordinates that
        # are also network responses.
        with torch.no_grad():
            pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
            return find_top_rpn_proposals(
                pred_proposals,
                pred_objectness_logits,
                image_sizes,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_size,
                self.training,
            )

    def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):
        """
        Transform anchors into proposals by applying the predicted anchor deltas.

        Returns:
            proposals (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A, B)
        """
        N = pred_anchor_deltas[0].shape[0]
        proposals = []
        # For each feature map
        for anchors_i, pred_anchor_deltas_i in zip(anchors, pred_anchor_deltas):
            B = anchors_i.tensor.size(1)
            pred_anchor_deltas_i = pred_anchor_deltas_i.reshape(-1, B)
            # Expand anchors to shape (N*Hi*Wi*A, B)
            anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, -1, -1).reshape(-1, B)
            proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
            # Append feature map proposals with shape (N, Hi*Wi*A, B)
            proposals.append(proposals_i.view(N, -1, B))
        return proposals

The provided code snippet includes necessary dependencies for implementing the `build_rpn_head` function. Write a Python function `def build_rpn_head(cfg, input_shape)` to solve the following problem:
Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
Here is the function:
def build_rpn_head(cfg, input_shape):
    """
    Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
    """
    name = cfg.MODEL.RPN.HEAD_NAME
    return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape)
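A minimal usage sketch (assuming detectron2 is installed; "StandardRPNHead" is the library's default head name, used here purely for illustration, and the ShapeSpec values are made up):

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()
cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"  # detectron2's built-in default
# One ShapeSpec per input feature level; all levels must share the same channel count.
rpn_head = build_rpn_head(cfg, [ShapeSpec(channels=256, stride=16)])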
Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.
3,592
import logging import math from typing import List, Tuple, Union import torch from detectron2.layers import batched_nms, cat from detectron2.structures import Boxes, Instances def _is_tracing(): # (fixed in TORCH_VERSION >= 1.9) if torch.jit.is_scripting(): # https://github.com/pytorch/pytorch/issues/47379 return False else: return torch.jit.is_tracing() The provided code snippet includes necessary dependencies for implementing the `find_top_rpn_proposals` function. Write a Python function `def find_top_rpn_proposals( proposals: List[torch.Tensor], pred_objectness_logits: List[torch.Tensor], image_sizes: List[Tuple[int, int]], nms_thresh: float, pre_nms_topk: int, post_nms_topk: int, min_box_size: float, training: bool, )` to solve the following problem: For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps for each image. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size (float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: list[Instances]: list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i, sorted by their objectness score in descending order. Here is the function: def find_top_rpn_proposals( proposals: List[torch.Tensor], pred_objectness_logits: List[torch.Tensor], image_sizes: List[Tuple[int, int]], nms_thresh: float, pre_nms_topk: int, post_nms_topk: int, min_box_size: float, training: bool, ): """ For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps for each image. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size (float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. 
    Returns:
        list[Instances]: list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i, sorted by their
            objectness score in descending order.
    """
    num_images = len(image_sizes)
    device = proposals[0].device

    # 1. Select top-k anchor for every level and every image
    topk_scores = []  # #lvl Tensor, each of shape N x topk
    topk_proposals = []
    level_ids = []  # #lvl Tensor, each of shape (topk,)
    batch_idx = torch.arange(num_images, device=device)
    for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)):
        Hi_Wi_A = logits_i.shape[1]
        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
        else:
            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)

        topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)  # each is N x topk
        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4

        topk_proposals.append(topk_proposals_i)
        topk_scores.append(topk_scores_i)
        level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))

    # 2. Concat all levels together
    topk_scores = cat(topk_scores, dim=1)
    topk_proposals = cat(topk_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)

    # 3. For each image, run a per-level NMS, and choose topk results.
    results: List[Instances] = []
    for n, image_size in enumerate(image_sizes):
        boxes = Boxes(topk_proposals[n])
        scores_per_img = topk_scores[n]
        lvl = level_ids

        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
        if not valid_mask.all():
            if training:
                raise FloatingPointError(
                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
                )
            boxes = boxes[valid_mask]
            scores_per_img = scores_per_img[valid_mask]
            lvl = lvl[valid_mask]
        boxes.clip(image_size)

        # filter empty boxes
        keep = boxes.nonempty(threshold=min_box_size)
        if _is_tracing() or keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]

        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # (https://github.com/facebookresearch/Detectron/issues/459)
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]  # keep is already sorted

        res = Instances(image_size)
        res.proposal_boxes = boxes[keep]
        res.objectness_logits = scores_per_img[keep]
        results.append(res)
    return results
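A minimal sketch of calling the function directly with fabricated tensors (shapes, image sizes, and thresholds are illustrative only):

import torch

# 2 images, one feature level with 100 anchors each; build valid x1y1x2y2 boxes.
xy = torch.rand(2, 100, 2) * 32
wh = torch.rand(2, 100, 2) * 32
proposals = [torch.cat([xy, xy + wh], dim=2)]  # (N, Hi*Wi*A, 4)
logits = [torch.randn(2, 100)]                 # (N, Hi*Wi*A)
results = find_top_rpn_proposals(
    proposals, logits, [(64, 64), (64, 64)],
    nms_thresh=0.7, pre_nms_topk=50, post_nms_topk=20,
    min_box_size=0.0, training=False,
)
print(results[0].proposal_boxes)  # at most 20 boxes per image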
For each feature map, select the `pre_nms_topk` highest scoring proposals, apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` highest scoring proposals among all the feature maps for each image. Args: proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). All proposal predictions on the feature maps. pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). image_sizes (list[tuple]): sizes (h, w) for each image nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is per feature map. post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. When RPN is run on multiple feature maps (as in FPN) this number is total, over all feature maps. min_box_size (float): minimum proposal box side length in pixels (absolute units wrt input images). training (bool): True if proposals are to be used in training, otherwise False. This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." comment. Returns: list[Instances]: list of N Instances. The i-th Instances stores post_nms_topk object proposals for image i, sorted by their objectness score in descending order.
3,593
import logging
import math
from typing import List, Tuple, Union
import torch

from detectron2.layers import batched_nms, cat
from detectron2.structures import Boxes, Instances

def add_ground_truth_to_proposals_single_image(
    gt: Union[Instances, Boxes], proposals: Instances
) -> Instances:
    """
    Augment `proposals` with `gt`.

    Args:
        Same as `add_ground_truth_to_proposals`, but with gt and proposals per image.

    Returns:
        Same as `add_ground_truth_to_proposals`, but for only one image.
    """
    if isinstance(gt, Boxes):
        # convert Boxes to Instances
        gt = Instances(proposals.image_size, gt_boxes=gt)

    gt_boxes = gt.gt_boxes
    device = proposals.objectness_logits.device
    # Assign all ground-truth boxes an objectness logit corresponding to
    # P(object) = sigmoid(logit) =~ 1.
    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)

    # Concatenating gt_boxes with proposals requires them to have the same fields
    gt_proposal = Instances(proposals.image_size, **gt.get_fields())
    gt_proposal.proposal_boxes = gt_boxes
    gt_proposal.objectness_logits = gt_logits

    for key in proposals.get_fields().keys():
        assert gt_proposal.has(
            key
        ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key)

    # NOTE: Instances.cat only uses fields from the first item. Extra fields in latter items
    # will be thrown away.
    new_proposals = Instances.cat([proposals, gt_proposal])

    return new_proposals

The provided code snippet includes necessary dependencies for implementing the `add_ground_truth_to_proposals` function. Write a Python function `def add_ground_truth_to_proposals( gt: Union[List[Instances], List[Boxes]], proposals: List[Instances] ) -> List[Instances]` to solve the following problem:
Call `add_ground_truth_to_proposals_single_image` for all images. Args: gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an Instances representing the ground-truth for image i. proposals (list[Instances]): list of N elements. Element i is an Instances representing the proposals for image i. Returns: list[Instances]: list of N Instances. Each is the proposals for the image, with field "proposal_boxes" and "objectness_logits".
Here is the function:
def add_ground_truth_to_proposals(
    gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]
) -> List[Instances]:
    """
    Call `add_ground_truth_to_proposals_single_image` for all images.

    Args:
        gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an
            Instances representing the ground-truth for image i.
        proposals (list[Instances]): list of N elements. Element i is an Instances
            representing the proposals for image i.

    Returns:
        list[Instances]: list of N Instances. Each is the proposals for the image,
        with field "proposal_boxes" and "objectness_logits".
    """
    assert gt is not None

    if len(proposals) != len(gt):
        raise ValueError("proposals and gt should have the same length as the number of images!")
    if len(proposals) == 0:
        return proposals

    return [
        add_ground_truth_to_proposals_single_image(gt_i, proposals_i)
        for gt_i, proposals_i in zip(gt, proposals)
    ]
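A minimal sketch of the expected behavior (toy boxes, CPU tensors, one image):

import torch
from detectron2.structures import Boxes, Instances

proposals = Instances((64, 64))
proposals.proposal_boxes = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
proposals.objectness_logits = torch.tensor([2.5])
gt = [Boxes(torch.tensor([[5.0, 5.0, 20.0, 20.0]]))]

merged = add_ground_truth_to_proposals(gt, [proposals])
print(len(merged[0]))  # 2: the original proposal plus the gt box (given a ~+23 logit)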
Call `add_ground_truth_to_proposals_single_image` for all images.

Args:
    gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an
        Instances representing the ground-truth for image i.
    proposals (list[Instances]): list of N elements. Element i is an Instances
        representing the proposals for image i.

Returns:
    list[Instances]: list of N Instances. Each is the proposals for the image,
    with field "proposal_boxes" and "objectness_logits".
3,594
from detectron2.utils.registry import Registry PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR") PROPOSAL_GENERATOR_REGISTRY.__doc__ = """ Registry for proposal generator, which produces object proposals from feature maps. The registered object will be called with `obj(cfg, input_shape)`. The call should return a `nn.Module` object. """ from . import rpn, rrpn The provided code snippet includes necessary dependencies for implementing the `build_proposal_generator` function. Write a Python function `def build_proposal_generator(cfg, input_shape)` to solve the following problem: Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. The name can be "PrecomputedProposals" to use no proposal generator. Here is the function: def build_proposal_generator(cfg, input_shape): """ Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. The name can be "PrecomputedProposals" to use no proposal generator. """ name = cfg.MODEL.PROPOSAL_GENERATOR.NAME if name == "PrecomputedProposals": return None return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
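A minimal sketch (assuming a full detectron2 install with its default config, where cfg.MODEL.PROPOSAL_GENERATOR.NAME is "RPN" and cfg.MODEL.RPN.IN_FEATURES is ["res4"]; the ShapeSpec values are illustrative):

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()
shapes = {"res4": ShapeSpec(channels=1024, stride=16)}
proposal_generator = build_proposal_generator(cfg, shapes)  # an RPN module

cfg.MODEL.PROPOSAL_GENERATOR.NAME = "PrecomputedProposals"
assert build_proposal_generator(cfg, shapes) is None  # precomputed proposals: no generator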
Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`. The name can be "PrecomputedProposals" to use no proposal generator.
3,595
import math
from typing import List, Tuple, Union
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch.nn import functional as F

from detectron2.layers import cat, ciou_loss, diou_loss
from detectron2.structures import Boxes

# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)

class Box2BoxTransform(object):
    """
    The box-to-box transform defined in R-CNN. The transformation is parameterized
    by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height
    by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).
    """

    def __init__(
        self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP
    ):
        """
        Args:
            weights (4-element tuple): Scaling factors that are applied to the
                (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set
                such that the deltas have unit variance; now they are treated as
                hyperparameters of the system.
            scale_clamp (float): When predicting deltas, the predicted box scaling
                factors (dw and dh) are clamped such that they are <= scale_clamp.
        """
        self.weights = weights
        self.scale_clamp = scale_clamp

    def get_deltas(self, src_boxes, target_boxes):
        """
        Get box regression transformation deltas (dx, dy, dw, dh) that can be used
        to transform the `src_boxes` into the `target_boxes`. That is, the relation
        ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless
        any delta is too large and is clamped).

        Args:
            src_boxes (Tensor): source boxes, e.g., object proposals
            target_boxes (Tensor): target of the transformation, e.g., ground-truth
                boxes.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)

        src_widths = src_boxes[:, 2] - src_boxes[:, 0]
        src_heights = src_boxes[:, 3] - src_boxes[:, 1]
        src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths
        src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights

        target_widths = target_boxes[:, 2] - target_boxes[:, 0]
        target_heights = target_boxes[:, 3] - target_boxes[:, 1]
        target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths
        target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights

        wx, wy, ww, wh = self.weights
        dx = wx * (target_ctr_x - src_ctr_x) / src_widths
        dy = wy * (target_ctr_y - src_ctr_y) / src_heights
        dw = ww * torch.log(target_widths / src_widths)
        dh = wh * torch.log(target_heights / src_heights)

        deltas = torch.stack((dx, dy, dw, dh), dim=1)
        assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
        return deltas

    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
boxes (Tensor): boxes to transform, of shape (N, 4) """ deltas = deltas.float() # ensure fp32 for decoding precision boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] x1 = pred_ctr_x - 0.5 * pred_w y1 = pred_ctr_y - 0.5 * pred_h x2 = pred_ctr_x + 0.5 * pred_w y2 = pred_ctr_y + 0.5 * pred_h pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1) return pred_boxes.reshape(deltas.shape) The provided code snippet includes necessary dependencies for implementing the `_dense_box_regression_loss` function. Write a Python function `def _dense_box_regression_loss( anchors: List[Union[Boxes, torch.Tensor]], box2box_transform: Box2BoxTransform, pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor], fg_mask: torch.Tensor, box_reg_loss_type="smooth_l1", smooth_l1_beta=0.0, )` to solve the following problem: Compute loss for dense multi-level box regression. Loss is accumulated over ``fg_mask``. Args: anchors: #lvl anchor boxes, each is (HixWixA, 4) pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) fg_mask: the foreground boolean mask of shape (N, R) to compute loss on box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou", "diou", "ciou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1" Here is the function: def _dense_box_regression_loss( anchors: List[Union[Boxes, torch.Tensor]], box2box_transform: Box2BoxTransform, pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor], fg_mask: torch.Tensor, box_reg_loss_type="smooth_l1", smooth_l1_beta=0.0, ): """ Compute loss for dense multi-level box regression. Loss is accumulated over ``fg_mask``. Args: anchors: #lvl anchor boxes, each is (HixWixA, 4) pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) fg_mask: the foreground boolean mask of shape (N, R) to compute loss on box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou", "diou", "ciou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. 
Only used when `box_reg_loss_type` is "smooth_l1" """ if isinstance(anchors[0], Boxes): anchors = type(anchors[0]).cat(anchors).tensor # (R, 4) else: anchors = cat(anchors) if box_reg_loss_type == "smooth_l1": gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes] gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4) loss_box_reg = smooth_l1_loss( cat(pred_anchor_deltas, dim=1)[fg_mask], gt_anchor_deltas[fg_mask], beta=smooth_l1_beta, reduction="sum", ) elif box_reg_loss_type == "giou": pred_boxes = [ box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) ] loss_box_reg = giou_loss( torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" ) elif box_reg_loss_type == "diou": pred_boxes = [ box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) ] loss_box_reg = diou_loss( torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" ) elif box_reg_loss_type == "ciou": pred_boxes = [ box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1) ] loss_box_reg = ciou_loss( torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction="sum" ) else: raise ValueError(f"Invalid dense box regression loss type '{box_reg_loss_type}'") return loss_box_reg
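A minimal sketch using the `Box2BoxTransform` and `Boxes` imports above (the anchors, deltas, and mask are fabricated: one image, two anchors, only the first labeled foreground):

import torch

b2b = Box2BoxTransform(weights=(1.0, 1.0, 1.0, 1.0))
anchors = [Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]]))]
pred_anchor_deltas = [torch.zeros(1, 2, 4)]  # N=1 image, R=2 anchors
gt_boxes = [torch.tensor([[0.0, 0.0, 12.0, 12.0], [4.0, 4.0, 16.0, 16.0]])]
fg_mask = torch.tensor([[True, False]])
loss = _dense_box_regression_loss(
    anchors, b2b, pred_anchor_deltas, gt_boxes, fg_mask, box_reg_loss_type="smooth_l1"
)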
Compute loss for dense multi-level box regression. Loss is accumulated over ``fg_mask``. Args: anchors: #lvl anchor boxes, each is (HixWixA, 4) pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4) gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A)) fg_mask: the foreground boolean mask of shape (N, R) to compute loss on box_reg_loss_type (str): Loss type to use. Supported losses: "smooth_l1", "giou", "diou", "ciou". smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to use L1 loss. Only used when `box_reg_loss_type` is "smooth_l1"
3,596
import torch
from torch.nn import functional as F

from detectron2.structures import Instances, ROIMasks

The provided code snippet includes necessary dependencies for implementing the `sem_seg_postprocess` function. Write a Python function `def sem_seg_postprocess(result, img_size, output_height, output_width)` to solve the following problem:
Return semantic segmentation predictions in the original resolution. The input images are often resized when entering the semantic segmentor. Moreover, in some cases, they are also padded inside the segmentor to be divisible by the maximum network stride. As a result, we often need the predictions of the segmentor in a different resolution from its inputs. Args: result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), where C is the number of classes, and H, W are the height and width of the prediction. img_size (tuple): image size that the segmentor takes as input. output_height, output_width: the desired output resolution. Returns: semantic segmentation prediction (Tensor): A tensor of the shape (C, output_height, output_width) that contains per-pixel soft predictions.
Here is the function:
def sem_seg_postprocess(result, img_size, output_height, output_width):
    """
    Return semantic segmentation predictions in the original resolution.

    The input images are often resized when entering the semantic segmentor. Moreover, in
    some cases, they are also padded inside the segmentor to be divisible by the maximum
    network stride. As a result, we often need the predictions of the segmentor in a
    different resolution from its inputs.

    Args:
        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
            where C is the number of classes, and H, W are the height and width of the prediction.
        img_size (tuple): image size that the segmentor takes as input.
        output_height, output_width: the desired output resolution.

    Returns:
        semantic segmentation prediction (Tensor): A tensor of the shape
            (C, output_height, output_width) that contains per-pixel soft predictions.
    """
    result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
    result = F.interpolate(
        result, size=(output_height, output_width), mode="bilinear", align_corners=False
    )[0]
    return result
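A minimal sketch (random logits standing in for real predictions; all sizes illustrative):

import torch

logits = torch.randn(21, 128, 128)  # 21 classes, padded to 128x128 inside the network
out = sem_seg_postprocess(logits, img_size=(100, 120), output_height=200, output_width=240)
print(out.shape)  # torch.Size([21, 200, 240])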
Return semantic segmentation predictions in the original resolution.

The input images are often resized when entering the semantic segmentor. Moreover, in
some cases, they are also padded inside the segmentor to be divisible by the maximum
network stride. As a result, we often need the predictions of the segmentor in a
different resolution from its inputs.

Args:
    result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
        where C is the number of classes, and H, W are the height and width of the prediction.
    img_size (tuple): image size that the segmentor takes as input.
    output_height, output_width: the desired output resolution.

Returns:
    semantic segmentation prediction (Tensor): A tensor of the shape
        (C, output_height, output_width) that contains per-pixel soft predictions.
3,597
import torch
from detectron2.layers import nonzero_tuple

The provided code snippet includes necessary dependencies for implementing the `subsample_labels` function. Write a Python function `def subsample_labels( labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int )` to solve the following problem:
Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of positives & negatives. It will try to return as many positives as possible without exceeding `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives. Args: labels (Tensor): (N, ) label vector with values: * -1: ignore * bg_label: background ("negative") class * otherwise: one or more foreground ("positive") classes num_samples (int): The total number of labels with value >= 0 to return. Values that are not sampled will be filled with -1 (ignore). positive_fraction (float): The number of subsampled labels with values > 0 is `min(num_positives, int(positive_fraction * num_samples))`. The number of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. In other words, if there are not enough positives, the sample is filled with negatives. If there are also not enough negatives, then as many elements are sampled as is possible. bg_label (int): label index of background ("negative") class. Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer.
Here is the function:
def subsample_labels(
    labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int
):
    """
    Return `num_samples` (or fewer, if not enough found)
    random samples from `labels` which is a mixture of positives & negatives.
    It will try to return as many positives as possible without
    exceeding `positive_fraction * num_samples`, and then try to
    fill the remaining slots with negatives.

    Args:
        labels (Tensor): (N, ) label vector with values:
            * -1: ignore
            * bg_label: background ("negative") class
            * otherwise: one or more foreground ("positive") classes
        num_samples (int): The total number of labels with value >= 0 to return.
            Values that are not sampled will be filled with -1 (ignore).
        positive_fraction (float): The number of subsampled labels with values > 0
            is `min(num_positives, int(positive_fraction * num_samples))`. The number
            of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
            In other words, if there are not enough positives, the sample is filled
            with negatives. If there are also not enough negatives, then as many
            elements are sampled as is possible.
        bg_label (int): label index of background ("negative") class.

    Returns:
        pos_idx, neg_idx (Tensor):
            1D vector of indices. The total length of both is `num_samples` or fewer.
    """
    positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0]
    negative = nonzero_tuple(labels == bg_label)[0]

    num_pos = int(num_samples * positive_fraction)
    # protect against not enough positive examples
    num_pos = min(positive.numel(), num_pos)
    num_neg = num_samples - num_pos
    # protect against not enough negative examples
    num_neg = min(negative.numel(), num_neg)

    # randomly select positive and negative examples
    perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
    perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]

    pos_idx = positive[perm1]
    neg_idx = negative[perm2]
    return pos_idx, neg_idx
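A minimal sketch (a toy label vector with 3 positives and 4 negatives; a 50% positive fraction of 4 samples yields 2 of each):

import torch

labels = torch.tensor([-1, 0, 0, 0, 1, 1, 0, 1])  # -1 ignore, 0 background, 1 foreground
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
print(len(pos_idx), len(neg_idx))  # 2 2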
Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of positives & negatives. It will try to return as many positives as possible without exceeding `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives.

Args:
    labels (Tensor): (N, ) label vector with values:
        * -1: ignore
        * bg_label: background ("negative") class
        * otherwise: one or more foreground ("positive") classes
    num_samples (int): The total number of labels with value >= 0 to return.
        Values that are not sampled will be filled with -1 (ignore).
    positive_fraction (float): The number of subsampled labels with values > 0
        is `min(num_positives, int(positive_fraction * num_samples))`. The number
        of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
        In other words, if there are not enough positives, the sample is filled
        with negatives. If there are also not enough negatives, then as many
        elements are sampled as is possible.
    bg_label (int): label index of background ("negative") class.

Returns:
    pos_idx, neg_idx (Tensor):
        1D vector of indices. The total length of both is `num_samples` or fewer.
3,598
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn

from detectron2.layers import Conv2d, ShapeSpec, get_norm

from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone

The provided code snippet includes necessary dependencies for implementing the `_assert_strides_are_log2_contiguous` function. Write a Python function `def _assert_strides_are_log2_contiguous(strides)` to solve the following problem:
Assert that each stride is 2x its preceding stride, i.e. "contiguous in log2".
Here is the function:
def _assert_strides_are_log2_contiguous(strides):
    """
    Assert that each stride is 2x its preceding stride, i.e. "contiguous in log2".
    """
    for i, stride in enumerate(strides[1:], 1):
        assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
            stride, strides[i - 1]
        )
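For example:

_assert_strides_are_log2_contiguous([4, 8, 16, 32])  # passes silently
_assert_strides_are_log2_contiguous([4, 8, 32])      # raises AssertionError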
Assert that each stride is 2x its preceding stride, i.e. "contiguous in log2".
3,599
import math import fvcore.nn.weight_init as weight_init import torch import torch.nn.functional as F from torch import nn from detectron2.layers import Conv2d, ShapeSpec, get_norm from .backbone import Backbone from .build import BACKBONE_REGISTRY from .resnet import build_resnet_backbone class FPN(Backbone): """ This module implements :paper:`FPN`. It creates pyramid features built on top of some input feature maps. """ _fuse_type: torch.jit.Final[str] def __init__( self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" ): """ Args: bottom_up (Backbone): module representing the bottom up subnetwork. Must be a subclass of :class:`Backbone`. The multi-scale feature maps generated by the bottom up network, and listed in `in_features`, are used to generate FPN levels. in_features (list[str]): names of the input feature maps coming from the backbone to which FPN is attached. For example, if the backbone produces ["res2", "res3", "res4"], any *contiguous* sublist of these may be used; order must be from high to low resolution. out_channels (int): number of channels in the output feature maps. norm (str): the normalization to use. top_block (nn.Module or None): if provided, an extra operation will be performed on the output of the last (smallest resolution) FPN output, and the result will extend the result list. The top_block further downsamples the feature map. It must have an attribute "num_levels", meaning the number of extra FPN levels added by this block, and "in_feature", which is a string representing its input feature (e.g., p5). fuse_type (str): types for fusing the top down features and the lateral ones. It can be "sum" (default), which sums up element-wise; or "avg", which takes the element-wise mean of the two. """ super(FPN, self).__init__() assert isinstance(bottom_up, Backbone) assert in_features, in_features # Feature map strides and channels from the bottom up network (e.g. ResNet) input_shapes = bottom_up.output_shape() strides = [input_shapes[f].stride for f in in_features] in_channels_per_feature = [input_shapes[f].channels for f in in_features] _assert_strides_are_log2_contiguous(strides) lateral_convs = [] output_convs = [] use_bias = norm == "" for idx, in_channels in enumerate(in_channels_per_feature): lateral_norm = get_norm(norm, out_channels) output_norm = get_norm(norm, out_channels) lateral_conv = Conv2d( in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm ) output_conv = Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, ) weight_init.c2_xavier_fill(lateral_conv) weight_init.c2_xavier_fill(output_conv) stage = int(math.log2(strides[idx])) self.add_module("fpn_lateral{}".format(stage), lateral_conv) self.add_module("fpn_output{}".format(stage), output_conv) lateral_convs.append(lateral_conv) output_convs.append(output_conv) # Place convs into top-down order (from low to high resolution) # to make the top-down computation in forward clearer. self.lateral_convs = lateral_convs[::-1] self.output_convs = output_convs[::-1] self.top_block = top_block self.in_features = tuple(in_features) self.bottom_up = bottom_up # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"] self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} # top block output feature maps. 
if self.top_block is not None: for s in range(stage, stage + self.top_block.num_levels): self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) self._out_features = list(self._out_feature_strides.keys()) self._out_feature_channels = {k: out_channels for k in self._out_features} self._size_divisibility = strides[-1] assert fuse_type in {"avg", "sum"} self._fuse_type = fuse_type def size_divisibility(self): return self._size_divisibility def forward(self, x): """ Args: input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to feature map tensor for each feature level in high to low resolution order. Returns: dict[str->Tensor]: mapping from feature map name to FPN feature map tensor in high to low resolution order. Returned feature names follow the FPN paper convention: "p<stage>", where stage has stride = 2 ** stage e.g., ["p2", "p3", ..., "p6"]. """ bottom_up_features = self.bottom_up(x) results = [] prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]]) results.append(self.output_convs[0](prev_features)) # Reverse feature maps into top-down order (from low to high resolution) for idx, (lateral_conv, output_conv) in enumerate( zip(self.lateral_convs, self.output_convs) ): # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336 # Therefore we loop over all modules but skip the first one if idx > 0: features = self.in_features[-idx - 1] features = bottom_up_features[features] top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest") lateral_features = lateral_conv(features) prev_features = lateral_features + top_down_features if self._fuse_type == "avg": prev_features /= 2 results.insert(0, output_conv(prev_features)) if self.top_block is not None: if self.top_block.in_feature in bottom_up_features: top_block_in_feature = bottom_up_features[self.top_block.in_feature] else: top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] results.extend(self.top_block(top_block_in_feature)) assert len(self._out_features) == len(results) return {f: res for f, res in zip(self._out_features, results)} def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } class LastLevelMaxPool(nn.Module): """ This module is used in the original FPN to generate a downsampled P6 feature from P5. """ def __init__(self): super().__init__() self.num_levels = 1 self.in_feature = "p5" def forward(self, x): return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] def build_resnet_backbone(cfg, input_shape): """ Create a ResNet instance from config. Returns: ResNet: a :class:`ResNet` instance. """ # need registration of new blocks/stems? 
norm = cfg.MODEL.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, norm=norm, ) # fmt: off freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT out_features = cfg.MODEL.RESNETS.OUT_FEATURES depth = cfg.MODEL.RESNETS.DEPTH num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS # fmt: on assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if depth in [18, 34]: assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" assert not any( deform_on_per_stage ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" stages = [] for idx, stage_idx in enumerate(range(2, 6)): # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), "in_channels": in_channels, "out_channels": out_channels, "norm": norm, } # Use BasicBlock for R18 and R34. if depth in [18, 34]: stage_kargs["block_class"] = BasicBlock else: stage_kargs["bottleneck_channels"] = bottleneck_channels stage_kargs["stride_in_1x1"] = stride_in_1x1 stage_kargs["dilation"] = dilation stage_kargs["num_groups"] = num_groups if deform_on_per_stage[idx]: stage_kargs["block_class"] = DeformBottleneckBlock stage_kargs["deform_modulated"] = deform_modulated stage_kargs["deform_num_groups"] = deform_num_groups else: stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels *= 2 bottleneck_channels *= 2 stages.append(blocks) return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at) The provided code snippet includes necessary dependencies for implementing the `build_resnet_fpn_backbone` function. Write a Python function `def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec)` to solve the following problem: Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. Here is the function: def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): """ Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_resnet_backbone(cfg, input_shape) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
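A minimal sketch (assuming a full detectron2 install, where the BasicStem, BottleneckBlock, and ResNet names referenced in build_resnet_backbone are defined; the non-default feature lists must be set explicitly):

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()
cfg.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
cfg.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]
backbone = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=3))
print(sorted(backbone.output_shape()))  # ['p2', 'p3', 'p4', 'p5', 'p6']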
Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
3,600
import math import fvcore.nn.weight_init as weight_init import torch import torch.nn.functional as F from torch import nn from detectron2.layers import Conv2d, ShapeSpec, get_norm from .backbone import Backbone from .build import BACKBONE_REGISTRY from .resnet import build_resnet_backbone class FPN(Backbone): """ This module implements :paper:`FPN`. It creates pyramid features built on top of some input feature maps. """ _fuse_type: torch.jit.Final[str] def __init__( self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" ): """ Args: bottom_up (Backbone): module representing the bottom up subnetwork. Must be a subclass of :class:`Backbone`. The multi-scale feature maps generated by the bottom up network, and listed in `in_features`, are used to generate FPN levels. in_features (list[str]): names of the input feature maps coming from the backbone to which FPN is attached. For example, if the backbone produces ["res2", "res3", "res4"], any *contiguous* sublist of these may be used; order must be from high to low resolution. out_channels (int): number of channels in the output feature maps. norm (str): the normalization to use. top_block (nn.Module or None): if provided, an extra operation will be performed on the output of the last (smallest resolution) FPN output, and the result will extend the result list. The top_block further downsamples the feature map. It must have an attribute "num_levels", meaning the number of extra FPN levels added by this block, and "in_feature", which is a string representing its input feature (e.g., p5). fuse_type (str): types for fusing the top down features and the lateral ones. It can be "sum" (default), which sums up element-wise; or "avg", which takes the element-wise mean of the two. """ super(FPN, self).__init__() assert isinstance(bottom_up, Backbone) assert in_features, in_features # Feature map strides and channels from the bottom up network (e.g. ResNet) input_shapes = bottom_up.output_shape() strides = [input_shapes[f].stride for f in in_features] in_channels_per_feature = [input_shapes[f].channels for f in in_features] _assert_strides_are_log2_contiguous(strides) lateral_convs = [] output_convs = [] use_bias = norm == "" for idx, in_channels in enumerate(in_channels_per_feature): lateral_norm = get_norm(norm, out_channels) output_norm = get_norm(norm, out_channels) lateral_conv = Conv2d( in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm ) output_conv = Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, ) weight_init.c2_xavier_fill(lateral_conv) weight_init.c2_xavier_fill(output_conv) stage = int(math.log2(strides[idx])) self.add_module("fpn_lateral{}".format(stage), lateral_conv) self.add_module("fpn_output{}".format(stage), output_conv) lateral_convs.append(lateral_conv) output_convs.append(output_conv) # Place convs into top-down order (from low to high resolution) # to make the top-down computation in forward clearer. self.lateral_convs = lateral_convs[::-1] self.output_convs = output_convs[::-1] self.top_block = top_block self.in_features = tuple(in_features) self.bottom_up = bottom_up # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"] self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides} # top block output feature maps. 
if self.top_block is not None: for s in range(stage, stage + self.top_block.num_levels): self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) self._out_features = list(self._out_feature_strides.keys()) self._out_feature_channels = {k: out_channels for k in self._out_features} self._size_divisibility = strides[-1] assert fuse_type in {"avg", "sum"} self._fuse_type = fuse_type def size_divisibility(self): return self._size_divisibility def forward(self, x): """ Args: input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to feature map tensor for each feature level in high to low resolution order. Returns: dict[str->Tensor]: mapping from feature map name to FPN feature map tensor in high to low resolution order. Returned feature names follow the FPN paper convention: "p<stage>", where stage has stride = 2 ** stage e.g., ["p2", "p3", ..., "p6"]. """ bottom_up_features = self.bottom_up(x) results = [] prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]]) results.append(self.output_convs[0](prev_features)) # Reverse feature maps into top-down order (from low to high resolution) for idx, (lateral_conv, output_conv) in enumerate( zip(self.lateral_convs, self.output_convs) ): # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336 # Therefore we loop over all modules but skip the first one if idx > 0: features = self.in_features[-idx - 1] features = bottom_up_features[features] top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest") lateral_features = lateral_conv(features) prev_features = lateral_features + top_down_features if self._fuse_type == "avg": prev_features /= 2 results.insert(0, output_conv(prev_features)) if self.top_block is not None: if self.top_block.in_feature in bottom_up_features: top_block_in_feature = bottom_up_features[self.top_block.in_feature] else: top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] results.extend(self.top_block(top_block_in_feature)) assert len(self._out_features) == len(results) return {f: res for f, res in zip(self._out_features, results)} def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. """ def __init__(self, in_channels, out_channels, in_feature="res5"): super().__init__() self.num_levels = 2 self.in_feature = in_feature self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) for module in [self.p6, self.p7]: weight_init.c2_xavier_fill(module) def forward(self, c5): p6 = self.p6(c5) p7 = self.p7(F.relu(p6)) return [p6, p7] def build_resnet_backbone(cfg, input_shape): """ Create a ResNet instance from config. Returns: ResNet: a :class:`ResNet` instance. """ # need registration of new blocks/stems? 
norm = cfg.MODEL.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, norm=norm, ) # fmt: off freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT out_features = cfg.MODEL.RESNETS.OUT_FEATURES depth = cfg.MODEL.RESNETS.DEPTH num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS # fmt: on assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = { 18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], }[depth] if depth in [18, 34]: assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" assert not any( deform_on_per_stage ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" stages = [] for idx, stage_idx in enumerate(range(2, 6)): # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), "in_channels": in_channels, "out_channels": out_channels, "norm": norm, } # Use BasicBlock for R18 and R34. if depth in [18, 34]: stage_kargs["block_class"] = BasicBlock else: stage_kargs["bottleneck_channels"] = bottleneck_channels stage_kargs["stride_in_1x1"] = stride_in_1x1 stage_kargs["dilation"] = dilation stage_kargs["num_groups"] = num_groups if deform_on_per_stage[idx]: stage_kargs["block_class"] = DeformBottleneckBlock stage_kargs["deform_modulated"] = deform_modulated stage_kargs["deform_num_groups"] = deform_num_groups else: stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels *= 2 bottleneck_channels *= 2 stages.append(blocks) return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at) The provided code snippet includes necessary dependencies for implementing the `build_retinanet_resnet_fpn_backbone` function. Write a Python function `def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec)` to solve the following problem: Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. Here is the function: def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): """ Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
""" bottom_up = build_resnet_backbone(cfg, input_shape) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS in_channels_p6p7 = bottom_up.output_shape()["res5"].channels backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelP6P7(in_channels_p6p7, out_channels), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
3,601
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `conv2d` function. Write a Python function `def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False)` to solve the following problem: Helper for building a conv2d layer. Here is the function: def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False): """Helper for building a conv2d layer.""" assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." s, p, g, b = stride, (k - 1) // 2, groups, bias return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
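For example, a stride-2 3x3 convolution whose implicit padding keeps the "same" spatial alignment:

import torch

conv = conv2d(3, 64, 3, stride=2)
print(conv(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 64, 16, 16])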
Helper for building a conv2d layer.
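A quick shape check for the helper above (a sketch; assumes PyTorch and the `conv2d` helper are in scope):

import torch

conv = conv2d(16, 32, 3, stride=2)  # odd kernel -> padding (k - 1) // 2 = 1
x = torch.randn(1, 16, 8, 8)
print(conv(x).shape)  # torch.Size([1, 32, 4, 4]); spatial dims shrink only by the stride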
3,602
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `gap2d` function. Write a Python function `def gap2d()` to solve the following problem: Helper for building a global average pooling layer. Here is the function: def gap2d(): """Helper for building a global average pooling layer.""" return nn.AdaptiveAvgPool2d((1, 1))
Helper for building a global average pooling layer.
3,603
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `pool2d` function. Write a Python function `def pool2d(k, *, stride=1)` to solve the following problem: Helper for building a pool2d layer. Here is the function: def pool2d(k, *, stride=1): """Helper for building a pool2d layer.""" assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues." return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
Helper for building a pool2d layer.
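A quick sketch exercising this helper together with `gap2d` from the previous record (assumes both are in scope):

import torch

x = torch.randn(1, 8, 7, 7)
print(pool2d(3, stride=2)(x).shape)  # torch.Size([1, 8, 4, 4]); odd kernel keeps padding symmetric
print(gap2d()(x).shape)              # torch.Size([1, 8, 1, 1]); global average over H and W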
3,604
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `init_weights` function. Write a Python function `def init_weights(m)` to solve the following problem: Performs ResNet-style weight initialization. Here is the function: def init_weights(m): """Performs ResNet-style weight initialization.""" if isinstance(m, nn.Conv2d): # Note that there is no bias due to BN fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1.0) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.weight.data.normal_(mean=0.0, std=0.01) m.bias.data.zero_()
Performs ResNet-style weight initialization.
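The initializer above is meant to be applied recursively via `nn.Module.apply`, the standard PyTorch pattern; a minimal sketch:

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
net.apply(init_weights)  # visits every submodule: conv gets MSRA init, BN gets weight=1, bias=0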
3,605
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `adjust_block_compatibility` function. Write a Python function `def adjust_block_compatibility(ws, bs, gs)` to solve the following problem: Adjusts the compatibility of widths, bottlenecks, and groups. Here is the function: def adjust_block_compatibility(ws, bs, gs): """Adjusts the compatibility of widths, bottlenecks, and groups.""" assert len(ws) == len(bs) == len(gs) assert all(w > 0 and b > 0 and g > 0 for w, b, g in zip(ws, bs, gs)) vs = [int(max(1, w * b)) for w, b in zip(ws, bs)] gs = [int(min(g, v)) for g, v in zip(gs, vs)] ms = [np.lcm(g, b) if b > 1 else g for g, b in zip(gs, bs)] vs = [max(m, int(round(v / m) * m)) for v, m in zip(vs, ms)] ws = [int(v / b) for v, b in zip(vs, bs)] assert all(w * b % g == 0 for w, b, g in zip(ws, bs, gs)) return ws, bs, gs
Adjusts the compatibility of widths, bottlenecks, and groups.
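A hand-checked example of the adjustment (values computed from the code above; treat the expected output as illustrative):

ws, bs, gs = adjust_block_compatibility([48, 96], [0.25, 0.25], [8, 8])
print(ws, bs, gs)  # ([64, 96], [0.25, 0.25], [8, 8]); widths are bumped so w * b is divisible by g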
3,606
import numpy as np from torch import nn from detectron2.layers import CNNBlockBase, ShapeSpec, get_norm from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `generate_regnet_parameters` function. Write a Python function `def generate_regnet_parameters(w_a, w_0, w_m, d, q=8)` to solve the following problem: Generates per stage widths and depths from RegNet parameters. Here is the function: def generate_regnet_parameters(w_a, w_0, w_m, d, q=8): """Generates per stage widths and depths from RegNet parameters.""" assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0 # Generate continuous per-block ws ws_cont = np.arange(d) * w_a + w_0 # Generate quantized per-block ws ks = np.round(np.log(ws_cont / w_0) / np.log(w_m)) ws_all = w_0 * np.power(w_m, ks) ws_all = np.round(np.divide(ws_all, q)).astype(int) * q # Generate per stage ws and ds (assumes ws_all are sorted) ws, ds = np.unique(ws_all, return_counts=True) # Compute number of actual stages and total possible stages num_stages, total_stages = len(ws), ks.max() + 1 # Convert numpy arrays to lists and return ws, ds, ws_all, ws_cont = (x.tolist() for x in (ws, ds, ws_all, ws_cont)) return ws, ds, num_stages, total_stages, ws_all, ws_cont
Generates per stage widths and depths from RegNet parameters.
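A sketch with illustrative parameters (not a named RegNet model; note w_0 must be divisible by q, which defaults to 8):

ws, ds, num_stages, total_stages, ws_all, ws_cont = generate_regnet_parameters(
    w_a=24.0, w_0=24, w_m=2.5, d=16
)
print(ws, ds, num_stages)  # per-stage widths, per-stage depths, number of stages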
3,607
from detectron2.layers import ShapeSpec from detectron2.utils.registry import Registry from .backbone import Backbone BACKBONE_REGISTRY = Registry("BACKBONE") BACKBONE_REGISTRY.__doc__ = """ Registry for backbones, which extract feature maps from images The registered object must be a callable that accepts two arguments: 1. A :class:`detectron2.config.CfgNode` 2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification. Registered object must return instance of :class:`Backbone`. """ class Backbone(nn.Module, metaclass=ABCMeta): """ Abstract base class for network backbones. """ def __init__(self): """ The `__init__` method of any subclass can specify its own set of arguments. """ super().__init__() def forward(self): """ Subclasses must override this method, but adhere to the same return type. Returns: dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor """ pass def size_divisibility(self) -> int: """ Some backbones require the input height and width to be divisible by a specific integer. This is typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. """ return 0 def output_shape(self): """ Returns: dict[str->ShapeSpec] """ # this is a backward-compatible default return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] ) for name in self._out_features } The provided code snippet includes necessary dependencies for implementing the `build_backbone` function. Write a Python function `def build_backbone(cfg, input_shape=None)` to solve the following problem: Build a backbone from `cfg.MODEL.BACKBONE.NAME`. Returns: an instance of :class:`Backbone` Here is the function: def build_backbone(cfg, input_shape=None): """ Build a backbone from `cfg.MODEL.BACKBONE.NAME`. Returns: an instance of :class:`Backbone` """ if input_shape is None: input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) backbone_name = cfg.MODEL.BACKBONE.NAME backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) assert isinstance(backbone, Backbone) return backbone
Build a backbone from `cfg.MODEL.BACKBONE.NAME`. Returns: an instance of :class:`Backbone`
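The registry pattern this builder relies on, sketched with a hypothetical `ToyBackbone` (adapted from detectron2's own tutorial; not part of the record above):

import torch.nn as nn
from detectron2.layers import ShapeSpec
from detectron2.modeling import BACKBONE_REGISTRY, Backbone

@BACKBONE_REGISTRY.register()
class ToyBackbone(Backbone):
    def __init__(self, cfg, input_shape):
        super().__init__()
        self.conv = nn.Conv2d(input_shape.channels, 64, kernel_size=7, stride=4, padding=3)

    def forward(self, image):
        return {"conv1": self.conv(image)}

    def output_shape(self):
        return {"conv1": ShapeSpec(channels=64, stride=4)}

# With cfg.MODEL.BACKBONE.NAME = "ToyBackbone", build_backbone(cfg) returns an instance.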
3,608
import math from typing import List import torch from torch import nn from torchvision.ops import RoIPool from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor from detectron2.structures import Boxes The provided code snippet includes necessary dependencies for implementing the `assign_boxes_to_levels` function. Write a Python function `def assign_boxes_to_levels( box_lists: List[Boxes], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, )` to solve the following problem: Map each box in `box_lists` to a feature map level index and return the assignment vector. Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. min_level (int): Smallest feature map level index. The input is considered index 0, the output of stage 1 is index 1, and so. max_level (int): Largest feature map level index. canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). canonical_level (int): The feature map level index on which a canonically-sized box should be placed. Returns: A tensor of length M, where M is the total number of boxes aggregated over all N batch images. The memory layout corresponds to the concatenation of boxes from all images. Each element is the feature map index, as an offset from `self.min_level`, for the corresponding box (so value i means the box is at `self.min_level + i`). Here is the function: def assign_boxes_to_levels( box_lists: List[Boxes], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, ): """ Map each box in `box_lists` to a feature map level index and return the assignment vector. Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. min_level (int): Smallest feature map level index. The input is considered index 0, the output of stage 1 is index 1, and so. max_level (int): Largest feature map level index. canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). canonical_level (int): The feature map level index on which a canonically-sized box should be placed. Returns: A tensor of length M, where M is the total number of boxes aggregated over all N batch images. The memory layout corresponds to the concatenation of boxes from all images. Each element is the feature map index, as an offset from `self.min_level`, for the corresponding box (so value i means the box is at `self.min_level + i`). """ box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists])) # Eqn.(1) in FPN paper level_assignments = torch.floor( canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8) ) # clamp level to (min, max), in case the box size is too large or too small # for the available feature maps level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) return level_assignments.to(torch.int64) - min_level
Map each box in `box_lists` to a feature map level index and return the assignment vector. Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. min_level (int): Smallest feature map level index. The input is considered index 0, the output of stage 1 is index 1, and so on. max_level (int): Largest feature map level index. canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). canonical_level (int): The feature map level index on which a canonically-sized box should be placed. Returns: A tensor of length M, where M is the total number of boxes aggregated over all N batch images. The memory layout corresponds to the concatenation of boxes from all images. Each element is the feature map index, as an offset from `self.min_level`, for the corresponding box (so value i means the box is at `self.min_level + i`).
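A hand-checked example of FPN Eqn.(1) above (a sketch; expected values computed by hand):

import torch
from detectron2.structures import Boxes

box_lists = [Boxes(torch.tensor([[0., 0., 64., 64.], [0., 0., 512., 512.]]))]
levels = assign_boxes_to_levels(box_lists, min_level=2, max_level=5,
                                canonical_box_size=224, canonical_level=4)
print(levels)  # tensor([0, 3]): the 64px box maps to p2, the 512px box to p5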
3,609
import math from typing import List import torch from torch import nn from torchvision.ops import RoIPool from detectron2.layers import ROIAlign, ROIAlignRotated, cat, nonzero_tuple, shapes_to_tensor from detectron2.structures import Boxes The provided code snippet includes necessary dependencies for implementing the `convert_boxes_to_pooler_format` function. Write a Python function `def convert_boxes_to_pooler_format(box_lists: List[Boxes])` to solve the following problem: Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops (see description under Returns). Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. Returns: When input is list[Boxes]: A tensor of shape (M, 5), where M is the total number of boxes aggregated over all N batch images. The 5 columns are (batch index, x0, y0, x1, y1), where batch index is the index in [0, N) identifying which batch image the box with corners at (x0, y0, x1, y1) comes from. When input is list[RotatedBoxes]: A tensor of shape (M, 6), where M is the total number of boxes aggregated over all N batch images. The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), where batch index is the index in [0, N) identifying which batch image the rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from. Here is the function: def convert_boxes_to_pooler_format(box_lists: List[Boxes]): """ Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops (see description under Returns). Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. Returns: When input is list[Boxes]: A tensor of shape (M, 5), where M is the total number of boxes aggregated over all N batch images. The 5 columns are (batch index, x0, y0, x1, y1), where batch index is the index in [0, N) identifying which batch image the box with corners at (x0, y0, x1, y1) comes from. When input is list[RotatedBoxes]: A tensor of shape (M, 6), where M is the total number of boxes aggregated over all N batch images. The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), where batch index is the index in [0, N) identifying which batch image the rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from. """ boxes = torch.cat([x.tensor for x in box_lists], dim=0) # __len__ returns Tensor in tracing. sizes = shapes_to_tensor([x.__len__() for x in box_lists], device=boxes.device) indices = torch.repeat_interleave( torch.arange(len(box_lists), dtype=boxes.dtype, device=boxes.device), sizes ) return cat([indices[:, None], boxes], dim=1)
Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops (see description under Returns). Args: box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. Returns: When input is list[Boxes]: A tensor of shape (M, 5), where M is the total number of boxes aggregated over all N batch images. The 5 columns are (batch index, x0, y0, x1, y1), where batch index is the index in [0, N) identifying which batch image the box with corners at (x0, y0, x1, y1) comes from. When input is list[RotatedBoxes]: A tensor of shape (M, 6), where M is the total number of boxes aggregated over all N batch images. The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees), where batch index is the index in [0, N) identifying which batch image the rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
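A small sketch: two images with 1 and 2 boxes produce a (3, 5) tensor whose first column is the batch index:

import torch
from detectron2.structures import Boxes

fmt = convert_boxes_to_pooler_format([
    Boxes(torch.tensor([[0., 0., 10., 10.]])),
    Boxes(torch.tensor([[5., 5., 20., 20.], [1., 1., 3., 3.]])),
])
print(fmt)  # rows: [0, x0, y0, x1, y1], [1, ...], [1, ...]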
3,610
import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone The provided code snippet includes necessary dependencies for implementing the `_to_container` function. Write a Python function `def _to_container(cfg)` to solve the following problem: mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. Here is the function: def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg)
mmdet asserts on the exact type of dict/list, so convert omegaconf objects to plain dict/list.
3,611
import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, dim=0) inst.pred_masks = segm_result return inst
null
3,612
import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
null
3,613
import logging from typing import Dict, List import torch from torch import nn from detectron2.config import configurable from detectron2.structures import ImageList from ..postprocessing import detector_postprocess, sem_seg_postprocess from .build import META_ARCH_REGISTRY from .rcnn import GeneralizedRCNN from .semantic_seg import build_sem_seg_head The provided code snippet includes necessary dependencies for implementing the `combine_semantic_and_instance_outputs` function. Write a Python function `def combine_semantic_and_instance_outputs( instance_results, semantic_results, overlap_threshold, stuff_area_thresh, instances_score_thresh, )` to solve the following problem: Implement a simple combining logic following "combine_semantic_and_instance_predictions.py" in panopticapi to produce panoptic segmentation outputs. Args: instance_results: output of :func:`detector_postprocess`. semantic_results: an (H, W) tensor, each element is the contiguous semantic category id Returns: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". Here is the function: def combine_semantic_and_instance_outputs( instance_results, semantic_results, overlap_threshold, stuff_area_thresh, instances_score_thresh, ): """ Implement a simple combining logic following "combine_semantic_and_instance_predictions.py" in panopticapi to produce panoptic segmentation outputs. Args: instance_results: output of :func:`detector_postprocess`. semantic_results: an (H, W) tensor, each element is the contiguous semantic category id Returns: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". """ panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) # sort instance outputs by scores sorted_inds = torch.argsort(-instance_results.scores) current_segment_id = 0 segments_info = [] instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) # Add instances one-by-one, check for overlaps with existing ones for inst_id in sorted_inds: score = instance_results.scores[inst_id].item() if score < instances_score_thresh: break mask = instance_masks[inst_id] # H,W mask_area = mask.sum().item() if mask_area == 0: continue intersect = (mask > 0) & (panoptic_seg > 0) intersect_area = intersect.sum().item() if intersect_area * 1.0 / mask_area > overlap_threshold: continue if intersect_area > 0: mask = mask & (panoptic_seg == 0) current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_info.append( { "id": current_segment_id, "isthing": True, "score": score, "category_id": instance_results.pred_classes[inst_id].item(), "instance_id": inst_id.item(), } ) # Add semantic results to remaining empty areas semantic_labels = torch.unique(semantic_results).cpu().tolist() for semantic_label in semantic_labels: if semantic_label == 0: # 0 is a special "thing" class continue mask = (semantic_results == semantic_label) & (panoptic_seg == 0) mask_area = mask.sum().item() if mask_area < stuff_area_thresh: continue current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_info.append( { "id": current_segment_id, "isthing": False, "category_id": semantic_label, "area": mask_area, } ) return panoptic_seg, segments_info
Implement a simple combining logic following "combine_semantic_and_instance_predictions.py" in panopticapi to produce panoptic segmentation outputs. Args: instance_results: output of :func:`detector_postprocess`. semantic_results: an (H, W) tensor, each element is the contiguous semantic category id Returns: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing".
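A tiny synthetic merge, kept 4x4 so the result can be hand-checked (a sketch; the thresholds are chosen so both branches fire):

import torch
from detectron2.structures import Instances

sem = torch.zeros(4, 4, dtype=torch.int64)
sem[2:, :] = 7  # "stuff" class 7 on the bottom half
inst = Instances((4, 4))
inst.scores = torch.tensor([0.9])
inst.pred_classes = torch.tensor([0])
masks = torch.zeros(1, 4, 4, dtype=torch.bool)
masks[0, :2, :2] = True  # a 4-pixel "thing" mask
inst.pred_masks = masks
pan, segs = combine_semantic_and_instance_outputs(inst, sem, 0.5, 4, 0.5)
print(segs)  # one isthing=True segment (the instance) and one isthing=False segment (class 7)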
3,614
import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import Tensor, nn from detectron2.data.detection_utils import convert_image_to_rgb from detectron2.modeling import Backbone from detectron2.structures import Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from ..postprocessing import detector_postprocess The provided code snippet includes necessary dependencies for implementing the `permute_to_N_HWA_K` function. Write a Python function `def permute_to_N_HWA_K(tensor, K: int)` to solve the following problem: Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) Here is the function: def permute_to_N_HWA_K(tensor, K: int): """ Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) """ assert tensor.dim() == 4, tensor.shape N, _, H, W = tensor.shape tensor = tensor.view(N, -1, K, H, W) tensor = tensor.permute(0, 3, 4, 1, 2) tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) return tensor
Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)
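A shape check: 9 anchors times 4 box deltas per location flatten to (N, HWA, K):

import torch

x = torch.randn(2, 9 * 4, 10, 10)
print(permute_to_N_HWA_K(x, K=4).shape)  # torch.Size([2, 900, 4])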
3,615
import numpy as np from typing import Callable, Dict, Optional, Tuple, Union import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.structures import ImageList from detectron2.utils.registry import Registry from ..backbone import Backbone, build_backbone from ..postprocessing import sem_seg_postprocess from .build import META_ARCH_REGISTRY SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") SEM_SEG_HEADS_REGISTRY.__doc__ = """ Registry for semantic segmentation heads, which make semantic segmentation predictions from feature maps. """ The provided code snippet includes necessary dependencies for implementing the `build_sem_seg_head` function. Write a Python function `def build_sem_seg_head(cfg, input_shape)` to solve the following problem: Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. Here is the function: def build_sem_seg_head(cfg, input_shape): """ Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.NAME return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`.
3,616
import torch from detectron2.utils.logger import _log_api_usage from detectron2.utils.registry import Registry META_ARCH_REGISTRY = Registry("META_ARCH") META_ARCH_REGISTRY.__doc__ = """ Registry for meta-architectures, i.e. the whole model. The registered object will be called with `obj(cfg)` and expected to return a `nn.Module` object. """ def _log_api_usage(identifier: str): """ Internal function used to log the usage of different detectron2 components inside facebook's infra. """ torch._C._log_api_usage_once("detectron2." + identifier) The provided code snippet includes necessary dependencies for implementing the `build_model` function. Write a Python function `def build_model(cfg)` to solve the following problem: Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. Note that it does not load any weights from ``cfg``. Here is the function: def build_model(cfg): """ Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. Note that it does not load any weights from ``cfg``. """ meta_arch = cfg.MODEL.META_ARCHITECTURE model = META_ARCH_REGISTRY.get(meta_arch)(cfg) model.to(torch.device(cfg.MODEL.DEVICE)) _log_api_usage("modeling.meta_arch." + meta_arch) return model
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``. Note that it does not load any weights from ``cfg``.
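An end-to-end sketch (detectron2 model zoo assumed installed); note that, as documented above, no weights are loaded:

from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
cfg.MODEL.DEVICE = "cpu"  # keep the sketch runnable without a GPU
model = build_model(cfg)  # architecture only; use DetectionCheckpointer to load weights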
3,617
import collections import math from typing import List import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec from detectron2.structures import Boxes, RotatedBoxes from detectron2.utils.registry import Registry def _create_grid_offsets(size: List[int], stride: int, offset: float, device: torch.device): grid_height, grid_width = size shifts_x = torch.arange( offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device ) shifts_y = torch.arange( offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) return shift_x, shift_y
null
3,618
import collections
import math
from typing import List
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry

The provided code snippet includes necessary dependencies for implementing the `_broadcast_params` function. Write a Python function `def _broadcast_params(params, num_features, name)` to solve the following problem: If one size (or aspect ratio) is specified and there are multiple feature maps, we "broadcast" anchors of that single size (or aspect ratio) over all feature maps. If params is list[float], or list[list[float]] with len(params) == 1, repeat it num_features times. Returns: list[list[float]]: param for each feature Here is the function: def _broadcast_params(params, num_features, name): """ If one size (or aspect ratio) is specified and there are multiple feature maps, we "broadcast" anchors of that single size (or aspect ratio) over all feature maps. If params is list[float], or list[list[float]] with len(params) == 1, repeat it num_features times. Returns: list[list[float]]: param for each feature """ assert isinstance( params, collections.abc.Sequence ), f"{name} in anchor generator has to be a list! Got {params}." assert len(params), f"{name} in anchor generator cannot be empty!" if not isinstance(params[0], collections.abc.Sequence): # params is list[float] return [params] * num_features if len(params) == 1: return list(params) * num_features assert len(params) == num_features, ( f"Got {name} of length {len(params)} in anchor generator, " f"but the number of input features is {num_features}!" ) return params
If one size (or aspect ratio) is specified and there are multiple feature maps, we "broadcast" anchors of that single size (or aspect ratio) over all feature maps. If params is list[float], or list[list[float]] with len(params) == 1, repeat it num_features times. Returns: list[list[float]]: param for each feature
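Both accepted input shapes, sketched (assumes `_broadcast_params` is in scope):

print(_broadcast_params([[32], [64], [128]], 3, "sizes"))
# [[32], [64], [128]] -- already one entry per feature map, returned as-is
print(_broadcast_params([0.5, 1.0, 2.0], 3, "aspect_ratios"))
# [[0.5, 1.0, 2.0], [0.5, 1.0, 2.0], [0.5, 1.0, 2.0]] -- one flat list broadcast to all maps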
3,619
import collections
import math
from typing import List
import torch
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.utils.registry import Registry

ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR")
ANCHOR_GENERATOR_REGISTRY.__doc__ = """
Registry for modules that create object detection anchors for feature maps.

The registered object will be called with `obj(cfg, input_shape)`.
"""

The provided code snippet includes necessary dependencies for implementing the `build_anchor_generator` function. Write a Python function `def build_anchor_generator(cfg, input_shape)` to solve the following problem: Build an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. Here is the function: def build_anchor_generator(cfg, input_shape): """ Build an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. """ anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)
Build an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.
3,620
from typing import List import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate from detectron2.structures import Instances, heatmaps_to_keypoints from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") ROI_KEYPOINT_HEAD_REGISTRY.__doc__ = """ Registry for keypoint heads, which make keypoint predictions from per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ The provided code snippet includes necessary dependencies for implementing the `build_keypoint_head` function. Write a Python function `def build_keypoint_head(cfg, input_shape)` to solve the following problem: Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. Here is the function: def build_keypoint_head(cfg, input_shape): """ Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. """ name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape)
Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`.
3,621
from typing import List import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, cat, interpolate from detectron2.structures import Instances, heatmaps_to_keypoints from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry _TOTAL_SKIPPED = 0 def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] The provided code snippet includes necessary dependencies for implementing the `keypoint_rcnn_loss` function. Write a Python function `def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer)` to solve the following problem: Arguments: pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number of instances in the batch, K is the number of keypoints, and S is the side length of the keypoint heatmap. The values are spatial logits. instances (list[Instances]): A list of M Instances, where M is the batch size. These instances are predictions from the model that are in 1:1 correspondence with pred_keypoint_logits. Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` instance. normalizer (float): Normalize the loss by this amount. If not specified, we normalize by the number of visible keypoints in the minibatch. Returns a scalar tensor containing the loss. Here is the function: def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): """ Arguments: pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number of instances in the batch, K is the number of keypoints, and S is the side length of the keypoint heatmap. The values are spatial logits. instances (list[Instances]): A list of M Instances, where M is the batch size. These instances are predictions from the model that are in 1:1 correspondence with pred_keypoint_logits. Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` instance. normalizer (float): Normalize the loss by this amount. If not specified, we normalize by the number of visible keypoints in the minibatch. Returns a scalar tensor containing the loss. 
""" heatmaps = [] valid = [] keypoint_side_len = pred_keypoint_logits.shape[2] for instances_per_image in instances: if len(instances_per_image) == 0: continue keypoints = instances_per_image.gt_keypoints heatmaps_per_image, valid_per_image = keypoints.to_heatmap( instances_per_image.proposal_boxes.tensor, keypoint_side_len ) heatmaps.append(heatmaps_per_image.view(-1)) valid.append(valid_per_image.view(-1)) if len(heatmaps): keypoint_targets = cat(heatmaps, dim=0) valid = cat(valid, dim=0).to(dtype=torch.uint8) valid = torch.nonzero(valid).squeeze(1) # torch.mean (in binary_cross_entropy_with_logits) doesn't # accept empty tensors, so handle it separately if len(heatmaps) == 0 or valid.numel() == 0: global _TOTAL_SKIPPED _TOTAL_SKIPPED += 1 storage = get_event_storage() storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) return pred_keypoint_logits.sum() * 0 N, K, H, W = pred_keypoint_logits.shape pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) keypoint_loss = F.cross_entropy( pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" ) # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch if normalizer is None: normalizer = valid.numel() keypoint_loss /= normalizer return keypoint_loss
Arguments: pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number of instances in the batch, K is the number of keypoints, and S is the side length of the keypoint heatmap. The values are spatial logits. instances (list[Instances]): A list of M Instances, where M is the batch size. These instances are predictions from the model that are in 1:1 correspondence with pred_keypoint_logits. Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoints` instance. normalizer (float): Normalize the loss by this amount. If not specified, we normalize by the number of visible keypoints in the minibatch. Returns a scalar tensor containing the loss.
3,622
from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm from detectron2.structures import Instances from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] The provided code snippet includes necessary dependencies for implementing the `mask_rcnn_loss` function. Write a Python function `def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0)` to solve the following problem: Compute the mask prediction loss defined in the Mask R-CNN paper. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. vis_period (int): the period (in steps) to dump visualization. Returns: mask_loss (Tensor): A scalar tensor containing the loss. Here is the function: def mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0): """ Compute the mask prediction loss defined in the Mask R-CNN paper. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. vis_period (int): the period (in steps) to dump visualization. Returns: mask_loss (Tensor): A scalar tensor containing the loss. """ cls_agnostic_mask = pred_mask_logits.size(1) == 1 total_num_masks = pred_mask_logits.size(0) mask_side_len = pred_mask_logits.size(2) assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" 
gt_classes = [] gt_masks = [] for instances_per_image in instances: if len(instances_per_image) == 0: continue if not cls_agnostic_mask: gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) gt_classes.append(gt_classes_per_image) gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( instances_per_image.proposal_boxes.tensor, mask_side_len ).to(device=pred_mask_logits.device) # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len gt_masks.append(gt_masks_per_image) if len(gt_masks) == 0: return pred_mask_logits.sum() * 0 gt_masks = cat(gt_masks, dim=0) if cls_agnostic_mask: pred_mask_logits = pred_mask_logits[:, 0] else: indices = torch.arange(total_num_masks) gt_classes = cat(gt_classes, dim=0) pred_mask_logits = pred_mask_logits[indices, gt_classes] if gt_masks.dtype == torch.bool: gt_masks_bool = gt_masks else: # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) gt_masks_bool = gt_masks > 0.5 gt_masks = gt_masks.to(dtype=torch.float32) # Log the training accuracy (using gt classes and 0.5 threshold) mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) num_positive = gt_masks_bool.sum().item() false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( gt_masks_bool.numel() - num_positive, 1.0 ) false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) storage = get_event_storage() storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) storage.put_scalar("mask_rcnn/false_positive", false_positive) storage.put_scalar("mask_rcnn/false_negative", false_negative) if vis_period > 0 and storage.iter % vis_period == 0: pred_masks = pred_mask_logits.sigmoid() vis_masks = torch.cat([pred_masks, gt_masks], axis=2) name = "Left: mask prediction; Right: mask GT" for idx, vis_mask in enumerate(vis_masks): vis_mask = torch.stack([vis_mask] * 3, axis=0) storage.put_image(name + f" ({idx})", vis_mask) mask_loss = F.binary_cross_entropy_with_logits(pred_mask_logits, gt_masks, reduction="mean") return mask_loss
Compute the mask prediction loss defined in the Mask R-CNN paper. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. These instances are in 1:1 correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, ...) associated with each instance are stored in fields. vis_period (int): the period (in steps) to dump visualization. Returns: mask_loss (Tensor): A scalar tensor containing the loss.
3,623
from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm from detectron2.structures import Instances from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry The provided code snippet includes necessary dependencies for implementing the `mask_rcnn_inference` function. Write a Python function `def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances])` to solve the following problem: Convert pred_mask_logits to estimated foreground probability masks while also extracting only the masks for the predicted classes in pred_instances. For each predicted box, the mask of the same class is attached to the instance by adding a new "pred_masks" field to pred_instances. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. pred_instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. Each Instances must have field "pred_classes". Returns: None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, Wmask) for predicted class. Note that the masks are returned as a soft (non-quantized) masks the resolution predicted by the network; post-processing steps, such as resizing the predicted masks to the original image resolution and/or binarizing them, is left to the caller. Here is the function: def mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]): """ Convert pred_mask_logits to estimated foreground probability masks while also extracting only the masks for the predicted classes in pred_instances. For each predicted box, the mask of the same class is attached to the instance by adding a new "pred_masks" field to pred_instances. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. pred_instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. Each Instances must have field "pred_classes". Returns: None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, Wmask) for predicted class. Note that the masks are returned as a soft (non-quantized) masks the resolution predicted by the network; post-processing steps, such as resizing the predicted masks to the original image resolution and/or binarizing them, is left to the caller. 
""" cls_agnostic_mask = pred_mask_logits.size(1) == 1 if cls_agnostic_mask: mask_probs_pred = pred_mask_logits.sigmoid() else: # Select masks corresponding to the predicted classes num_masks = pred_mask_logits.shape[0] class_pred = cat([i.pred_classes for i in pred_instances]) indices = torch.arange(num_masks, device=class_pred.device) mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() # mask_probs_pred.shape: (B, 1, Hmask, Wmask) num_boxes_per_image = [len(i) for i in pred_instances] mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) for prob, instances in zip(mask_probs_pred, pred_instances): instances.pred_masks = prob # (1, Hmask, Wmask)
Convert pred_mask_logits to estimated foreground probability masks while also extracting only the masks for the predicted classes in pred_instances. For each predicted box, the mask of the same class is attached to the instance by adding a new "pred_masks" field to pred_instances. Args: pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) for class-specific or class-agnostic, where B is the total number of predicted masks in all images, C is the number of foreground classes, and Hmask, Wmask are the height and width of the mask predictions. The values are logits. pred_instances (list[Instances]): A list of N Instances, where N is the number of images in the batch. Each Instances must have field "pred_classes". Returns: None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, Wmask) for the predicted class. Note that the masks are returned as soft (non-quantized) masks at the resolution predicted by the network; post-processing steps, such as resizing the predicted masks to the original image resolution and/or binarizing them, are left to the caller.
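A sketch of class-specific inference: 3 boxes over 80 classes, one sigmoid mask attached per box, selected by its predicted class:

import torch
from detectron2.structures import Instances

logits = torch.randn(3, 80, 28, 28)
inst = Instances((480, 640))
inst.pred_classes = torch.tensor([17, 0, 5])
mask_rcnn_inference(logits, [inst])
print(inst.pred_masks.shape)  # torch.Size([3, 1, 28, 28])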
3,624
from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm from detectron2.structures import Instances from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") ROI_MASK_HEAD_REGISTRY.__doc__ = """ Registry for mask heads, which predicts instance masks given per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ The provided code snippet includes necessary dependencies for implementing the `build_mask_head` function. Write a Python function `def build_mask_head(cfg, input_shape)` to solve the following problem: Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. Here is the function: def build_mask_head(cfg, input_shape): """ Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. """ name = cfg.MODEL.ROI_MASK_HEAD.NAME return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)
Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`.
3,625
import numpy as np from typing import List import fvcore.nn.weight_init as weight_init import torch from torch import nn from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.utils.registry import Registry ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") ROI_BOX_HEAD_REGISTRY.__doc__ = """ Registry for box heads, which make box predictions from per-region features. The registered object will be called with `obj(cfg, input_shape)`. """ The provided code snippet includes necessary dependencies for implementing the `build_box_head` function. Write a Python function `def build_box_head(cfg, input_shape)` to solve the following problem: Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. Here is the function: def build_box_head(cfg, input_shape): """ Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. """ name = cfg.MODEL.ROI_BOX_HEAD.NAME return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)
Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.
3,626
import logging from typing import Dict, List, Tuple, Union import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss from detectron2.structures import Boxes, Instances from detectron2.utils.events import get_event_storage def fast_rcnn_inference_single_image( boxes, scores, image_shape: Tuple[int, int], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Args: Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes per image. Returns: Same as `fast_rcnn_inference`, but for only one image. """ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) if not valid_mask.all(): boxes = boxes[valid_mask] scores = scores[valid_mask] scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... boxes = Boxes(boxes.reshape(-1, 4)) boxes.clip(image_shape) boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # 1. Filter results based on detection scores. It can make NMS more efficient # by filtering out low-confidence detections. filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # 2. Apply NMS for each class independently. keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = Boxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] The provided code snippet includes necessary dependencies for implementing the `fast_rcnn_inference` function. Write a Python function `def fast_rcnn_inference( boxes: List[torch.Tensor], scores: List[torch.Tensor], image_shapes: List[Tuple[int, int]], score_thresh: float, nms_thresh: float, topk_per_image: int, )` to solve the following problem: Call `fast_rcnn_inference_single_image` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 4) if doing class-specific regression, or (Ri, 4) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. 
Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that store the topk most confident detections. kept_indices: (list[Tensor]): A list of N 1D tensors; each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. Here is the function: def fast_rcnn_inference( boxes: List[torch.Tensor], scores: List[torch.Tensor], image_shapes: List[Tuple[int, int]], score_thresh: float, nms_thresh: float, topk_per_image: int, ): """ Call `fast_rcnn_inference_single_image` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 4) if doing class-specific regression, or (Ri, 4) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that store the topk most confident detections. kept_indices: (list[Tensor]): A list of N 1D tensors; each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. """ result_per_image = [ fast_rcnn_inference_single_image( boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image ) for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) ] return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
Call `fast_rcnn_inference_single_image` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 4) if doing class-specific regression, or (Ri, 4) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that store the topk most confident detections. kept_indices: (list[Tensor]): A list of N 1D tensors; each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i.
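A minimal sketch (assumes the record's functions and detectron2 imports are in scope): one proposal, K=2 classes, so boxes are R x K*4 and scores R x (K+1) with background last; only class 0 clears the threshold:

import torch

boxes = [torch.tensor([[10., 10., 50., 50., 12., 12., 48., 48.]])]
scores = [torch.tensor([[0.9, 0.05, 0.05]])]
insts, kept = fast_rcnn_inference(boxes, scores, [(100, 100)],
                                  score_thresh=0.5, nms_thresh=0.5, topk_per_image=100)
print(insts[0].pred_classes, kept[0])  # tensor([0]) tensor([0])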
3,627
import logging from typing import Dict, List, Tuple, Union import torch from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple from detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss from detectron2.structures import Boxes, Instances from detectron2.utils.events import get_event_storage def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] The provided code snippet includes necessary dependencies for implementing the `_log_classification_stats` function. Write a Python function `def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn")` to solve the following problem: Log the classification metrics to EventStorage. Args: pred_logits: Rx(K+1) logits. The last column is for background class. gt_classes: R labels Here is the function: def _log_classification_stats(pred_logits, gt_classes, prefix="fast_rcnn"): """ Log the classification metrics to EventStorage. Args: pred_logits: Rx(K+1) logits. The last column is for background class. gt_classes: R labels """ num_instances = gt_classes.numel() if num_instances == 0: return pred_classes = pred_logits.argmax(dim=1) bg_class_ind = pred_logits.shape[1] - 1 fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind) num_fg = fg_inds.nonzero().numel() fg_gt_classes = gt_classes[fg_inds] fg_pred_classes = pred_classes[fg_inds] num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() num_accurate = (pred_classes == gt_classes).nonzero().numel() fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() storage = get_event_storage() storage.put_scalar(f"{prefix}/cls_accuracy", num_accurate / num_instances) if num_fg > 0: storage.put_scalar(f"{prefix}/fg_cls_accuracy", fg_num_accurate / num_fg) storage.put_scalar(f"{prefix}/false_negative", num_false_negative / num_fg)
Log the classification metrics to EventStorage. Args: pred_logits: Rx(K+1) logits. The last column is for background class. gt_classes: R labels
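A quick way to see this helper in action is to call it inside an `EventStorage` context, which it requires; the 8-proposal batch below is invented for the example, and the private import path is an assumption.

import torch
from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
from detectron2.utils.events import EventStorage

logits = torch.randn(8, 81)              # 8 proposals, 80 classes + 1 background column
gt = torch.randint(0, 81, (8,))          # class index 80 plays the background label
with EventStorage() as storage:
    _log_classification_stats(logits, gt, prefix="fast_rcnn")
    print(storage.latest()["fast_rcnn/cls_accuracy"])   # (accuracy, iteration) pair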
3,628
import logging import numpy as np import torch from detectron2.config import configurable from detectron2.layers import ShapeSpec, batched_nms_rotated from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated from detectron2.utils.events import get_event_storage from ..box_regression import Box2BoxTransformRotated from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads def fast_rcnn_inference_single_image_rotated( boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image ): """ Single-image inference. Return rotated bounding-box detection results by thresholding on scores and applying rotated non-maximum suppression (Rotated NMS). Args: Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes per image. Returns: Same as `fast_rcnn_inference_rotated`, but for only one image. """ valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) if not valid_mask.all(): boxes = boxes[valid_mask] scores = scores[valid_mask] B = 5 # box dimension scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // B # Convert to Boxes to use the `clip` function ... boxes = RotatedBoxes(boxes.reshape(-1, B)) boxes.clip(image_shape) boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B # Filter results based on detection scores filter_mask = scores > score_thresh # R x K # R' x 2. First column contains indices of the R predictions; # Second column contains indices of classes. filter_inds = filter_mask.nonzero() if num_bbox_reg_classes == 1: boxes = boxes[filter_inds[:, 0], 0] else: boxes = boxes[filter_mask] scores = scores[filter_mask] # Apply per-class Rotated NMS keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh) if topk_per_image >= 0: keep = keep[:topk_per_image] boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] result = Instances(image_shape) result.pred_boxes = RotatedBoxes(boxes) result.scores = scores result.pred_classes = filter_inds[:, 1] return result, filter_inds[:, 0] The provided code snippet includes necessary dependencies for implementing the `fast_rcnn_inference_rotated` function. Write a Python function `def fast_rcnn_inference_rotated( boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image )` to solve the following problem: Call `fast_rcnn_inference_single_image_rotated` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 5) if doing class-specific regression, or (Ri, 5) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. 
Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that stores the topk most confidence detections. kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. Here is the function: def fast_rcnn_inference_rotated( boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image ): """ Call `fast_rcnn_inference_single_image_rotated` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 5) if doing class-specific regression, or (Ri, 5) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N instances, one for each image in the batch, that stores the topk most confidence detections. kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates the corresponding boxes/scores index in [0, Ri) from the input, for image i. """ result_per_image = [ fast_rcnn_inference_single_image_rotated( boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image ) for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) ] return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
Call `fast_rcnn_inference_single_image_rotated` for all images. Args: boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic boxes for each image. Element i has shape (Ri, K * 5) if doing class-specific regression, or (Ri, 5) if doing class-agnostic regression, where Ri is the number of predicted objects for image i. This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`. scores (list[Tensor]): A list of Tensors of predicted class scores for each image. Element i has shape (Ri, K + 1), where Ri is the number of predicted objects for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`. image_shapes (list[tuple]): A list of (height, width) tuples for each image in the batch. score_thresh (float): Only return detections with a confidence score exceeding this threshold. nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. topk_per_image (int): The number of top-scoring detections to return. Set < 0 to return all detections. Returns: instances: (list[Instances]): A list of N Instances, one for each image in the batch, that stores the top-k most confident detections. kept_indices: (list[Tensor]): A list of N 1D tensors; element i holds the indices in [0, Ri) of the input boxes/scores that were kept for image i.
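The rotated variant can be exercised the same way, now with 5-number (cx, cy, w, h, angle) boxes; sizes and thresholds below are illustrative, and the sketch assumes detectron2's compiled rotated-NMS op is available on the current device.

import torch
from detectron2.modeling.roi_heads.rotated_fast_rcnn import fast_rcnn_inference_rotated

K, R = 15, 50                                   # made-up sizes
cxcy = torch.rand(R, K, 2) * 400                # box centers
wh = torch.rand(R, K, 2) * 80 + 1               # box widths/heights
ang = (torch.rand(R, K, 1) - 0.5) * 90          # angles in degrees
boxes = [torch.cat([cxcy, wh, ang], dim=2).reshape(R, K * 5)]
scores = [torch.rand(R, K + 1).softmax(dim=1)]
instances, kept = fast_rcnn_inference_rotated(
    boxes, scores, image_shapes=[(480, 640)],
    score_thresh=0.05, nms_thresh=0.5, topk_per_image=20,
)
print(instances[0].pred_boxes.tensor.shape)     # (n, 5) rotated boxes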
3,629
import inspect import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec, nonzero_tuple from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry from ..backbone.resnet import BottleneckBlock, ResNet from ..matcher import Matcher from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from ..sampling import subsample_labels from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .keypoint_head import build_keypoint_head from .mask_head import build_mask_head ROI_HEADS_REGISTRY = Registry("ROI_HEADS") ROI_HEADS_REGISTRY.__doc__ = """ Registry for ROI heads in a generalized R-CNN model. ROIHeads take feature maps and region proposals, and perform per-region computation. The registered object will be called with `obj(cfg, input_shape)`. The call is expected to return an :class:`ROIHeads`. """ The provided code snippet includes necessary dependencies for implementing the `build_roi_heads` function. Write a Python function `def build_roi_heads(cfg, input_shape)` to solve the following problem: Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. Here is the function: def build_roi_heads(cfg, input_shape): """ Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. """ name = cfg.MODEL.ROI_HEADS.NAME return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
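Under the default config this builder should resolve to "Res5ROIHeads"; a minimal sketch, assuming a single `res4` feature map with the usual ResNet channel count and stride.

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec
from detectron2.modeling import build_roi_heads

cfg = get_cfg()                      # default cfg.MODEL.ROI_HEADS.NAME == "Res5ROIHeads"
input_shape = {"res4": ShapeSpec(channels=1024, stride=16)}
roi_heads = build_roi_heads(cfg, input_shape)
print(type(roi_heads).__name__)      # Res5ROIHeads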
3,630
import inspect import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec, nonzero_tuple from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry from ..backbone.resnet import BottleneckBlock, ResNet from ..matcher import Matcher from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from ..sampling import subsample_labels from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .keypoint_head import build_keypoint_head from .mask_head import build_mask_head The provided code snippet includes necessary dependencies for implementing the `select_foreground_proposals` function. Write a Python function `def select_foreground_proposals( proposals: List[Instances], bg_label: int ) -> Tuple[List[Instances], List[torch.Tensor]]` to solve the following problem: Given a list of N Instances (for N images), each containing a `gt_classes` field, return a list of Instances that contain only instances with `gt_classes != -1 && gt_classes != bg_label`. Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of background class. Returns: list[Instances]: N Instances, each contains only the selected foreground instances. list[Tensor]: N boolean vector, correspond to the selection mask of each Instances object. True for selected instances. Here is the function: def select_foreground_proposals( proposals: List[Instances], bg_label: int ) -> Tuple[List[Instances], List[torch.Tensor]]: """ Given a list of N Instances (for N images), each containing a `gt_classes` field, return a list of Instances that contain only instances with `gt_classes != -1 && gt_classes != bg_label`. Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of background class. Returns: list[Instances]: N Instances, each contains only the selected foreground instances. list[Tensor]: N boolean vector, correspond to the selection mask of each Instances object. True for selected instances. """ assert isinstance(proposals, (list, tuple)) assert isinstance(proposals[0], Instances) assert proposals[0].has("gt_classes") fg_proposals = [] fg_selection_masks = [] for proposals_per_image in proposals: gt_classes = proposals_per_image.gt_classes fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) fg_idxs = fg_selection_mask.nonzero().squeeze(1) fg_proposals.append(proposals_per_image[fg_idxs]) fg_selection_masks.append(fg_selection_mask) return fg_proposals, fg_selection_masks
Given a list of N Instances (for N images), each containing a `gt_classes` field, return a list of Instances that contain only instances with `gt_classes != -1 && gt_classes != bg_label`. Args: proposals (list[Instances]): A list of N Instances, where N is the number of images in the batch. bg_label: label index of the background class. Returns: list[Instances]: N Instances, each containing only the selected foreground instances. list[Tensor]: N boolean vectors, each the selection mask of the corresponding Instances object. True for selected instances.
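A hedged sketch with a hand-built `Instances` object, treating class index 80 as the background label purely for the example.

import torch
from detectron2.structures import Boxes, Instances
from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals

inst = Instances((480, 640))
inst.proposal_boxes = Boxes(torch.tensor([[0., 0., 10., 10.],
                                          [5., 5., 20., 20.],
                                          [1., 1., 4., 4.]]))
inst.gt_classes = torch.tensor([2, 80, -1])     # foreground, background, ignored
fg, masks = select_foreground_proposals([inst], bg_label=80)
print(len(fg[0]), masks[0].tolist())            # 1 [True, False, False]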
3,631
import inspect import logging import numpy as np from typing import Dict, List, Optional, Tuple import torch from torch import nn from detectron2.config import configurable from detectron2.layers import ShapeSpec, nonzero_tuple from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from detectron2.utils.events import get_event_storage from detectron2.utils.registry import Registry from ..backbone.resnet import BottleneckBlock, ResNet from ..matcher import Matcher from ..poolers import ROIPooler from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals from ..sampling import subsample_labels from .box_head import build_box_head from .fast_rcnn import FastRCNNOutputLayers from .keypoint_head import build_keypoint_head from .mask_head import build_mask_head def get_event_storage(): """ Returns: The :class:`EventStorage` object that's currently being used. Throws an error if no :class:`EventStorage` is currently enabled. """ assert len( _CURRENT_STORAGE_STACK ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" return _CURRENT_STORAGE_STACK[-1] The provided code snippet includes necessary dependencies for implementing the `select_proposals_with_visible_keypoints` function. Write a Python function `def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]` to solve the following problem: Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training keypoint head are re-sampled from all the proposals with IOU>threshold & >=1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU>threshold, then proposals with no visible keypoint are filtered out. This strategy seems to make no difference on Detectron and is easier to implement. Here is the function: def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]: """ Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training keypoint head are re-sampled from all the proposals with IOU>threshold & >=1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU>threshold, then proposals with no visible keypoint are filtered out. This strategy seems to make no difference on Detectron and is easier to implement. 
""" ret = [] all_num_fg = [] for proposals_per_image in proposals: # If empty/unannotated image (hard negatives), skip filtering for train if len(proposals_per_image) == 0: ret.append(proposals_per_image) continue gt_keypoints = proposals_per_image.gt_keypoints.tensor # #fg x K x 3 vis_mask = gt_keypoints[:, :, 2] >= 1 xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1] proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4 kp_in_box = ( (xs >= proposal_boxes[:, :, 0]) & (xs <= proposal_boxes[:, :, 2]) & (ys >= proposal_boxes[:, :, 1]) & (ys <= proposal_boxes[:, :, 3]) ) selection = (kp_in_box & vis_mask).any(dim=1) selection_idxs = nonzero_tuple(selection)[0] all_num_fg.append(selection_idxs.numel()) ret.append(proposals_per_image[selection_idxs]) storage = get_event_storage() storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg)) return ret
Args: proposals (list[Instances]): a list of N Instances, where N is the number of images. Returns: proposals: only contains proposals with at least one visible keypoint. Note that this is still slightly different from Detectron. In Detectron, proposals for training the keypoint head are re-sampled from all the proposals with IOU > threshold and >= 1 visible keypoint. Here, the proposals are first sampled from all proposals with IOU > threshold, then proposals with no visible keypoint are filtered out. This strategy appears to make no difference on Detectron and is easier to implement.
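Because the helper logs a scalar to the event storage, it must run inside an `EventStorage` context; the two toy proposals below, one with a visible keypoint inside its box and one without, are invented for the example.

import torch
from detectron2.structures import Boxes, Instances, Keypoints
from detectron2.utils.events import EventStorage
from detectron2.modeling.roi_heads.roi_heads import select_proposals_with_visible_keypoints

inst = Instances((480, 640))
inst.proposal_boxes = Boxes(torch.tensor([[0., 0., 100., 100.],
                                          [200., 200., 300., 300.]]))
kpts = torch.zeros(2, 17, 3)                    # (N, K, (x, y, visibility))
kpts[0, 0] = torch.tensor([50., 50., 2.])       # one visible keypoint inside box 0
inst.gt_keypoints = Keypoints(kpts)
with EventStorage():                            # required: the helper logs num_fg_samples
    kept = select_proposals_with_visible_keypoints([inst])
print(len(kept[0]))                             # 1: only box 0 survives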
3,632
import logging import math from bisect import bisect_right from typing import List import torch from fvcore.common.param_scheduler import ( CompositeParamScheduler, ConstantParamScheduler, LinearParamScheduler, ParamScheduler, ) The provided code snippet includes necessary dependencies for implementing the `_get_warmup_factor_at_iter` function. Write a Python function `def _get_warmup_factor_at_iter( method: str, iter: int, warmup_iters: int, warmup_factor: float ) -> float` to solve the following problem: Return the learning rate warmup factor at a specific iteration. See :paper:`ImageNet in 1h` for more details. Args: method (str): warmup method; either "constant" or "linear". iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). Returns: float: the effective warmup factor at the given iteration. Here is the function: def _get_warmup_factor_at_iter( method: str, iter: int, warmup_iters: int, warmup_factor: float ) -> float: """ Return the learning rate warmup factor at a specific iteration. See :paper:`ImageNet in 1h` for more details. Args: method (str): warmup method; either "constant" or "linear". iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). Returns: float: the effective warmup factor at the given iteration. """ if iter >= warmup_iters: return 1.0 if method == "constant": return warmup_factor elif method == "linear": alpha = iter / warmup_iters return warmup_factor * (1 - alpha) + alpha else: raise ValueError("Unknown warmup method: {}".format(method))
Return the learning rate warmup factor at a specific iteration. See :paper:`ImageNet in 1h` for more details. Args: method (str): warmup method; either "constant" or "linear". iter (int): iteration at which to calculate the warmup factor. warmup_iters (int): the number of warmup iterations. warmup_factor (float): the base warmup factor (the meaning changes according to the method used). Returns: float: the effective warmup factor at the given iteration.
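A tiny sketch of the warmup ramp, assuming the helper is importable from `detectron2.solver.lr_scheduler` as in the snippet's module.

from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter

for it in (0, 250, 500, 1000):
    f = _get_warmup_factor_at_iter("linear", it, warmup_iters=500, warmup_factor=0.001)
    print(it, round(f, 4))   # 0.001 -> 0.5005 -> 1.0 -> 1.0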
3,633
import copy import itertools import logging from collections import defaultdict from enum import Enum from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union import torch from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler from detectron2.config import CfgNode from .lr_scheduler import LRMultiplier, WarmupParamScheduler def maybe_add_gradient_clipping( cfg: CfgNode, optimizer: Type[torch.optim.Optimizer] ) -> Type[torch.optim.Optimizer]: """ If gradient clipping is enabled through config options, wraps the existing optimizer type to become a new dynamically created class OptimizerWithGradientClip that inherits the given optimizer and overrides the `step` method to include gradient clipping. Args: cfg: CfgNode, configuration options optimizer: type. A subclass of torch.optim.Optimizer Return: type: either the input `optimizer` (if gradient clipping is disabled), or a subclass of it with gradient clipping included in the `step` method. """ if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED: return optimizer if isinstance(optimizer, torch.optim.Optimizer): optimizer_type = type(optimizer) else: assert issubclass(optimizer, torch.optim.Optimizer), optimizer optimizer_type = optimizer grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS) OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( optimizer_type, per_param_clipper=grad_clipper ) if isinstance(optimizer, torch.optim.Optimizer): optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended return optimizer else: return OptimizerWithGradientClip def get_default_optimizer_params( model: torch.nn.Module, base_lr: Optional[float] = None, weight_decay: Optional[float] = None, weight_decay_norm: Optional[float] = None, bias_lr_factor: Optional[float] = 1.0, weight_decay_bias: Optional[float] = None, overrides: Optional[Dict[str, Dict[str, float]]] = None, ) -> List[Dict[str, Any]]: """ Get default param list for optimizer, with support for a few types of overrides. If no overrides needed, this is equivalent to `model.parameters()`. Args: base_lr: lr for every group by default. Can be omitted to use the one in optimizer. weight_decay: weight decay for every group by default. Can be omitted to use the one in optimizer. weight_decay_norm: override weight decay for params in normalization layers bias_lr_factor: multiplier of lr for bias parameters. weight_decay_bias: override weight decay for bias parameters overrides: if not `None`, provides values for optimizer hyperparameters (LR, weight decay) for module parameters with a given name; e.g. ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and weight decay values for all module parameters named `embedding`. For common detection models, ``weight_decay_norm`` is the only option needed to be set. ``bias_lr_factor,weight_decay_bias`` are legacy settings from Detectron1 that are not found useful. Example: :: torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0), lr=0.01, weight_decay=1e-4, momentum=0.9) """ if overrides is None: overrides = {} defaults = {} if base_lr is not None: defaults["lr"] = base_lr if weight_decay is not None: defaults["weight_decay"] = weight_decay bias_overrides = {} if bias_lr_factor is not None and bias_lr_factor != 1.0: # NOTE: unlike Detectron v1, we now by default make bias hyperparameters # exactly the same as regular weights. 
if base_lr is None: raise ValueError("bias_lr_factor requires base_lr") bias_overrides["lr"] = base_lr * bias_lr_factor if weight_decay_bias is not None: bias_overrides["weight_decay"] = weight_decay_bias if len(bias_overrides): if "bias" in overrides: raise ValueError("Conflicting overrides for 'bias'") overrides["bias"] = bias_overrides norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module in model.modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if isinstance(module, norm_module_types) and weight_decay_norm is not None: hyperparams["weight_decay"] = weight_decay_norm hyperparams.update(overrides.get(module_param_name, {})) params.append({"params": [value], **hyperparams}) return reduce_param_groups(params) The provided code snippet includes necessary dependencies for implementing the `build_optimizer` function. Write a Python function `def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer` to solve the following problem: Build an optimizer from config. Here is the function: def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: """ Build an optimizer from config. """ params = get_default_optimizer_params( model, base_lr=cfg.SOLVER.BASE_LR, weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, ) return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( params, lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV, weight_decay=cfg.SOLVER.WEIGHT_DECAY, )
Build an optimizer from config.
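With a default config and any `nn.Module`, the builder should return a plain SGD optimizer, since gradient clipping is off by default; a minimal sketch.

import torch
from detectron2.config import get_cfg
from detectron2.solver import build_optimizer

cfg = get_cfg()                          # defaults: BASE_LR=0.001, MOMENTUM=0.9, clipping off
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
optimizer = build_optimizer(cfg, model)
print(type(optimizer).__name__, len(optimizer.param_groups))   # SGD, merged param groups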
3,634
import copy import itertools import logging from collections import defaultdict from enum import Enum from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union import torch from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler from detectron2.config import CfgNode from .lr_scheduler import LRMultiplier, WarmupParamScheduler class WarmupParamScheduler(CompositeParamScheduler): """ Add an initial warmup stage to another scheduler. """ def __init__( self, scheduler: ParamScheduler, warmup_factor: float, warmup_length: float, warmup_method: str = "linear", ): """ Args: scheduler: warmup will be added at the beginning of this scheduler warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001 warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire training, e.g. 0.01 warmup_method: one of "linear" or "constant" """ end_value = scheduler(warmup_length) # the value to reach when warmup ends start_value = warmup_factor * scheduler(0.0) if warmup_method == "constant": warmup = ConstantParamScheduler(start_value) elif warmup_method == "linear": warmup = LinearParamScheduler(start_value, end_value) else: raise ValueError("Unknown warmup method: {}".format(warmup_method)) super().__init__( [warmup, scheduler], interval_scaling=["rescaled", "fixed"], lengths=[warmup_length, 1 - warmup_length], ) class LRMultiplier(torch.optim.lr_scheduler._LRScheduler): """ A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the learning rate of each param in the optimizer. Every step, the learning rate of each parameter becomes its initial value multiplied by the output of the given :class:`ParamScheduler`. The absolute learning rate value of each parameter can be different. This scheduler can be used as long as the relative scale among them do not change during training. Examples: :: LRMultiplier( opt, WarmupParamScheduler( MultiStepParamScheduler( [1, 0.1, 0.01], milestones=[60000, 80000], num_updates=90000, ), 0.001, 100 / 90000 ), max_iter=90000 ) """ # NOTES: in the most general case, every LR can use its own scheduler. # Supporting this requires interaction with the optimizer when its parameter # group is initialized. For example, classyvision implements its own optimizer # that allows different schedulers for every parameter group. # To avoid this complexity, we use this class to support the most common cases # where the relative scale among all LRs stay unchanged during training. In this # case we only need a total of one scheduler that defines the relative LR multiplier. def __init__( self, optimizer: torch.optim.Optimizer, multiplier: ParamScheduler, max_iter: int, last_iter: int = -1, ): """ Args: optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``. ``last_iter`` is the same as ``last_epoch``. multiplier: a fvcore ParamScheduler that defines the multiplier on every LR of the optimizer max_iter: the total number of training iterations """ if not isinstance(multiplier, ParamScheduler): raise ValueError( "_LRMultiplier(multiplier=) must be an instance of fvcore " f"ParamScheduler. Got {multiplier} instead." ) self._multiplier = multiplier self._max_iter = max_iter super().__init__(optimizer, last_epoch=last_iter) def state_dict(self): # fvcore schedulers are stateless. 
Only keep pytorch scheduler states return {"base_lrs": self.base_lrs, "last_epoch": self.last_epoch} def get_lr(self) -> List[float]: multiplier = self._multiplier(self.last_epoch / self._max_iter) return [base_lr * multiplier for base_lr in self.base_lrs] The provided code snippet includes necessary dependencies for implementing the `build_lr_scheduler` function. Write a Python function `def build_lr_scheduler( cfg: CfgNode, optimizer: torch.optim.Optimizer ) -> torch.optim.lr_scheduler._LRScheduler` to solve the following problem: Build a LR scheduler from config. Here is the function: def build_lr_scheduler( cfg: CfgNode, optimizer: torch.optim.Optimizer ) -> torch.optim.lr_scheduler._LRScheduler: """ Build a LR scheduler from config. """ name = cfg.SOLVER.LR_SCHEDULER_NAME if name == "WarmupMultiStepLR": steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER] if len(steps) != len(cfg.SOLVER.STEPS): logger = logging.getLogger(__name__) logger.warning( "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. " "These values will be ignored." ) sched = MultiStepParamScheduler( values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)], milestones=steps, num_updates=cfg.SOLVER.MAX_ITER, ) elif name == "WarmupCosineLR": sched = CosineParamScheduler(1, 0) else: raise ValueError("Unknown LR scheduler: {}".format(name)) sched = WarmupParamScheduler( sched, cfg.SOLVER.WARMUP_FACTOR, min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0), cfg.SOLVER.WARMUP_METHOD, ) return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
Build a LR scheduler from config.
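A sketch pairing this builder with `build_optimizer`; under the default config the schedule is "WarmupMultiStepLR", so the first iterations sit on the linear warmup ramp. Note the scheduler is stepped once per training iteration, not per epoch.

import torch
from detectron2.config import get_cfg
from detectron2.solver import build_lr_scheduler, build_optimizer

cfg = get_cfg()              # defaults: WarmupMultiStepLR, MAX_ITER=40000, WARMUP_ITERS=1000
model = torch.nn.Linear(4, 2)
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
for _ in range(10):          # one scheduler.step() per iteration
    optimizer.step()
    scheduler.step()
print(optimizer.param_groups[0]["lr"])   # still tiny: inside the warmup ramp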
3,635
import datetime import logging import time from collections import OrderedDict, abc from contextlib import ExitStack, contextmanager from typing import List, Union import torch from torch import nn from detectron2.utils.comm import get_world_size, is_main_process from detectron2.utils.logger import log_every_n_seconds class DatasetEvaluator: """ Base class for a dataset evaluator. The function :func:`inference_on_dataset` runs the model over all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. This class will accumulate information of the inputs/outputs (by :meth:`process`), and produce evaluation results in the end (by :meth:`evaluate`). """ def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ pass def process(self, inputs, outputs): """ Process the pair of inputs and outputs. If they contain batches, the pairs can be consumed one-by-one using `zip`: .. code-block:: python for input_, output in zip(inputs, outputs): # do evaluation on single input/output pair ... Args: inputs (list): the inputs that's used to call the model. outputs (list): the return value of `model(inputs)` """ pass def evaluate(self): """ Evaluate/summarize the performance, after processing all input/output pairs. Returns: dict: A new evaluator class can return a dict of arbitrary format as long as the user can process the results. In our train_net.py, we expect the following format: * key: the name of the task (e.g., bbox) * value: a dict of {metric name: score}, e.g.: {"AP50": 80} """ pass class DatasetEvaluators(DatasetEvaluator): """ Wrapper class to combine multiple :class:`DatasetEvaluator` instances. This class dispatches every evaluation call to all of its :class:`DatasetEvaluator`. """ def __init__(self, evaluators): """ Args: evaluators (list): the evaluators to combine. """ super().__init__() self._evaluators = evaluators def reset(self): for evaluator in self._evaluators: evaluator.reset() def process(self, inputs, outputs): for evaluator in self._evaluators: evaluator.process(inputs, outputs) def evaluate(self): results = OrderedDict() for evaluator in self._evaluators: result = evaluator.evaluate() if is_main_process() and result is not None: for k, v in result.items(): assert ( k not in results ), "Different evaluators produce results with the same key {}".format(k) results[k] = v return results def inference_context(model): """ A context where the model is temporarily changed to eval mode, and restored to previous mode afterwards. Args: model: a torch Module """ training_mode = model.training model.eval() yield model.train(training_mode) def get_world_size() -> int: if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def log_every_n_seconds(lvl, msg, n=1, *, name=None): """ Log no more than once per n seconds. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. """ caller_module, key = _find_caller() last_logged = _LOG_TIMER.get(key, None) current_time = time.time() if last_logged is None or current_time - last_logged >= n: logging.getLogger(name or caller_module).log(lvl, msg) _LOG_TIMER[key] = current_time The provided code snippet includes necessary dependencies for implementing the `inference_on_dataset` function. 
Write a Python function `def inference_on_dataset( model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] )` to solve the following problem: Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. Returns: The return value of `evaluator.evaluate()` Here is the function: def inference_on_dataset( model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] ): """ Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. Returns: The return value of `evaluator.evaluate()` """ num_devices = get_world_size() logger = logging.getLogger(__name__) logger.info("Start inference on {} batches".format(len(data_loader))) total = len(data_loader) # inference data loader must have a fixed length if evaluator is None: # create a no-op evaluator evaluator = DatasetEvaluators([]) if isinstance(evaluator, abc.MutableSequence): evaluator = DatasetEvaluators(evaluator) evaluator.reset() num_warmup = min(5, total - 1) start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 with ExitStack() as stack: if isinstance(model, nn.Module): stack.enter_context(inference_context(model)) stack.enter_context(torch.no_grad()) start_data_time = time.perf_counter() for idx, inputs in enumerate(data_loader): total_data_time += time.perf_counter() - start_data_time if idx == num_warmup: start_time = time.perf_counter() total_data_time = 0 total_compute_time = 0 total_eval_time = 0 start_compute_time = time.perf_counter() outputs = model(inputs) if torch.cuda.is_available(): torch.cuda.synchronize() total_compute_time += time.perf_counter() - start_compute_time start_eval_time = time.perf_counter() evaluator.process(inputs, outputs) total_eval_time += time.perf_counter() - start_eval_time iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) data_seconds_per_iter = total_data_time / iters_after_start compute_seconds_per_iter = total_compute_time / iters_after_start eval_seconds_per_iter = total_eval_time / iters_after_start total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) 
log_every_n_seconds( logging.INFO, ( f"Inference done {idx + 1}/{total}. " f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " f"Inference: {compute_seconds_per_iter:.4f} s/iter. " f"Eval: {eval_seconds_per_iter:.4f} s/iter. " f"Total: {total_seconds_per_iter:.4f} s/iter. " f"ETA={eta}" ), n=5, ) start_data_time = time.perf_counter() # Measure the time only for this worker (before the synchronization barrier) total_time = time.perf_counter() - start_time total_time_str = str(datetime.timedelta(seconds=total_time)) # NOTE this format is parsed by grep logger.info( "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( total_time_str, total_time / (total - num_warmup), num_devices ) ) total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) logger.info( "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( total_compute_time_str, total_compute_time / (total - num_warmup), num_devices ) ) results = evaluator.evaluate() # An evaluator may return None when not in main process. # Replace it by an empty dict instead to make it easier for downstream code to handle if results is None: results = {} return results
Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. Returns: The return value of `evaluator.evaluate()`
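Since `model` only needs to be a callable and `data_loader` only needs a length, a toy benchmark run needs no real dataset; a sketch with `evaluator=None`, where all inputs are fabricated.

import torch
from detectron2.evaluation import inference_on_dataset

def model(inputs):                        # any callable works; nn.Module is not required
    return [{"mean": x["image"].mean()} for x in inputs]

data_loader = [[{"image": torch.rand(3, 32, 32)}] for _ in range(10)]  # 10 one-image batches
results = inference_on_dataset(model, data_loader, evaluator=None)     # benchmark only
print(results)                            # empty result dict: no evaluator was given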
3,636
import contextlib import io import itertools import json import logging import numpy as np import os import tempfile from collections import OrderedDict from typing import Optional from PIL import Image from tabulate import tabulate from detectron2.data import MetadataCatalog from detectron2.utils import comm from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator logger = logging.getLogger(__name__) def _print_panoptic_results(pq_res): headers = ["", "PQ", "SQ", "RQ", "#categories"] data = [] for name in ["All", "Things", "Stuff"]: row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] data.append(row) table = tabulate( data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" ) logger.info("Panoptic Evaluation Results:\n" + table)
null
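The helper above only needs a dict keyed by "All"/"Things"/"Stuff"; a sketch with made-up numbers shaped like panopticapi's averaged output, assuming the private import path.

import logging
from detectron2.evaluation.panoptic_evaluation import _print_panoptic_results

logging.basicConfig(level=logging.INFO)      # so the logged table is visible
pq_res = {
    name: {"pq": 0.40, "sq": 0.78, "rq": 0.50, "n": n}
    for name, n in [("All", 133), ("Things", 80), ("Stuff", 53)]
}
_print_panoptic_results(pq_res)              # logs a small PQ/SQ/RQ table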
3,637
import copy import itertools import json import logging import os import pickle from collections import OrderedDict import torch import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .coco_evaluation import instances_to_coco_json from .evaluator import DatasetEvaluator The provided code snippet includes necessary dependencies for implementing the `_evaluate_box_proposals` function. Write a Python function `def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None)` to solve the following problem: Evaluate detection proposal recall metrics. This function is a much faster alternative to the official LVIS API recall evaluation code. However, it produces slightly different results. Here is the function: def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official LVIS API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) anno = lvis_api.load_anns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 
'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, }
Evaluate detection proposal recall metrics. This function is a much faster alternative to the official LVIS API recall evaluation code. However, it produces slightly different results.
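The helper only touches two methods of the LVIS API object, so a minimal stand-in is enough to run it end to end; every number below is invented.

import torch
from detectron2.structures import Boxes, Instances
from detectron2.evaluation.lvis_evaluation import _evaluate_box_proposals

class FakeLVIS:                                   # stub with the two methods the helper uses
    def get_ann_ids(self, img_ids):
        return [0]
    def load_anns(self, ids):
        return [{"bbox": [10, 10, 50, 80], "area": 4000}]   # XYWH, as in LVIS json

proposals = Instances((480, 640))
proposals.proposal_boxes = Boxes(torch.tensor([[10., 10., 60., 90.], [0., 0., 5., 5.]]))
proposals.objectness_logits = torch.tensor([2.0, 1.0])
stats = _evaluate_box_proposals([{"image_id": 1, "proposals": proposals}], FakeLVIS())
print(stats["ar"].item(), stats["num_pos"])       # 1.0 1: the first proposal matches the GT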
3,638
import copy import itertools import json import logging import os import pickle from collections import OrderedDict import torch import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .coco_evaluation import instances_to_coco_json from .evaluator import DatasetEvaluator def create_small_table(small_dict): """ Create a small table using the keys of small_dict as headers. This is only suitable for small dictionaries. Args: small_dict (dict): a result dictionary of only a few items. Returns: str: the table as a string. """ keys, values = tuple(zip(*small_dict.items())) table = tabulate( [values], headers=keys, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center", ) return table The provided code snippet includes necessary dependencies for implementing the `_evaluate_predictions_on_lvis` function. Write a Python function `def _evaluate_predictions_on_lvis( lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None )` to solve the following problem: Args: iou_type (str): max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP This limit, by default of the LVIS dataset, is 300. class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} Here is the function: def _evaluate_predictions_on_lvis( lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None ): """ Args: iou_type (str): max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP This limit, by default of the LVIS dataset, is 300. class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], }[iou_type] logger = logging.getLogger(__name__) if len(lvis_results) == 0: # TODO: check if needed logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} if iou_type == "segm": lvis_results = copy.deepcopy(lvis_results) # When evaluating mask AP, if the results contain bbox, LVIS API will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in lvis_results: c.pop("bbox", None) if max_dets_per_image is None: max_dets_per_image = 300 # Default for LVIS dataset from lvis import LVISEval, LVISResults logger.info(f"Evaluating with max detections per image = {max_dets_per_image}") lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image) lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) lvis_eval.run() lvis_eval.print_results() # Pull the standard metrics from the LVIS results results = lvis_eval.get_results() results = {metric: float(results[metric] * 100) for metric in metrics} logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) return results
Args: iou_type (str): max_dets_per_image (None or int): limit on the maximum number of detections per image when evaluating AP. For the LVIS dataset this limit is 300 by default. class_names (None or list[str]): if provided, will use it to compute per-category AP. Returns: a dict of {metric name: score}
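A hedged sketch of the call shape, guarded so it only executes when a local LVIS annotation file exists; the file name and the single result entry are hypothetical.

import os
from detectron2.evaluation.lvis_evaluation import _evaluate_predictions_on_lvis

ann_file = "lvis_v1_val.json"        # hypothetical local copy of the LVIS annotations
if os.path.exists(ann_file):
    from lvis import LVIS
    lvis_gt = LVIS(ann_file)
    results = [{"image_id": 1, "category_id": 3,
                "bbox": [10.0, 10.0, 50.0, 80.0], "score": 0.9}]   # LVIS-style XYWH
    metrics = _evaluate_predictions_on_lvis(lvis_gt, results, "bbox",
                                            max_dets_per_image=300)
    print(metrics["AP"])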
3,639
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle from collections import OrderedDict import pycocotools.mask as mask_util import torch from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator The provided code snippet includes necessary dependencies for implementing the `instances_to_coco_json` function. Write a Python function `def instances_to_coco_json(instances, img_id)` to solve the following problem: Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. Here is the function: def instances_to_coco_json(instances, img_id): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ num_instance = len(instances) if num_instance == 0: return [] boxes = instances.pred_boxes.tensor.numpy() boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) boxes = boxes.tolist() scores = instances.scores.tolist() classes = instances.pred_classes.tolist() has_mask = instances.has("pred_masks") if has_mask: # use RLE to encode the masks, because they are too large and takes memory # since this evaluator stores outputs of the entire dataset rles = [ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] for mask in instances.pred_masks ] for rle in rles: # "counts" is an array encoded by mask_util as a byte-stream. Python3's # json writer which always produces strings cannot serialize a bytestream # unless you decode it. Thankfully, utf-8 works out (which is also what # the pycocotools/_mask.pyx does). rle["counts"] = rle["counts"].decode("utf-8") has_keypoints = instances.has("pred_keypoints") if has_keypoints: keypoints = instances.pred_keypoints results = [] for k in range(num_instance): result = { "image_id": img_id, "category_id": classes[k], "bbox": boxes[k], "score": scores[k], } if has_mask: result["segmentation"] = rles[k] if has_keypoints: # In COCO annotations, # keypoints coordinates are pixel indices. # However our predictions are floating point coordinates. # Therefore we subtract 0.5 to be consistent with the annotation format. # This is the inverse of data loading logic in `datasets/coco.py`. keypoints[k][:, :2] -= 0.5 result["keypoints"] = keypoints[k].flatten().tolist() results.append(result) return results
Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): img_id (int): the image id Returns: list[dict]: list of json annotations in COCO format.
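A small round-trip sketch: build a one-detection `Instances` and dump it, which also shows the XYXY to XYWH conversion in the output; all values are made up.

import torch
from detectron2.structures import Boxes, Instances
from detectron2.evaluation.coco_evaluation import instances_to_coco_json

inst = Instances((480, 640))
inst.pred_boxes = Boxes(torch.tensor([[10., 20., 110., 220.]]))
inst.scores = torch.tensor([0.9])
inst.pred_classes = torch.tensor([5])
print(instances_to_coco_json(inst, img_id=42))
# -> [{'image_id': 42, 'category_id': 5, 'bbox': [10.0, 20.0, 100.0, 200.0], 'score': ~0.9}]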
3,640
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle from collections import OrderedDict import pycocotools.mask as mask_util import torch from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator The provided code snippet includes necessary dependencies for implementing the `_evaluate_box_proposals` function. Write a Python function `def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None)` to solve the following problem: Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. Here is the function: def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): """ Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for prediction_dict in dataset_predictions: predictions = prediction_dict["proposals"] # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = predictions.objectness_logits.sort(descending=True)[1] predictions = predictions[inds] ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) anno = coco_api.loadAnns(ann_ids) gt_boxes = [ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno if obj["iscrowd"] == 0 ] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = Boxes(gt_boxes) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0 or len(predictions) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if limit is not None and len(predictions) > limit: predictions = predictions[:limit] overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(predictions), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 
'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = ( torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) ) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, }
Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results.
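The COCO flavor can be driven the same way with a two-method stub; here the second, crowd-marked annotation is included to show that `iscrowd == 1` ground truth is excluded from `num_pos`. All numbers are fabricated.

import torch
from detectron2.structures import Boxes, Instances
from detectron2.evaluation.coco_evaluation import _evaluate_box_proposals

class FakeCOCO:                                   # stub with the two methods the helper uses
    def getAnnIds(self, imgIds):
        return [0, 1]
    def loadAnns(self, ids):
        return [
            {"bbox": [10, 10, 50, 80], "area": 4000, "iscrowd": 0},
            {"bbox": [0, 0, 30, 30], "area": 900, "iscrowd": 1},   # crowd GT is skipped
        ]

proposals = Instances((480, 640))
proposals.proposal_boxes = Boxes(torch.tensor([[10., 10., 60., 90.]]))
proposals.objectness_logits = torch.tensor([1.0])
stats = _evaluate_box_proposals([{"image_id": 1, "proposals": proposals}], FakeCOCO())
print(stats["num_pos"], stats["ar"].item())       # 1 1.0: only the non-crowd box counts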
3,641
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle from collections import OrderedDict import pycocotools.mask as mask_util import torch from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate import detectron2.utils.comm as comm from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator class COCOevalMaxDets(COCOeval): """ Modified version of COCOeval for evaluating AP with a custom maxDets (by default for COCO, maxDets is 100) """ def summarize(self): """ Compute and display summary metrics for evaluation results given a custom value for max_dets_per_image """ def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): p = self.params iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" titleStr = "Average Precision" if ap == 1 else "Average Recall" typeStr = "(AP)" if ap == 1 else "(AR)" iouStr = ( "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) if iouThr is None else "{:0.2f}".format(iouThr) ) aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] if ap == 1: # dimension of precision: [TxRxKxAxM] s = self.eval["precision"] # IoU if iouThr is not None: t = np.where(iouThr == p.iouThrs)[0] s = s[t] s = s[:, :, :, aind, mind] else: # dimension of recall: [TxKxAxM] s = self.eval["recall"] if iouThr is not None: t = np.where(iouThr == p.iouThrs)[0] s = s[t] s = s[:, :, aind, mind] if len(s[s > -1]) == 0: mean_s = -1 else: mean_s = np.mean(s[s > -1]) print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) return mean_s def _summarizeDets(): stats = np.zeros((12,)) # Evaluate AP using the custom limit on maximum detections per image stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) return stats def _summarizeKps(): stats = np.zeros((10,)) stats[0] = _summarize(1, maxDets=20) stats[1] = _summarize(1, maxDets=20, iouThr=0.5) stats[2] = _summarize(1, maxDets=20, iouThr=0.75) stats[3] = _summarize(1, maxDets=20, areaRng="medium") stats[4] = _summarize(1, maxDets=20, areaRng="large") stats[5] = _summarize(0, maxDets=20) stats[6] = _summarize(0, maxDets=20, iouThr=0.5) stats[7] = _summarize(0, maxDets=20, iouThr=0.75) stats[8] = _summarize(0, maxDets=20, areaRng="medium") stats[9] = _summarize(0, maxDets=20, areaRng="large") return stats if not 
self.eval: raise Exception("Please run accumulate() first") iouType = self.params.iouType if iouType == "segm" or iouType == "bbox": summarize = _summarizeDets elif iouType == "keypoints": summarize = _summarizeKps self.stats = summarize() def __str__(self): self.summarize() class COCOeval_opt(COCOeval): """ This is a slightly modified version of the original COCO API, where the functions evaluateImg() and accumulate() are implemented in C++ to speedup evaluation """ def evaluate(self): """ Run per image evaluation on given images and store results in self.evalImgs_cpp, a datastructure that isn't readable from Python but is used by a c++ implementation of accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure self.evalImgs because this datastructure is a computational bottleneck. :return: None """ tic = time.time() p = self.params # add backward compatibility if useSegm is specified in params if p.useSegm is not None: p.iouType = "segm" if p.useSegm == 1 else "bbox" logger.info("Evaluate annotation type *{}*".format(p.iouType)) p.imgIds = list(np.unique(p.imgIds)) if p.useCats: p.catIds = list(np.unique(p.catIds)) p.maxDets = sorted(p.maxDets) self.params = p self._prepare() # bottleneck # loop through images, area range, max detection number catIds = p.catIds if p.useCats else [-1] if p.iouType == "segm" or p.iouType == "bbox": computeIoU = self.computeIoU elif p.iouType == "keypoints": computeIoU = self.computeOks self.ious = { (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds } # bottleneck maxDet = p.maxDets[-1] # <<<< Beginning of code differences with original COCO API def convert_instances_to_cpp(instances, is_det=False): # Convert annotations for a list of instances in an image to a format that's fast # to access in C++ instances_cpp = [] for instance in instances: instance_cpp = _C.InstanceAnnotation( int(instance["id"]), instance["score"] if is_det else instance.get("score", 0.0), instance["area"], bool(instance.get("iscrowd", 0)), bool(instance.get("ignore", 0)), ) instances_cpp.append(instance_cpp) return instances_cpp # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ ground_truth_instances = [ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] for imgId in p.imgIds ] detected_instances = [ [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds] for imgId in p.imgIds ] ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] if not p.useCats: # For each image, flatten per-category lists into a single list ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances] detected_instances = [[[o for c in i for o in c]] for i in detected_instances] # Call C++ implementation of self.evaluateImgs() self._evalImgs_cpp = _C.COCOevalEvaluateImages( p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances ) self._evalImgs = None self._paramsEval = copy.deepcopy(self.params) toc = time.time() logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic)) # >>>> End of code differences with original COCO API def accumulate(self): """ Accumulate per image evaluation results and store the result in self.eval. 
Does not support changing parameter settings from those used by self.evaluate() """ logger.info("Accumulating evaluation results...") tic = time.time() assert hasattr( self, "_evalImgs_cpp" ), "evaluate() must be called before accumulate() is called." self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp) # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections self.eval["recall"] = np.array(self.eval["recall"]).reshape( self.eval["counts"][:1] + self.eval["counts"][2:] ) # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X # num_area_ranges X num_max_detections self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"]) self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"]) toc = time.time() logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)) The provided code snippet includes necessary dependencies for implementing the `_evaluate_predictions_on_coco` function. Write a Python function `def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None, max_dets_per_image=None, )` to solve the following problem: Evaluate the coco results using COCOEval API. Here is the function: def _evaluate_predictions_on_coco( coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None, max_dets_per_image=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 if iou_type == "segm": coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results) coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) # For COCO, the default max_dets_per_image is [1, 10, 100]. if max_dets_per_image is None: max_dets_per_image = [1, 10, 100] # Default from COCOEval else: assert ( len(max_dets_per_image) >= 3 ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" # In the case that user supplies a custom input for max_dets_per_image, # apply COCOevalMaxDets to evaluate AP with the custom input. if max_dets_per_image[2] != 100: coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) if iou_type != "keypoints": coco_eval.params.maxDets = max_dets_per_image if img_ids is not None: coco_eval.params.imgIds = img_ids if iou_type == "keypoints": # Use the COCO default keypoint OKS sigmas unless overrides are specified if kpt_oks_sigmas: assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!" coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) # COCOAPI requires every detection and every gt to have keypoints, so # we just take the first entry from both num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( f"[COCOEvaluator] Predictions contain {num_keypoints_dt} keypoints. " f"Ground truth contains {num_keypoints_gt} keypoints. " f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " "They have to agree with each other. 
For meaning of OKS, please refer to " "http://cocodataset.org/#keypoints-eval." ) coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return coco_eval
Evaluate the coco results using COCOEval API.
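A minimal usage sketch of the function above; the two JSON paths are hypothetical placeholders, and use_fast_impl=False selects the pure-Python COCOeval so the sketch does not depend on the compiled C++ extension:

import json
from pycocotools.coco import COCO

coco_gt = COCO("instances_val2017.json")        # ground-truth annotations (hypothetical path)
with open("coco_instances_results.json") as f:  # detections in COCO result format
    coco_results = json.load(f)

coco_eval = _evaluate_predictions_on_coco(
    coco_gt,
    coco_results,
    iou_type="bbox",
    use_fast_impl=False,                        # fall back to pure-Python COCOeval
)
print("AP@[0.50:0.95] =", coco_eval.stats[0])   # stats is filled in by summarize()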
3,642
import logging import numpy as np import os import tempfile import xml.etree.ElementTree as ET from collections import OrderedDict, defaultdict from functools import lru_cache import torch from detectron2.data import MetadataCatalog from detectron2.utils import comm from detectron2.utils.file_io import PathManager from .evaluator import DatasetEvaluator def parse_rec(filename): """Parse a PASCAL VOC xml file.""" with PathManager.open(filename) as f: tree = ET.parse(f) objects = [] for obj in tree.findall("object"): obj_struct = {} obj_struct["name"] = obj.find("name").text obj_struct["pose"] = obj.find("pose").text obj_struct["truncated"] = int(obj.find("truncated").text) obj_struct["difficult"] = int(obj.find("difficult").text) bbox = obj.find("bndbox") obj_struct["bbox"] = [ int(bbox.find("xmin").text), int(bbox.find("ymin").text), int(bbox.find("xmax").text), int(bbox.find("ymax").text), ] objects.append(obj_struct) return objects def voc_ap(rec, prec, use_07_metric=False): """Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11-point method (default:False). """ if use_07_metric: # 11 point metric ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11.0 else: # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap PathManager = PathManagerBase() PathManager.register_handler(HTTPURLHandler()) PathManager.register_handler(OneDrivePathHandler()) PathManager.register_handler(Detectron2Handler()) The provided code snippet includes necessary dependencies for implementing the `voc_eval` function. Write a Python function `def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False)` to solve the following problem: rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. classname: Category name (duh) [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) Here is the function: def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. 
classname: Category name (duh) [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) """ # assumes detections are in detpath.format(classname) # assumes annotations are in annopath.format(imagename) # assumes imagesetfile is a text file with each line an image name # first load gt # read list of images with PathManager.open(imagesetfile, "r") as f: lines = f.readlines() imagenames = [x.strip() for x in lines] # load annots recs = {} for imagename in imagenames: recs[imagename] = parse_rec(annopath.format(imagename)) # extract gt objects for this class class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if obj["name"] == classname] bbox = np.array([x["bbox"] for x in R]) difficult = np.array([x["difficult"] for x in R]).astype(bool) # difficult = np.array([False for x in R]).astype(bool) # treat all "difficult" as GT det = [False] * len(R) npos = npos + sum(~difficult) class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} # read dets detfile = detpath.format(classname) with open(detfile, "r") as f: lines = f.readlines() splitlines = [x.strip().split(" ") for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) # sort by confidence sorted_ind = np.argsort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = -np.inf BBGT = R["bbox"].astype(float) if BBGT.size > 0: # compute overlaps # intersection ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum(ixmax - ixmin + 1.0, 0.0) ih = np.maximum(iymax - iymin + 1.0, 0.0) inters = iw * ih # union uni = ( (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters ) overlaps = inters / uni ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if ovmax > ovthresh: if not R["difficult"][jmax]: if not R["det"][jmax]: tp[d] = 1.0 R["det"][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 # compute precision recall fp = np.cumsum(fp) tp = np.cumsum(tp) rec = tp / float(npos) # avoid divide by zero in case the first detection matches a difficult # ground truth prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) ap = voc_ap(rec, prec, use_07_metric) return rec, prec, ap
rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. classname: Category name (duh) [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False)
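A usage sketch under a hypothetical PASCAL VOC 2007 directory layout; note that detpath and annopath are format strings (one "{}" placeholder each), not concrete paths:

rec, prec, ap = voc_eval(
    detpath="results/det_test_{}.txt",              # one detection file per class
    annopath="VOC2007/Annotations/{}.xml",          # one XML file per image
    imagesetfile="VOC2007/ImageSets/Main/test.txt",
    classname="car",
    ovthresh=0.5,                                   # IoU threshold for a true positive
    use_07_metric=True,                             # VOC07 11-point AP
)
print("car: AP={:.4f} (max recall={:.4f})".format(ap, rec[-1]))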
3,643
import ast import builtins import importlib import inspect import logging import os import uuid from collections import abc from contextlib import contextmanager from copy import deepcopy from dataclasses import is_dataclass from typing import List, Tuple, Union import cloudpickle import yaml from omegaconf import DictConfig, ListConfig, OmegaConf from detectron2.utils.file_io import PathManager from detectron2.utils.registry import _convert_target_to_string The provided code snippet includes necessary dependencies for implementing the `_visit_dict_config` function. Write a Python function `def _visit_dict_config(cfg, func)` to solve the following problem: Apply func recursively to all DictConfig in cfg. Here is the function: def _visit_dict_config(cfg, func): """ Apply func recursively to all DictConfig in cfg. """ if isinstance(cfg, DictConfig): func(cfg) for v in cfg.values(): _visit_dict_config(v, func) elif isinstance(cfg, ListConfig): for v in cfg: _visit_dict_config(v, func)
Apply func recursively to all DictConfig in cfg.
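A small runnable example: the visitor is applied to the root config, to every nested DictConfig, and to dicts reached through a ListConfig.

from omegaconf import OmegaConf

cfg = OmegaConf.create({"model": {"lr": 0.1, "heads": [{"dim": 256}]}})
visited = []
_visit_dict_config(cfg, lambda c: visited.append(list(c.keys())))
print(visited)   # [['model'], ['lr', 'heads'], ['dim']]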
3,644
import ast import builtins import importlib import inspect import logging import os import uuid from collections import abc from contextlib import contextmanager from copy import deepcopy from dataclasses import is_dataclass from typing import List, Tuple, Union import cloudpickle import yaml from omegaconf import DictConfig, ListConfig, OmegaConf from detectron2.utils.file_io import PathManager from detectron2.utils.registry import _convert_target_to_string def _validate_py_syntax(filename): # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py with PathManager.open(filename, "r") as f: content = f.read() try: ast.parse(content) except SyntaxError as e: raise SyntaxError(f"Config file {filename} has syntax error!") from e def _cast_to_config(obj): # if given a dict, return DictConfig instead if isinstance(obj, dict): return DictConfig(obj, flags={"allow_objects": True}) return obj _CFG_PACKAGE_NAME = "detectron2._cfg_loader" def _random_package_name(filename): # generate a random package name when loading config files return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename) PathManager = PathManagerBase() PathManager.register_handler(HTTPURLHandler()) PathManager.register_handler(OneDrivePathHandler()) PathManager.register_handler(Detectron2Handler()) The provided code snippet includes necessary dependencies for implementing the `_patch_import` function. Write a Python function `def _patch_import()` to solve the following problem: Enhance relative import statements in config files, so that they: 1. locate files purely based on relative location, regardless of packages. e.g. you can import file without having __init__ 2. do not cache modules globally; modifications of module states has no side effect 3. support other storage system through PathManager 4. imported dict are turned into omegaconf.DictConfig automatically Here is the function: def _patch_import(): """ Enhance relative import statements in config files, so that they: 1. locate files purely based on relative location, regardless of packages. e.g. you can import file without having __init__ 2. do not cache modules globally; modifications of module states has no side effect 3. support other storage system through PathManager 4. imported dict are turned into omegaconf.DictConfig automatically """ old_import = builtins.__import__ def find_relative_file(original_file, relative_import_path, level): cur_file = os.path.dirname(original_file) for _ in range(level - 1): cur_file = os.path.dirname(cur_file) cur_name = relative_import_path.lstrip(".") for part in cur_name.split("."): cur_file = os.path.join(cur_file, part) # NOTE: directory import is not handled. Because then it's unclear # if such import should produce python module or DictConfig. This can # be discussed further if needed. if not cur_file.endswith(".py"): cur_file += ".py" if not PathManager.isfile(cur_file): raise ImportError( f"Cannot import name {relative_import_path} from " f"{original_file}: {cur_file} has to exist." 
) return cur_file def new_import(name, globals=None, locals=None, fromlist=(), level=0): if ( # Only deal with relative imports inside config files level != 0 and globals is not None and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME) ): cur_file = find_relative_file(globals["__file__"], name, level) _validate_py_syntax(cur_file) spec = importlib.machinery.ModuleSpec( _random_package_name(cur_file), None, origin=cur_file ) module = importlib.util.module_from_spec(spec) module.__file__ = cur_file with PathManager.open(cur_file) as f: content = f.read() exec(compile(content, cur_file, "exec"), module.__dict__) for name in fromlist: # turn imported dict into DictConfig automatically val = _cast_to_config(module.__dict__[name]) module.__dict__[name] = val return module return old_import(name, globals, locals, fromlist=fromlist, level=level) builtins.__import__ = new_import yield new_import builtins.__import__ = old_import
Enhance relative import statements in config files, so that they: 1. locate files purely based on relative location, regardless of packages. e.g. you can import file without having __init__ 2. do not cache modules globally; modifications of module states has no side effect 3. support other storage system through PathManager 4. imported dict are turned into omegaconf.DictConfig automatically
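The body above ends in a single yield, so in the original source it is presumably wrapped with @contextlib.contextmanager (the decorator is not shown in the snippet; if it is already applied there, drop the extra wrapping below). A sketch of how the patched importer might be driven, with my_cfg.py as a hypothetical config file:

import contextlib

# Assumption: _patch_import behaves as a context manager once wrapped.
patched_import = contextlib.contextmanager(_patch_import)

cfg_file = "my_cfg.py"                       # hypothetical config file
with patched_import():
    with PathManager.open(cfg_file) as f:
        content = f.read()
    # Execute the config as a throw-away module whose package name starts with
    # _CFG_PACKAGE_NAME, so relative imports inside it go through new_import().
    namespace = {"__file__": cfg_file, "__package__": _CFG_PACKAGE_NAME + "demo"}
    exec(compile(content, cfg_file, "exec"), namespace)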
3,645
import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate def _convert_target_to_string(t: Any) -> str: """ Inverse of ``locate()``. Args: t: any object with ``__module__`` and ``__qualname__`` """ module, qualname = t.__module__, t.__qualname__ # Compress the path to this object, e.g. ``module.submodule._impl.class`` # may become ``module.submodule.class``, if the later also resolves to the same # object. This simplifies the string, and also is less affected by moving the # class implementation. module_parts = module.split(".") for k in range(1, len(module_parts)): prefix = ".".join(module_parts[:k]) candidate = f"{prefix}.{qualname}" try: if locate(candidate) is t: return candidate except ImportError: pass return f"{module}.{qualname}" The provided code snippet includes necessary dependencies for implementing the `dump_dataclass` function. Write a Python function `def dump_dataclass(obj: Any)` to solve the following problem: Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict Here is the function: def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret
Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict
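A short example of the round-trip-friendly dict it produces; the Optimizer dataclass is made up for illustration:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Optimizer:
    lr: float = 0.1
    milestones: List[int] = field(default_factory=lambda: [30, 60])

d = dump_dataclass(Optimizer(lr=0.01))
print(d)
# {'_target_': '__main__.Optimizer', 'lr': 0.01, 'milestones': [30, 60]}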
3,646
import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. 
" "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) _C = CN() _C.VERSION = 2 _C.MODEL = CN() _C.MODEL.LOAD_PROPOSALS = False _C.MODEL.MASK_ON = False _C.MODEL.KEYPOINT_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" _C.MODEL.WEIGHTS = "" _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] _C.INPUT = CN() _C.INPUT.MIN_SIZE_TRAIN = (800,) _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" _C.INPUT.MAX_SIZE_TRAIN = 1333 _C.INPUT.MIN_SIZE_TEST = 800 _C.INPUT.MAX_SIZE_TEST = 1333 _C.INPUT.RANDOM_FLIP = "horizontal" _C.INPUT.CROP = CN({"ENABLED": False}) _C.INPUT.CROP.TYPE = "relative_range" _C.INPUT.CROP.SIZE = [0.9, 0.9] _C.INPUT.FORMAT = "BGR" _C.INPUT.MASK_FORMAT = "polygon" _C.DATASETS = CN() _C.DATASETS.TRAIN = () _C.DATASETS.PROPOSAL_FILES_TRAIN = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 _C.DATASETS.TEST = () _C.DATASETS.PROPOSAL_FILES_TEST = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 _C.DATALOADER = CN() _C.DATALOADER.NUM_WORKERS = 4 _C.DATALOADER.ASPECT_RATIO_GROUPING = True _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" _C.DATALOADER.REPEAT_THRESHOLD = 0.0 _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True _C.MODEL.BACKBONE = CN() _C.MODEL.BACKBONE.NAME = "build_resnet_backbone" _C.MODEL.BACKBONE.FREEZE_AT = 2 _C.MODEL.FPN = CN() _C.MODEL.FPN.IN_FEATURES = [] _C.MODEL.FPN.OUT_CHANNELS = 256 _C.MODEL.FPN.NORM = "" _C.MODEL.FPN.FUSE_TYPE = "sum" _C.MODEL.PROPOSAL_GENERATOR = CN() _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 _C.MODEL.ANCHOR_GENERATOR = CN() _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 _C.MODEL.RPN = CN() _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" _C.MODEL.RPN.IN_FEATURES = ["res4"] _C.MODEL.RPN.BOUNDARY_THRESH = -1 _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] _C.MODEL.RPN.IOU_LABELS = [0, -1, 1] _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 _C.MODEL.RPN.POSITIVE_FRACTION = 0.5 _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 _C.MODEL.RPN.LOSS_WEIGHT = 1.0 _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 _C.MODEL.RPN.NMS_THRESH = 0.7 _C.MODEL.RPN.CONV_DIMS = [-1] _C.MODEL.ROI_HEADS = CN() _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" _C.MODEL.ROI_HEADS.NUM_CLASSES = 80 _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True _C.MODEL.ROI_BOX_HEAD = CN() _C.MODEL.ROI_BOX_HEAD.NAME = "" 
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 _C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 _C.MODEL.ROI_BOX_HEAD.NORM = "" _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False _C.MODEL.ROI_BOX_CASCADE_HEAD = CN() _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( (10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0), ) _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) _C.MODEL.ROI_MASK_HEAD = CN() _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 _C.MODEL.ROI_MASK_HEAD.NORM = "" _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_KEYPOINT_HEAD = CN() _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.SEM_SEG_HEAD = CN() _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 _C.MODEL.SEM_SEG_HEAD.NORM = "GN" _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN = CN() _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 _C.MODEL.RETINANET = CN() _C.MODEL.RETINANET.NUM_CLASSES = 80 _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] _C.MODEL.RETINANET.NUM_CONVS = 4 _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] _C.MODEL.RETINANET.PRIOR_PROB = 0.01 _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RETINANET.NORM = "" _C.MODEL.RESNETS = CN() _C.MODEL.RESNETS.DEPTH = 50 _C.MODEL.RESNETS.OUT_FEATURES = ["res4"] _C.MODEL.RESNETS.NUM_GROUPS = 1 _C.MODEL.RESNETS.NORM = "FrozenBN" _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 _C.MODEL.RESNETS.STRIDE_IN_1X1 = True _C.MODEL.RESNETS.RES5_DILATION = 1 _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] 
_C.MODEL.RESNETS.DEFORM_MODULATED = False _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 _C.SOLVER = CN() _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" _C.SOLVER.MAX_ITER = 40000 _C.SOLVER.BASE_LR = 0.001 _C.SOLVER.MOMENTUM = 0.9 _C.SOLVER.NESTEROV = False _C.SOLVER.WEIGHT_DECAY = 0.0001 _C.SOLVER.WEIGHT_DECAY_NORM = 0.0 _C.SOLVER.GAMMA = 0.1 _C.SOLVER.STEPS = (30000,) _C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 _C.SOLVER.WARMUP_ITERS = 1000 _C.SOLVER.WARMUP_METHOD = "linear" _C.SOLVER.CHECKPOINT_PERIOD = 5000 _C.SOLVER.IMS_PER_BATCH = 16 _C.SOLVER.REFERENCE_WORLD_SIZE = 0 _C.SOLVER.BIAS_LR_FACTOR = 1.0 _C.SOLVER.WEIGHT_DECAY_BIAS = None _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 _C.SOLVER.AMP = CN({"ENABLED": False}) _C.TEST = CN() _C.TEST.EXPECTED_RESULTS = [] _C.TEST.EVAL_PERIOD = 0 _C.TEST.KEYPOINT_OKS_SIGMAS = [] _C.TEST.DETECTIONS_PER_IMAGE = 100 _C.TEST.AUG = CN({"ENABLED": False}) _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) _C.TEST.AUG.MAX_SIZE = 4000 _C.TEST.AUG.FLIP = True _C.TEST.PRECISE_BN = CN({"ENABLED": False}) _C.TEST.PRECISE_BN.NUM_ITER = 200 _C.OUTPUT_DIR = "./output" _C.SEED = -1 _C.CUDNN_BENCHMARK = False _C.VIS_PERIOD = 0 _C.GLOBAL = CN() _C.GLOBAL.HACK = 1.0 The provided code snippet includes necessary dependencies for implementing the `get_cfg` function. Write a Python function `def get_cfg() -> CfgNode` to solve the following problem: Get a copy of the default config. Returns: a detectron2 CfgNode instance. Here is the function: def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone()
Get a copy of the default config. Returns: a detectron2 CfgNode instance.
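Because get_cfg() returns a clone of _C, mutating the returned object leaves the defaults untouched; my_experiment.yaml below is a hypothetical override file:

cfg = get_cfg()                                  # a fresh copy of the defaults
cfg.merge_from_file("my_experiment.yaml")        # hypothetical YAML overrides
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3              # programmatic override
cfg.freeze()

assert get_cfg().MODEL.ROI_HEADS.NUM_CLASSES == 80   # defaults are unaffected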
3,647
import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() The provided code snippet includes necessary dependencies for implementing the `set_global_cfg` function. Write a Python function `def set_global_cfg(cfg: CfgNode) -> None` to solve the following problem: Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. Here is the function: def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. 
Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. """ global global_cfg global_cfg.clear() global_cfg.update(cfg)
Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration.
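A sketch of the intended access pattern, building on get_cfg() from the same package:

from detectron2.config import get_cfg, global_cfg, set_global_cfg

cfg = get_cfg()
cfg.GLOBAL.HACK = 2.0
set_global_cfg(cfg)

# ...anywhere else in the codebase, without passing cfg around:
print(global_cfg.GLOBAL.HACK)   # 2.0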
3,648
import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False The provided code snippet includes necessary dependencies for implementing the `configurable` function. Write a Python function `def configurable(init_func=None, *, from_config=None)` to solve the following problem: Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. Here is the function: def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. 
Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper
Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument.
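The docstring examples use pass bodies; a self-contained, runnable variant of usage 1 might look like this (the Scheduler class and the LR key are invented for the demo):

from detectron2.config import CfgNode, configurable

class Scheduler:
    @configurable
    def __init__(self, lr, gamma=0.1):
        self.lr, self.gamma = lr, gamma

    @classmethod
    def from_config(cls, cfg):
        return {"lr": cfg.LR}          # translate cfg -> __init__ kwargs

cfg = CfgNode()
cfg.LR = 0.02

s1 = Scheduler(lr=0.01)                # regular construction
s2 = Scheduler(cfg)                    # built from cfg: lr=0.02, gamma=0.1
s3 = Scheduler(cfg, gamma=0.5)         # cfg plus an explicit override
assert (s1.lr, s2.lr, s2.gamma, s3.gamma) == (0.01, 0.02, 0.1, 0.5)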
3,649
import logging from typing import List, Optional, Tuple from .config import CfgNode as CN from .defaults import _C _C = CN() _C.VERSION = 2 _C.MODEL = CN() _C.MODEL.LOAD_PROPOSALS = False _C.MODEL.MASK_ON = False _C.MODEL.KEYPOINT_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" _C.MODEL.WEIGHTS = "" _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] _C.INPUT = CN() _C.INPUT.MIN_SIZE_TRAIN = (800,) _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" _C.INPUT.MAX_SIZE_TRAIN = 1333 _C.INPUT.MIN_SIZE_TEST = 800 _C.INPUT.MAX_SIZE_TEST = 1333 _C.INPUT.RANDOM_FLIP = "horizontal" _C.INPUT.CROP = CN({"ENABLED": False}) _C.INPUT.CROP.TYPE = "relative_range" _C.INPUT.CROP.SIZE = [0.9, 0.9] _C.INPUT.FORMAT = "BGR" _C.INPUT.MASK_FORMAT = "polygon" _C.DATASETS = CN() _C.DATASETS.TRAIN = () _C.DATASETS.PROPOSAL_FILES_TRAIN = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 _C.DATASETS.TEST = () _C.DATASETS.PROPOSAL_FILES_TEST = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 _C.DATALOADER = CN() _C.DATALOADER.NUM_WORKERS = 4 _C.DATALOADER.ASPECT_RATIO_GROUPING = True _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" _C.DATALOADER.REPEAT_THRESHOLD = 0.0 _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True _C.MODEL.BACKBONE = CN() _C.MODEL.BACKBONE.NAME = "build_resnet_backbone" _C.MODEL.BACKBONE.FREEZE_AT = 2 _C.MODEL.FPN = CN() _C.MODEL.FPN.IN_FEATURES = [] _C.MODEL.FPN.OUT_CHANNELS = 256 _C.MODEL.FPN.NORM = "" _C.MODEL.FPN.FUSE_TYPE = "sum" _C.MODEL.PROPOSAL_GENERATOR = CN() _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 _C.MODEL.ANCHOR_GENERATOR = CN() _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 _C.MODEL.RPN = CN() _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" _C.MODEL.RPN.IN_FEATURES = ["res4"] _C.MODEL.RPN.BOUNDARY_THRESH = -1 _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] _C.MODEL.RPN.IOU_LABELS = [0, -1, 1] _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 _C.MODEL.RPN.POSITIVE_FRACTION = 0.5 _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 _C.MODEL.RPN.LOSS_WEIGHT = 1.0 _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 _C.MODEL.RPN.NMS_THRESH = 0.7 _C.MODEL.RPN.CONV_DIMS = [-1] _C.MODEL.ROI_HEADS = CN() _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" _C.MODEL.ROI_HEADS.NUM_CLASSES = 80 _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True _C.MODEL.ROI_BOX_HEAD = CN() _C.MODEL.ROI_BOX_HEAD.NAME = "" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 
_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 _C.MODEL.ROI_BOX_HEAD.NORM = "" _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False _C.MODEL.ROI_BOX_CASCADE_HEAD = CN() _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( (10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0), ) _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) _C.MODEL.ROI_MASK_HEAD = CN() _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 _C.MODEL.ROI_MASK_HEAD.NORM = "" _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_KEYPOINT_HEAD = CN() _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.SEM_SEG_HEAD = CN() _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 _C.MODEL.SEM_SEG_HEAD.NORM = "GN" _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN = CN() _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 _C.MODEL.RETINANET = CN() _C.MODEL.RETINANET.NUM_CLASSES = 80 _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] _C.MODEL.RETINANET.NUM_CONVS = 4 _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] _C.MODEL.RETINANET.PRIOR_PROB = 0.01 _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RETINANET.NORM = "" _C.MODEL.RESNETS = CN() _C.MODEL.RESNETS.DEPTH = 50 _C.MODEL.RESNETS.OUT_FEATURES = ["res4"] _C.MODEL.RESNETS.NUM_GROUPS = 1 _C.MODEL.RESNETS.NORM = "FrozenBN" _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 _C.MODEL.RESNETS.STRIDE_IN_1X1 = True _C.MODEL.RESNETS.RES5_DILATION = 1 _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] _C.MODEL.RESNETS.DEFORM_MODULATED = False _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 _C.SOLVER = CN() _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" _C.SOLVER.MAX_ITER = 40000 _C.SOLVER.BASE_LR = 0.001 _C.SOLVER.MOMENTUM = 0.9 _C.SOLVER.NESTEROV = False _C.SOLVER.WEIGHT_DECAY = 0.0001 _C.SOLVER.WEIGHT_DECAY_NORM = 0.0 _C.SOLVER.GAMMA = 0.1 _C.SOLVER.STEPS = (30000,) 
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 _C.SOLVER.WARMUP_ITERS = 1000 _C.SOLVER.WARMUP_METHOD = "linear" _C.SOLVER.CHECKPOINT_PERIOD = 5000 _C.SOLVER.IMS_PER_BATCH = 16 _C.SOLVER.REFERENCE_WORLD_SIZE = 0 _C.SOLVER.BIAS_LR_FACTOR = 1.0 _C.SOLVER.WEIGHT_DECAY_BIAS = None _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 _C.SOLVER.AMP = CN({"ENABLED": False}) _C.TEST = CN() _C.TEST.EXPECTED_RESULTS = [] _C.TEST.EVAL_PERIOD = 0 _C.TEST.KEYPOINT_OKS_SIGMAS = [] _C.TEST.DETECTIONS_PER_IMAGE = 100 _C.TEST.AUG = CN({"ENABLED": False}) _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) _C.TEST.AUG.MAX_SIZE = 4000 _C.TEST.AUG.FLIP = True _C.TEST.PRECISE_BN = CN({"ENABLED": False}) _C.TEST.PRECISE_BN.NUM_ITER = 200 _C.OUTPUT_DIR = "./output" _C.SEED = -1 _C.CUDNN_BENCHMARK = False _C.VIS_PERIOD = 0 _C.GLOBAL = CN() _C.GLOBAL.HACK = 1.0 The provided code snippet includes necessary dependencies for implementing the `upgrade_config` function. Write a Python function `def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN` to solve the following problem: Upgrade a config from its current version to a newer version. Args: cfg (CfgNode): to_version (int): defaults to the latest version. Here is the function: def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: """ Upgrade a config from its current version to a newer version. Args: cfg (CfgNode): to_version (int): defaults to the latest version. """ cfg = cfg.clone() if to_version is None: to_version = _C.VERSION assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( cfg.VERSION, to_version ) for k in range(cfg.VERSION, to_version): converter = globals()["ConverterV" + str(k + 1)] converter.upgrade(cfg) cfg.VERSION = k + 1 return cfg
Upgrade a config from its current version to a newer version. Args: cfg (CfgNode): to_version (int): defaults to the latest version.
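A sketch of the round trip, assuming the module's ConverterV2 (not shown in the snippet above) renames the v1 key MODEL.WEIGHT to MODEL.WEIGHTS, which is consistent with the guess_version heuristic further below:

from detectron2.config import get_cfg
from detectron2.config.compat import downgrade_config, upgrade_config

old = downgrade_config(get_cfg(), to_version=1)  # full config at VERSION 1
old.MODEL.WEIGHT = "model_final.pkl"             # v1 spelling of the key
new = upgrade_config(old)                        # ConverterV2.upgrade runs once
assert new.VERSION == 2
assert new.MODEL.WEIGHTS == "model_final.pkl"    # renamed on the way up (assumed)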
3,650
import logging from typing import List, Optional, Tuple from .config import CfgNode as CN from .defaults import _C The provided code snippet includes necessary dependencies for implementing the `downgrade_config` function. Write a Python function `def downgrade_config(cfg: CN, to_version: int) -> CN` to solve the following problem: Downgrade a config from its current version to an older version. Args: cfg (CfgNode): to_version (int): Note: A general downgrade of arbitrary configs is not always possible due to the different functionalities in different versions. The purpose of downgrade is only to recover the defaults in old versions, allowing it to load an old partial yaml config. Therefore, the implementation only needs to fill in the default values in the old version when a general downgrade is not possible. Here is the function: def downgrade_config(cfg: CN, to_version: int) -> CN: """ Downgrade a config from its current version to an older version. Args: cfg (CfgNode): to_version (int): Note: A general downgrade of arbitrary configs is not always possible due to the different functionalities in different versions. The purpose of downgrade is only to recover the defaults in old versions, allowing it to load an old partial yaml config. Therefore, the implementation only needs to fill in the default values in the old version when a general downgrade is not possible. """ cfg = cfg.clone() assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( cfg.VERSION, to_version ) for k in range(cfg.VERSION, to_version, -1): converter = globals()["ConverterV" + str(k)] converter.downgrade(cfg) cfg.VERSION = k - 1 return cfg
Downgrade a config from its current version to an older version. Args: cfg (CfgNode): to_version (int): Note: A general downgrade of arbitrary configs is not always possible due to the different functionalities in different versions. The purpose of downgrade is only to recover the defaults in old versions, allowing it to load an old partial yaml config. Therefore, the implementation only needs to fill in the default values in the old version when a general downgrade is not possible.
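Its typical use, mirroring the internals of CfgNode.merge_from_file shown earlier: rebuild the old defaults, merge an old partial YAML into them, then upgrade. The YAML path is a hypothetical placeholder.

from detectron2.config import CfgNode, get_cfg
from detectron2.config.compat import downgrade_config, upgrade_config

loaded = CfgNode(CfgNode.load_yaml_with_base("old_partial_v1.yaml"))
old_self = downgrade_config(get_cfg(), to_version=1)  # defaults at VERSION 1
old_self.merge_from_other_cfg(loaded)                 # safe: the old keys now exist
cfg = upgrade_config(old_self)                        # back to the latest version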
3,651
import logging from typing import List, Optional, Tuple from .config import CfgNode as CN from .defaults import _C _C = CN() _C.VERSION = 2 _C.MODEL = CN() _C.MODEL.LOAD_PROPOSALS = False _C.MODEL.MASK_ON = False _C.MODEL.KEYPOINT_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" _C.MODEL.WEIGHTS = "" _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] _C.INPUT = CN() _C.INPUT.MIN_SIZE_TRAIN = (800,) _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" _C.INPUT.MAX_SIZE_TRAIN = 1333 _C.INPUT.MIN_SIZE_TEST = 800 _C.INPUT.MAX_SIZE_TEST = 1333 _C.INPUT.RANDOM_FLIP = "horizontal" _C.INPUT.CROP = CN({"ENABLED": False}) _C.INPUT.CROP.TYPE = "relative_range" _C.INPUT.CROP.SIZE = [0.9, 0.9] _C.INPUT.FORMAT = "BGR" _C.INPUT.MASK_FORMAT = "polygon" _C.DATASETS = CN() _C.DATASETS.TRAIN = () _C.DATASETS.PROPOSAL_FILES_TRAIN = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 _C.DATASETS.TEST = () _C.DATASETS.PROPOSAL_FILES_TEST = () _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 _C.DATALOADER = CN() _C.DATALOADER.NUM_WORKERS = 4 _C.DATALOADER.ASPECT_RATIO_GROUPING = True _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" _C.DATALOADER.REPEAT_THRESHOLD = 0.0 _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True _C.MODEL.BACKBONE = CN() _C.MODEL.BACKBONE.NAME = "build_resnet_backbone" _C.MODEL.BACKBONE.FREEZE_AT = 2 _C.MODEL.FPN = CN() _C.MODEL.FPN.IN_FEATURES = [] _C.MODEL.FPN.OUT_CHANNELS = 256 _C.MODEL.FPN.NORM = "" _C.MODEL.FPN.FUSE_TYPE = "sum" _C.MODEL.PROPOSAL_GENERATOR = CN() _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 _C.MODEL.ANCHOR_GENERATOR = CN() _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0 _C.MODEL.RPN = CN() _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" _C.MODEL.RPN.IN_FEATURES = ["res4"] _C.MODEL.RPN.BOUNDARY_THRESH = -1 _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] _C.MODEL.RPN.IOU_LABELS = [0, -1, 1] _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 _C.MODEL.RPN.POSITIVE_FRACTION = 0.5 _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 _C.MODEL.RPN.LOSS_WEIGHT = 1.0 _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 _C.MODEL.RPN.NMS_THRESH = 0.7 _C.MODEL.RPN.CONV_DIMS = [-1] _C.MODEL.ROI_HEADS = CN() _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" _C.MODEL.ROI_HEADS.NUM_CLASSES = 80 _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True _C.MODEL.ROI_BOX_HEAD = CN() _C.MODEL.ROI_BOX_HEAD.NAME = "" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0 _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 
_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 _C.MODEL.ROI_BOX_HEAD.NORM = "" _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False _C.MODEL.ROI_BOX_CASCADE_HEAD = CN() _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = ( (10.0, 10.0, 5.0, 5.0), (20.0, 20.0, 10.0, 10.0), (30.0, 30.0, 15.0, 15.0), ) _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7) _C.MODEL.ROI_MASK_HEAD = CN() _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead" _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256 _C.MODEL.ROI_MASK_HEAD.NORM = "" _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.ROI_KEYPOINT_HEAD = CN() _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead" _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8)) _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1 _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2" _C.MODEL.SEM_SEG_HEAD = CN() _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead" _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"] _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255 _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54 _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128 _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4 _C.MODEL.SEM_SEG_HEAD.NORM = "GN" _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN = CN() _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0 _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5 _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096 _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5 _C.MODEL.RETINANET = CN() _C.MODEL.RETINANET.NUM_CLASSES = 80 _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"] _C.MODEL.RETINANET.NUM_CONVS = 4 _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5] _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1] _C.MODEL.RETINANET.PRIOR_PROB = 0.01 _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05 _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000 _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5 _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0 _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25 _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1 _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1" _C.MODEL.RETINANET.NORM = "" _C.MODEL.RESNETS = CN() _C.MODEL.RESNETS.DEPTH = 50 _C.MODEL.RESNETS.OUT_FEATURES = ["res4"] _C.MODEL.RESNETS.NUM_GROUPS = 1 _C.MODEL.RESNETS.NORM = "FrozenBN" _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 _C.MODEL.RESNETS.STRIDE_IN_1X1 = True _C.MODEL.RESNETS.RES5_DILATION = 1 _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] _C.MODEL.RESNETS.DEFORM_MODULATED = False _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 _C.SOLVER = CN() _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" _C.SOLVER.MAX_ITER = 40000 _C.SOLVER.BASE_LR = 0.001 _C.SOLVER.MOMENTUM = 0.9 _C.SOLVER.NESTEROV = False _C.SOLVER.WEIGHT_DECAY = 0.0001 _C.SOLVER.WEIGHT_DECAY_NORM = 0.0 _C.SOLVER.GAMMA = 0.1 _C.SOLVER.STEPS = (30000,) 
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 _C.SOLVER.WARMUP_ITERS = 1000 _C.SOLVER.WARMUP_METHOD = "linear" _C.SOLVER.CHECKPOINT_PERIOD = 5000 _C.SOLVER.IMS_PER_BATCH = 16 _C.SOLVER.REFERENCE_WORLD_SIZE = 0 _C.SOLVER.BIAS_LR_FACTOR = 1.0 _C.SOLVER.WEIGHT_DECAY_BIAS = None _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False}) _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 _C.SOLVER.AMP = CN({"ENABLED": False}) _C.TEST = CN() _C.TEST.EXPECTED_RESULTS = [] _C.TEST.EVAL_PERIOD = 0 _C.TEST.KEYPOINT_OKS_SIGMAS = [] _C.TEST.DETECTIONS_PER_IMAGE = 100 _C.TEST.AUG = CN({"ENABLED": False}) _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) _C.TEST.AUG.MAX_SIZE = 4000 _C.TEST.AUG.FLIP = True _C.TEST.PRECISE_BN = CN({"ENABLED": False}) _C.TEST.PRECISE_BN.NUM_ITER = 200 _C.OUTPUT_DIR = "./output" _C.SEED = -1 _C.CUDNN_BENCHMARK = False _C.VIS_PERIOD = 0 _C.GLOBAL = CN() _C.GLOBAL.HACK = 1.0 The provided code snippet includes necessary dependencies for implementing the `guess_version` function. Write a Python function `def guess_version(cfg: CN, filename: str) -> int` to solve the following problem: Guess the version of a partial config where the VERSION field is not specified. Returns the version, or the latest if cannot make a guess. This makes it easier for users to migrate. Here is the function: def guess_version(cfg: CN, filename: str) -> int: """ Guess the version of a partial config where the VERSION field is not specified. Returns the version, or the latest if cannot make a guess. This makes it easier for users to migrate. """ logger = logging.getLogger(__name__) def _has(name: str) -> bool: cur = cfg for n in name.split("."): if n not in cur: return False cur = cur[n] return True # Most users' partial configs have "MODEL.WEIGHT", so guess on it ret = None if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): ret = 1 if ret is not None: logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) else: ret = _C.VERSION logger.warning( "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format( filename, ret ) ) return ret
Guess the version of a partial config where the VERSION field is not specified. Returns the version, or the latest if cannot make a guess. This makes it easier for users to migrate.
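Example (a sketch; `guess_version` is a private helper of `detectron2.config.compat`, and the partial config below is hypothetical)::

    from detectron2.config import CfgNode as CN
    from detectron2.config.compat import guess_version

    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.WEIGHT = "model_final.pkl"   # v1-style key (v2 renamed it to WEIGHTS)
    assert guess_version(cfg, "partial.yaml") == 1   # logs a warning about the guess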
3,652
import logging from typing import List, Optional, Tuple from .config import CfgNode as CN from .defaults import _C def _rename(cfg: CN, old: str, new: str) -> None: old_keys = old.split(".") new_keys = new.split(".") def _set(key_seq: List[str], val: str) -> None: cur = cfg for k in key_seq[:-1]: if k not in cur: cur[k] = CN() cur = cur[k] cur[key_seq[-1]] = val def _get(key_seq: List[str]) -> CN: cur = cfg for k in key_seq: cur = cur[k] return cur def _del(key_seq: List[str]) -> None: cur = cfg for k in key_seq[:-1]: cur = cur[k] del cur[key_seq[-1]] if len(cur) == 0 and len(key_seq) > 1: _del(key_seq[:-1]) _set(new_keys, _get(old_keys)) _del(old_keys)
null
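The `_rename` helper above is easiest to understand from a small example (a sketch; `_rename` is private to `detectron2.config.compat`, and the keys below are illustrative)::

    from detectron2.config import CfgNode as CN
    from detectron2.config.compat import _rename

    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.RPN_HEAD = CN()
    cfg.MODEL.RPN_HEAD.NAME = "StandardRPNHead"
    _rename(cfg, "MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")
    assert cfg.MODEL.RPN.HEAD_NAME == "StandardRPNHead"
    assert "RPN_HEAD" not in cfg.MODEL   # the emptied old subtree is deleted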
3,653
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from . import hooks from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase The provided code snippet includes necessary dependencies for implementing the `default_argument_parser` function. Write a Python function `def default_argument_parser(epilog=None)` to solve the following problem: Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: Here is the function: def default_argument_parser(epilog=None): """ Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser( epilog=epilog or f""" Examples: Run on single machine: $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml Change some config options: $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 Run on multiple machines: (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags] (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags] """, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument( "--resume", action="store_true", help="Whether to attempt to resume from the checkpoint directory. " "See documentation of `DefaultTrainer.resume_or_load()` for what it means.", ) parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" ) # PyTorch still may leave orphan processes in multi-gpu training. # Therefore we use a deterministic way to obtain port, # so that users are aware of orphan processes by seeing the port occupied. port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14 parser.add_argument( "--dist-url", default="tcp://127.0.0.1:{}".format(port), help="initialization URL for pytorch distributed backend. 
See " "https://pytorch.org/docs/stable/distributed.html for details.", ) parser.add_argument( "opts", help=""" Modify config options at the end of the command. For Yacs configs, use space-separated "PATH.KEY VALUE" pairs. For python-based LazyConfig, use "path.key=value". """.strip(), default=None, nargs=argparse.REMAINDER, ) return parser
Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser:
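Example (a sketch that simulates a command line; real usage calls `parse_args()` with no argument so `sys.argv` is used)::

    from detectron2.engine import default_argument_parser

    args = default_argument_parser().parse_args(
        ["--config-file", "cfg.yaml", "--num-gpus", "2", "MODEL.DEVICE", "cpu"]
    )
    assert args.num_gpus == 2
    assert args.opts == ["MODEL.DEVICE", "cpu"]   # trailing tokens become config overrides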
3,654
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from . import hooks from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase def _try_get_key(cfg, *keys, default=None): """ Try select keys from cfg until the first key that exists. Otherwise return default. """ if isinstance(cfg, CfgNode): cfg = OmegaConf.create(cfg.dump()) for k in keys: none = object() p = OmegaConf.select(cfg, k, default=none) if p is not none: return p return default def _highlight(code, filename): try: import pygments except ImportError: return code from pygments.lexers import Python3Lexer, YamlLexer from pygments.formatters import Terminal256Formatter lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) return code def collect_env_info(): has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM torch_version = torch.__version__ # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME has_rocm = False if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): has_rocm = True has_cuda = has_gpu and (not has_rocm) data = [] data.append(("sys.platform", sys.platform)) # check-template.yml depends on it data.append(("Python", sys.version.replace("\n", ""))) data.append(("numpy", np.__version__)) try: import detectron2 # noqa data.append( ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) ) except ImportError: data.append(("detectron2", "failed to import")) except AttributeError: data.append(("detectron2", "imported a wrong installation")) try: import detectron2._C as _C except ImportError as e: data.append(("detectron2._C", f"not built correctly: {e}")) # print system compilers when extension fails to build if sys.platform != "win32": # don't know what to do for windows try: # this is how torch/utils/cpp_extensions.py choose compiler cxx = os.environ.get("CXX", "c++") cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) cxx = cxx.decode("utf-8").strip().split("\n")[0] except subprocess.SubprocessError: cxx = "Not found" data.append(("Compiler ($CXX)", cxx)) if has_cuda and CUDA_HOME is not None: try: nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] except subprocess.SubprocessError: nvcc = "Not found" data.append(("CUDA compiler", 
nvcc)) if has_cuda and sys.platform != "win32": try: so_file = importlib.util.find_spec("detectron2._C").origin except (ImportError, AttributeError): pass else: data.append( ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) ) else: # print compilers that are used to build extension data.append(("Compiler", _C.get_compiler_version())) data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip if has_cuda and getattr(_C, "has_cuda", lambda: True)(): data.append( ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) ) data.append(get_env_module()) data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) data.append(("PyTorch debug build", torch.version.debug)) if not has_gpu: has_gpu_text = "No: torch.cuda.is_available() == False" else: has_gpu_text = "Yes" data.append(("GPU available", has_gpu_text)) if has_gpu: devices = defaultdict(list) for k in range(torch.cuda.device_count()): cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) name = torch.cuda.get_device_name(k) + f" (arch={cap})" devices[name].append(str(k)) for name, devids in devices.items(): data.append(("GPU " + ",".join(devids), name)) if has_rocm: msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) else: try: from torch.utils.collect_env import get_nvidia_driver_version, run as _run data.append(("Driver version", get_nvidia_driver_version(_run))) except Exception: pass msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) if cuda_arch_list: data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) data.append(("Pillow", PIL.__version__)) try: data.append( ( "torchvision", str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), ) ) if has_cuda: try: torchvision_C = importlib.util.find_spec("torchvision._C").origin msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) data.append(("torchvision arch flags", msg)) except (ImportError, AttributeError): data.append(("torchvision._C", "Not found")) except AttributeError: data.append(("torchvision", "unknown")) try: import fvcore data.append(("fvcore", fvcore.__version__)) except (ImportError, AttributeError): pass try: import iopath data.append(("iopath", iopath.__version__)) except (ImportError, AttributeError): pass try: import cv2 data.append(("cv2", cv2.__version__)) except (ImportError, AttributeError): data.append(("cv2", "Not found")) env_str = tabulate(data) + "\n" env_str += collect_torch_env() return env_str def seed_all_rng(seed=None): """ Set the random seed for the RNG in torch, numpy and python. Args: seed (int): if None, will use a strong random seed. """ if seed is None: seed = ( os.getpid() + int(datetime.now().strftime("%S%f")) + int.from_bytes(os.urandom(2), "big") ) logger = logging.getLogger(__name__) logger.info("Using a generated random seed {}".format(seed)) np.random.seed(seed) torch.manual_seed(seed) random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) PathManager = PathManagerBase() PathManager.register_handler(HTTPURLHandler()) PathManager.register_handler(OneDrivePathHandler()) PathManager.register_handler(Detectron2Handler()) def setup_logger( output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None ): """ Initialize the detectron2 logger and set its verbosity level to "DEBUG". 
Args: output (str): a file name or a directory to save log. If None, will not save log file. If ends with ".txt" or ".log", assumed to be a file name. Otherwise, logs will be saved to `output/log.txt`. name (str): the root module name of this logger abbrev_name (str): an abbreviation of the module, to avoid long names in logs. Set to "" to not log the root module in logs. By default, will abbreviate "detectron2" to "d2" and leave other modules unchanged. Returns: logging.Logger: a logger """ logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) logger.propagate = False if abbrev_name is None: abbrev_name = "d2" if name == "detectron2" else name plain_formatter = logging.Formatter( "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" ) # stdout logging: master only if distributed_rank == 0: ch = logging.StreamHandler(stream=sys.stdout) ch.setLevel(logging.DEBUG) if color: formatter = _ColorfulFormatter( colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", datefmt="%m/%d %H:%M:%S", root_name=name, abbrev_name=str(abbrev_name), ) else: formatter = plain_formatter ch.setFormatter(formatter) logger.addHandler(ch) # file logging: all workers if output is not None: if output.endswith(".txt") or output.endswith(".log"): filename = output else: filename = os.path.join(output, "log.txt") if distributed_rank > 0: filename = filename + ".rank{}".format(distributed_rank) PathManager.mkdirs(os.path.dirname(filename)) fh = logging.StreamHandler(_cached_log_stream(filename)) fh.setLevel(logging.DEBUG) fh.setFormatter(plain_formatter) logger.addHandler(fh) return logger The provided code snippet includes necessary dependencies for implementing the `default_setup` function. Write a Python function `def default_setup(cfg, args)` to solve the following problem: Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged Here is the function: def default_setup(cfg, args): """ Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged """ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") if comm.is_main_process() and output_dir: PathManager.mkdirs(output_dir) rank = comm.get_rank() setup_logger(output_dir, distributed_rank=rank, name="fvcore") logger = setup_logger(output_dir, distributed_rank=rank) logger.info("Rank of current process: {}. 
World size: {}".format(rank, comm.get_world_size())) logger.info("Environment info:\n" + collect_env_info()) logger.info("Command line arguments: " + str(args)) if hasattr(args, "config_file") and args.config_file != "": logger.info( "Contents of args.config_file={}:\n{}".format( args.config_file, _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), ) ) if comm.is_main_process() and output_dir: # Note: some of our scripts may expect the existence of # config.yaml in output directory path = os.path.join(output_dir, "config.yaml") if isinstance(cfg, CfgNode): logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) with PathManager.open(path, "w") as f: f.write(cfg.dump()) else: LazyConfig.save(cfg, path) logger.info("Full config saved to {}".format(path)) # make sure each worker has a different, yet deterministic seed if specified seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) seed_all_rng(None if seed < 0 else seed + rank) # cudnn benchmark has large overhead. It shouldn't be used considering the small size of # typical validation set. if not (hasattr(args, "eval_only") and args.eval_only): torch.backends.cudnn.benchmark = _try_get_key( cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False )
Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged
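Example (a minimal sketch; `./demo_output` is a hypothetical directory)::

    from detectron2.config import get_cfg
    from detectron2.engine import default_argument_parser, default_setup

    args = default_argument_parser().parse_args([])   # defaults, for illustration
    cfg = get_cfg()
    cfg.OUTPUT_DIR = "./demo_output"
    default_setup(cfg, args)   # loggers configured, env info logged, config.yaml saved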
3,655
import logging from datetime import timedelta import torch import torch.distributed as dist import torch.multiprocessing as mp from detectron2.utils import comm DEFAULT_TIMEOUT = timedelta(minutes=30) def _find_free_port(): import socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Binding to port 0 will cause the OS to find an available port for us sock.bind(("", 0)) port = sock.getsockname()[1] sock.close() # NOTE: there is still a chance the port could be taken by other processes. return port def _distributed_worker( local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args, timeout=DEFAULT_TIMEOUT, ): assert torch.cuda.is_available(), "cuda is not available. Please check your installation." global_rank = machine_rank * num_gpus_per_machine + local_rank try: dist.init_process_group( backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank, timeout=timeout, ) except Exception as e: logger = logging.getLogger(__name__) logger.error("Process group URL: {}".format(dist_url)) raise e # Setup the local process group (which contains ranks within the same machine) assert comm._LOCAL_PROCESS_GROUP is None num_machines = world_size // num_gpus_per_machine for i in range(num_machines): ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) pg = dist.new_group(ranks_on_i) if i == machine_rank: comm._LOCAL_PROCESS_GROUP = pg assert num_gpus_per_machine <= torch.cuda.device_count() torch.cuda.set_device(local_rank) # synchronize is needed here to prevent a possible timeout after calling init_process_group # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 comm.synchronize() main_func(*args) The provided code snippet includes necessary dependencies for implementing the `launch` function. Write a Python function `def launch( main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=(), timeout=DEFAULT_TIMEOUT, )` to solve the following problem: Launch multi-gpu or distributed training. This function must be called on all machines involved in the training. It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. Args: main_func: a function that will be called by `main_func(*args)` num_gpus_per_machine (int): number of GPUs per machine num_machines (int): the total number of machines machine_rank (int): the rank of this machine dist_url (str): url to connect to for distributed jobs, including protocol e.g. "tcp://127.0.0.1:8686". Can be set to "auto" to automatically select a free port on localhost timeout (timedelta): timeout of the distributed workers args (tuple): arguments passed to main_func Here is the function: def launch( main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=(), timeout=DEFAULT_TIMEOUT, ): """ Launch multi-gpu or distributed training. This function must be called on all machines involved in the training. It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. Args: main_func: a function that will be called by `main_func(*args)` num_gpus_per_machine (int): number of GPUs per machine num_machines (int): the total number of machines machine_rank (int): the rank of this machine dist_url (str): url to connect to for distributed jobs, including protocol e.g. "tcp://127.0.0.1:8686". 
Can be set to "auto" to automatically select a free port on localhost timeout (timedelta): timeout of the distributed workers args (tuple): arguments passed to main_func """ world_size = num_machines * num_gpus_per_machine if world_size > 1: # https://github.com/pytorch/pytorch/pull/14391 # TODO prctl in spawned processes if dist_url == "auto": assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." port = _find_free_port() dist_url = f"tcp://127.0.0.1:{port}" if num_machines > 1 and dist_url.startswith("file://"): logger = logging.getLogger(__name__) logger.warning( "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" ) mp.spawn( _distributed_worker, nprocs=num_gpus_per_machine, args=( main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args, timeout, ), daemon=False, ) else: main_func(*args)
Launch multi-gpu or distributed training. This function must be called on all machines involved in the training. It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. Args: main_func: a function that will be called by `main_func(*args)` num_gpus_per_machine (int): number of GPUs per machine num_machines (int): the total number of machines machine_rank (int): the rank of this machine dist_url (str): url to connect to for distributed jobs, including protocol e.g. "tcp://127.0.0.1:8686". Can be set to "auto" to automatically select a free port on localhost timeout (timedelta): timeout of the distributed workers args (tuple): arguments passed to main_func
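Example (a sketch; assumes a single machine with two GPUs, and uses "auto" to pick a free local port)::

    from detectron2.engine import launch

    def main(lr, max_iter):
        print("training with lr={} for {} iters".format(lr, max_iter))

    if __name__ == "__main__":   # required because launch() may spawn subprocesses
        launch(main, num_gpus_per_machine=2, dist_url="auto", args=(0.02, 90000))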
3,656
import numpy as np _COLORS = np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000 ] ).astype(np.float32).reshape(-1, 3) The provided code snippet includes necessary dependencies for implementing the `colormap` function. Write a Python function `def colormap(rgb=False, maximum=255)` to solve the following problem: Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] Here is the function: def colormap(rgb=False, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] """ assert maximum in [255, 1], maximum c = _COLORS * maximum if not rgb: c = c[:, ::-1] return c
Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
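Example (a sketch: picking one stable color per class id; the class index is hypothetical)::

    from detectron2.utils.colormap import colormap

    colors = colormap(rgb=True, maximum=1)   # (N, 3) float32 array, values in [0, 1]
    class_id = 7                             # hypothetical class index
    color = colors[class_id % len(colors)]   # same class always gets the same color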
3,657
import numpy as np _COLORS = np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000 ] ).astype(np.float32).reshape(-1, 3) The provided code snippet includes necessary dependencies for implementing the `random_color` function. Write a Python function `def random_color(rgb=False, maximum=255)` to solve the following problem: Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers Here is the function: def random_color(rgb=False, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers """ idx = np.random.randint(0, len(_COLORS)) ret = _COLORS[idx] * maximum if not rgb: ret = ret[::-1] return ret
Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers
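Example (a sketch)::

    from detectron2.utils.colormap import random_color

    c = random_color(rgb=True, maximum=255)   # one random (3,) color in [0, 255]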
3,658
import importlib import numpy as np import os import re import subprocess import sys from collections import defaultdict import PIL import torch import torchvision from tabulate import tabulate def _test_nccl_worker(rank, num_gpu, dist_url): import torch.distributed as dist dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) dist.barrier(device_ids=[rank]) def test_nccl_ops(): num_gpu = torch.cuda.device_count() if os.access("/tmp", os.W_OK): import torch.multiprocessing as mp dist_url = "file:///tmp/nccl_tmp_file" print("Testing NCCL connectivity ... this should not hang.") mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) print("NCCL succeeded.")
null
3,659
import functools import numpy as np import torch import torch.distributed as dist _LOCAL_PROCESS_GROUP = None def get_world_size() -> int: if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() The provided code snippet includes necessary dependencies for implementing the `get_local_size` function. Write a Python function `def get_local_size() -> int` to solve the following problem: Returns: The size of the per-machine process group, i.e. the number of processes per machine. Here is the function: def get_local_size() -> int: """ Returns: The size of the per-machine process group, i.e. the number of processes per machine. """ if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
Returns: The size of the per-machine process group, i.e. the number of processes per machine.
3,660
import functools import numpy as np import torch import torch.distributed as dist def get_world_size() -> int: if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def get_rank() -> int: if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() @functools.lru_cache() def _get_global_gloo_group(): """ Return a process group based on gloo backend, containing all the ranks The result is cached. """ if dist.get_backend() == "nccl": return dist.new_group(backend="gloo") else: return dist.group.WORLD The provided code snippet includes necessary dependencies for implementing the `gather` function. Write a Python function `def gather(data, dst=0, group=None)` to solve the following problem: Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list. Here is the function: def gather(data, dst=0, group=None): """ Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list. """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() world_size = dist.get_world_size(group=group) if world_size == 1: return [data] rank = dist.get_rank(group=group) if rank == dst: output = [None for _ in range(world_size)] dist.gather_object(data, output, dst=dst, group=group) return output else: dist.gather_object(data, None, dst=dst, group=group) return []
Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list.
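Example (a sketch; under a single-process run this degenerates to a one-element list)::

    import detectron2.utils.comm as comm

    stats = {"rank": comm.get_rank(), "num_images": 12}   # any picklable object
    all_stats = comm.gather(stats, dst=0)
    if comm.is_main_process():
        total = sum(s["num_images"] for s in all_stats)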
3,661
import functools import numpy as np import torch import torch.distributed as dist def all_gather(data, group=None): """ Run all_gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: list of data gathered from each rank """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage. world_size = dist.get_world_size(group) if world_size == 1: return [data] output = [None for _ in range(world_size)] dist.all_gather_object(output, data, group=group) return output The provided code snippet includes necessary dependencies for implementing the `shared_random_seed` function. Write a Python function `def shared_random_seed()` to solve the following problem: Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. Here is the function: def shared_random_seed(): """ Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. """ ints = np.random.randint(2 ** 31) all_ints = all_gather(ints) return all_ints[0]
Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock.
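Example (a sketch: building a shared RNG across workers)::

    import numpy as np
    import detectron2.utils.comm as comm

    seed = comm.shared_random_seed()    # identical value on every worker
    rng = np.random.RandomState(seed)   # workers now draw identical random sequences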
3,662
import importlib import importlib.util import logging import numpy as np import os import random import sys from datetime import datetime import torch DOC_BUILDING = os.getenv("_DOC_BUILDING", False) The provided code snippet includes necessary dependencies for implementing the `fixup_module_metadata` function. Write a Python function `def fixup_module_metadata(module_name, namespace, keys=None)` to solve the following problem: Fix the __qualname__ of module members to be their exported api name, so when they are referenced in docs, sphinx can find them. Reference: https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241 Here is the function: def fixup_module_metadata(module_name, namespace, keys=None): """ Fix the __qualname__ of module members to be their exported api name, so when they are referenced in docs, sphinx can find them. Reference: https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241 """ if not DOC_BUILDING: return seen_ids = set() def fix_one(qualname, name, obj): # avoid infinite recursion (relevant when using # typing.Generic, for example) if id(obj) in seen_ids: return seen_ids.add(id(obj)) mod = getattr(obj, "__module__", None) if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")): obj.__module__ = module_name # Modules, unlike everything else in Python, put fully-qualified # names into their __name__ attribute. We check for "." to avoid # rewriting these. if hasattr(obj, "__name__") and "." not in obj.__name__: obj.__name__ = name obj.__qualname__ = qualname if isinstance(obj, type): for attr_name, attr_value in obj.__dict__.items(): fix_one(objname + "." + attr_name, attr_name, attr_value) if keys is None: keys = namespace.keys() for objname in keys: if not objname.startswith("_"): obj = namespace[objname] fix_one(objname, objname, obj)
Fix the __qualname__ of module members to be their exported api name, so when they are referenced in docs, sphinx can find them. Reference: https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241
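Example (a sketch of one plausible call site, at the bottom of a hypothetical package's `__init__.py`; the call is a no-op unless the `_DOC_BUILDING` env variable is set)::

    from detectron2.utils.env import fixup_module_metadata

    __all__ = ["MyPublicClass"]   # hypothetical exports of this package
    fixup_module_metadata(__name__, globals(), keys=__all__)
    del fixup_module_metadata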
3,663
import colorsys import logging import math import numpy as np from enum import Enum, unique import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import pycocotools.mask as mask_util import torch from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from detectron2.data import MetadataCatalog from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes from detectron2.utils.file_io import PathManager from .colormap import random_color The provided code snippet includes necessary dependencies for implementing the `_create_text_labels` function. Write a Python function `def _create_text_labels(classes, scores, class_names, is_crowd=None)` to solve the following problem: Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None Here is the function: def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels
Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None
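Example (a sketch with a hypothetical three-class metadata list; `_create_text_labels` is a private helper of `detectron2.utils.visualizer`)::

    class_names = ["person", "bicycle", "car"]
    labels = _create_text_labels([0, 2], [0.98, 0.75], class_names, is_crowd=[False, True])
    assert labels == ["person 98%", "car 75%|crowd"]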
3,664
import typing from typing import Any, List import fvcore from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table from torch import nn from detectron2.export import TracingAdapter class FlopCountAnalysis(fvcore.nn.FlopCountAnalysis): """ Same as :class:`fvcore.nn.FlopCountAnalysis`, but supports detectron2 models. """ def __init__(self, model, inputs): """ Args: model (nn.Module): inputs (Any): inputs of the given model. Does not have to be tuple of tensors. """ wrapper = TracingAdapter(model, inputs, allow_non_tensor=True) super().__init__(wrapper, wrapper.flattened_inputs) self.set_op_handle(**{k: None for k in _IGNORED_OPS}) The provided code snippet includes necessary dependencies for implementing the `flop_count_operators` function. Write a Python function `def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]` to solve the following problem: Implement operator-level flops counting using jit. This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard detection models in detectron2. Please use :class:`FlopCountAnalysis` for more advanced functionalities. Note: The function runs the input through the model to compute flops. The flops of a detection model is often input-dependent, for example, the flops of box & mask head depends on the number of proposals & the number of detected objects. Therefore, the flops counting using a single input may not accurately reflect the computation cost of a model. It's recommended to average across a number of inputs. Args: model: a detectron2 model that takes `list[dict]` as input. inputs (list[dict]): inputs to model, in detectron2's standard format. Only "image" key will be used. supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` Returns: Counter: Gflop count per operator Here is the function: def flop_count_operators(model: nn.Module, inputs: list) -> typing.DefaultDict[str, float]: """ Implement operator-level flops counting using jit. This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard detection models in detectron2. Please use :class:`FlopCountAnalysis` for more advanced functionalities. Note: The function runs the input through the model to compute flops. The flops of a detection model is often input-dependent, for example, the flops of box & mask head depends on the number of proposals & the number of detected objects. Therefore, the flops counting using a single input may not accurately reflect the computation cost of a model. It's recommended to average across a number of inputs. Args: model: a detectron2 model that takes `list[dict]` as input. inputs (list[dict]): inputs to model, in detectron2's standard format. Only "image" key will be used. supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` Returns: Counter: Gflop count per operator """ old_train = model.training model.eval() ret = FlopCountAnalysis(model, inputs).by_operator() model.train(old_train) return {k: v / 1e9 for k, v in ret.items()}
Implement operator-level flops counting using jit. This is a wrapper of :func:`fvcore.nn.flop_count` and adds supports for standard detection models in detectron2. Please use :class:`FlopCountAnalysis` for more advanced functionalities. Note: The function runs the input through the model to compute flops. The flops of a detection model is often input-dependent, for example, the flops of box & mask head depends on the number of proposals & the number of detected objects. Therefore, the flops counting using a single input may not accurately reflect the computation cost of a model. It's recommended to average across a number of inputs. Args: model: a detectron2 model that takes `list[dict]` as input. inputs (list[dict]): inputs to model, in detectron2's standard format. Only "image" key will be used. supported_ops (dict[str, Handle]): see documentation of :func:`fvcore.nn.flop_count` Returns: Counter: Gflop count per operator
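Example (a minimal sketch; the default config builds an untrained GeneralizedRCNN, moved to CPU so no GPU is required, and the counts are input-dependent)::

    import torch
    from detectron2.config import get_cfg
    from detectron2.modeling import build_model
    from detectron2.utils.analysis import flop_count_operators

    cfg = get_cfg()
    cfg.MODEL.DEVICE = "cpu"
    model = build_model(cfg)
    inputs = [{"image": torch.rand(3, 800, 800)}]   # one dummy image
    gflops = flop_count_operators(model, inputs)    # e.g. {"conv": ..., "addmm": ...}
    print(sum(gflops.values()), "total GFlops")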
3,665
import typing from typing import Any, List import fvcore from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table from torch import nn from detectron2.export import TracingAdapter The provided code snippet includes necessary dependencies for implementing the `find_unused_parameters` function. Write a Python function `def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]` to solve the following problem: Given a model, find parameters that do not contribute to the loss. Args: model: a model in training mode that returns losses inputs: argument or a tuple of arguments. Inputs of the model Returns: list[str]: the name of unused parameters Here is the function: def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]: """ Given a model, find parameters that do not contribute to the loss. Args: model: a model in training mode that returns losses inputs: argument or a tuple of arguments. Inputs of the model Returns: list[str]: the name of unused parameters """ assert model.training for _, prm in model.named_parameters(): prm.grad = None if isinstance(inputs, tuple): losses = model(*inputs) else: losses = model(inputs) if isinstance(losses, dict): losses = sum(losses.values()) losses.backward() unused: List[str] = [] for name, prm in model.named_parameters(): if prm.grad is None: unused.append(name) prm.grad = None return unused
Given a model, find parameters that do not contribute to the loss. Args: model: a model in training mode that returns losses inputs: argument or a tuple of arguments. Inputs of the model Returns: list[str]: the name of unused parameters
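Example (a self-contained sketch with a toy module whose second branch never contributes to the loss)::

    import torch
    from torch import nn
    from detectron2.utils.analysis import find_unused_parameters

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.used = nn.Linear(4, 1)
            self.unused = nn.Linear(4, 1)   # never called in forward()
        def forward(self, x):
            return {"loss": self.used(x).sum()}

    model = Toy().train()
    print(find_unused_parameters(model, torch.rand(2, 4)))
    # ['unused.weight', 'unused.bias']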
3,666
import logging from contextlib import contextmanager from functools import wraps import torch @contextmanager def _ignore_torch_cuda_oom(): """ A context which ignores CUDA OOM exception from pytorch. """ try: yield except RuntimeError as e: # NOTE: the string may change? if "CUDA out of memory. " in str(e): pass else: raise The provided code snippet includes necessary dependencies for implementing the `retry_if_cuda_oom` function. Write a Python function `def retry_if_cuda_oom(func)` to solve the following problem: Makes a function retry itself after encountering pytorch's CUDA OOM error. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will then retry by trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. The return values may become CPU tensors as well and it's user's responsibility to convert it back to CUDA tensor if needed. Args: func: a stateless callable that takes tensor-like objects as arguments Returns: a callable which retries `func` if OOM is encountered. Examples: :: output = retry_if_cuda_oom(some_torch_function)(input1, input2) # output may be on CPU even if inputs are on GPU Note: 1. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 2. Since the function might be called more than once, it has to be stateless. Here is the function: def retry_if_cuda_oom(func): """ Makes a function retry itself after encountering pytorch's CUDA OOM error. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will then retry by trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. The return values may become CPU tensors as well and it's user's responsibility to convert it back to CUDA tensor if needed. Args: func: a stateless callable that takes tensor-like objects as arguments Returns: a callable which retries `func` if OOM is encountered. Examples: :: output = retry_if_cuda_oom(some_torch_function)(input1, input2) # output may be on CPU even if inputs are on GPU Note: 1. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 2. Since the function might be called more than once, it has to be stateless. """ def maybe_to_cpu(x): try: like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") except AttributeError: like_gpu_tensor = False if like_gpu_tensor: return x.to(device="cpu") else: return x @wraps(func) def wrapped(*args, **kwargs): with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # Clear cache and retry torch.cuda.empty_cache() with _ignore_torch_cuda_oom(): return func(*args, **kwargs) # Try on CPU. This slows down the code significantly, therefore print a notice. logger = logging.getLogger(__name__) logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) new_args = (maybe_to_cpu(x) for x in args) new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} return func(*new_args, **new_kwargs) return wrapped
Makes a function retry itself after encountering pytorch's CUDA OOM error. It will first retry after calling `torch.cuda.empty_cache()`. If that still fails, it will then retry by trying to convert inputs to CPUs. In this case, it expects the function to dispatch to CPU implementation. The return values may become CPU tensors as well and it's user's responsibility to convert it back to CUDA tensor if needed. Args: func: a stateless callable that takes tensor-like objects as arguments Returns: a callable which retries `func` if OOM is encountered. Examples: :: output = retry_if_cuda_oom(some_torch_function)(input1, input2) # output may be on CPU even if inputs are on GPU Note: 1. When converting inputs to CPU, it will only look at each argument and check if it has `.device` and `.to` for conversion. Nested structures of tensors are not supported. 2. Since the function might be called more than once, it has to be stateless.
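A more concrete example than the one in the docstring (a sketch; `boxes1` and `boxes2` are assumed to be large `Boxes` instances already on GPU)::

    from detectron2.structures import pairwise_iou
    from detectron2.utils.memory import retry_if_cuda_oom

    # Falls back to empty_cache(), then to a CPU run, if the GPU runs out of memory.
    iou_matrix = retry_if_cuda_oom(pairwise_iou)(boxes1, boxes2)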
3,667
import atexit import functools import logging import os import sys import time from collections import Counter import torch from tabulate import tabulate from termcolor import colored from detectron2.utils.file_io import PathManager def _find_caller(): """ Returns: str: module name of the caller tuple: a hashable key to be used to identify different callers """ frame = sys._getframe(2) while frame: code = frame.f_code if os.path.join("utils", "logger.") not in code.co_filename: mod_name = frame.f_globals["__name__"] if mod_name == "__main__": mod_name = "detectron2" return mod_name, (code.co_filename, frame.f_lineno, code.co_name) frame = frame.f_back _LOG_COUNTER = Counter() The provided code snippet includes necessary dependencies for implementing the `log_every_n` function. Write a Python function `def log_every_n(lvl, msg, n=1, *, name=None)` to solve the following problem: Log once per n times. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. Here is the function: def log_every_n(lvl, msg, n=1, *, name=None): """ Log once per n times. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default. """ caller_module, key = _find_caller() _LOG_COUNTER[key] += 1 if n == 1 or _LOG_COUNTER[key] % n == 1: logging.getLogger(name or caller_module).log(lvl, msg)
Log once per n times. Args: lvl (int): the logging level msg (str): n (int): name (str): name of the logger to use. Will use the caller's module by default.
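Example (a sketch; the counter is kept per call site, so this prints on iterations 0, 10, 20, ...)::

    import logging
    from detectron2.utils.logger import log_every_n

    for i in range(100):
        log_every_n(logging.INFO, "processed another batch", n=10)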
3,668
import logging import os from collections import OrderedDict import torch from torch.nn.parallel import DistributedDataParallel import time import datetime import json from fvcore.common.timer import Timer import detectron2.utils.comm as comm from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_test_loader, ) from detectron2.engine import default_argument_parser, default_setup, launch from detectron2.evaluation import ( COCOEvaluator, LVISEvaluator, inference_on_dataset, print_csv_format, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils.events import ( CommonMetricPrinter, EventStorage, JSONWriter, TensorboardXWriter, ) from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA from detectron2.data.dataset_mapper import DatasetMapper from detectron2.data.build import build_detection_train_loader from centernet.config import add_centernet_config from centernet.data.custom_build_augmentation import build_custom_augmentation logger = logging.getLogger("detectron2") def do_test(cfg, model): results = OrderedDict() for dataset_name in cfg.DATASETS.TEST: mapper = None if cfg.INPUT.TEST_INPUT_TYPE == 'default' else \ DatasetMapper( cfg, False, augmentations=build_custom_augmentation(cfg, False)) data_loader = build_detection_test_loader(cfg, dataset_name, mapper=mapper) output_folder = os.path.join( cfg.OUTPUT_DIR, "inference_{}".format(dataset_name)) evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type if evaluator_type == "lvis": evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder) elif evaluator_type == 'coco': evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder) else: assert 0, evaluator_type results[dataset_name] = inference_on_dataset( model, data_loader, evaluator) if comm.is_main_process(): logger.info("Evaluation results for {} in csv format:".format( dataset_name)) print_csv_format(results[dataset_name]) if len(results) == 1: results = list(results.values())[0] return results class JSONWriter(EventWriter): """ Write scalars to a json file. It saves scalars as one json per line (instead of a big json) for easy parsing. Examples parsing such a json file: :: $ cat metrics.json | jq -s '.[0:2]' [ { "data_time": 0.008433341979980469, "iteration": 19, "loss": 1.9228371381759644, "loss_box_reg": 0.050025828182697296, "loss_classifier": 0.5316952466964722, "loss_mask": 0.7236229181289673, "loss_rpn_box": 0.0856662318110466, "loss_rpn_cls": 0.48198649287223816, "lr": 0.007173333333333333, "time": 0.25401854515075684 }, { "data_time": 0.007216215133666992, "iteration": 39, "loss": 1.282649278640747, "loss_box_reg": 0.06222952902317047, "loss_classifier": 0.30682939291000366, "loss_mask": 0.6970193982124329, "loss_rpn_box": 0.038663312792778015, "loss_rpn_cls": 0.1471673548221588, "lr": 0.007706666666666667, "time": 0.2490077018737793 } ] $ cat metrics.json | jq '.loss_mask' 0.7126231789588928 0.689423680305481 0.6776131987571716 ... """ def __init__(self, json_file, window_size=20): """ Args: json_file (str): path to the json file. New data will be appended if the file exists. window_size (int): the window size of median smoothing for the scalars whose `smoothing_hint` are True. 
""" self._file_handle = PathManager.open(json_file, "a") self._window_size = window_size self._last_write = -1 def write(self): storage = get_event_storage() to_save = defaultdict(dict) for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): # keep scalars that have not been written if iter <= self._last_write: continue to_save[iter][k] = v if len(to_save): all_iters = sorted(to_save.keys()) self._last_write = max(all_iters) for itr, scalars_per_iter in to_save.items(): scalars_per_iter["iteration"] = itr self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") self._file_handle.flush() try: os.fsync(self._file_handle.fileno()) except AttributeError: pass def close(self): self._file_handle.close() class TensorboardXWriter(EventWriter): """ Write all scalars to a tensorboard file. """ def __init__(self, log_dir: str, window_size: int = 20, **kwargs): """ Args: log_dir (str): the directory to save the output events window_size (int): the scalars will be median-smoothed by this window size kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` """ self._window_size = window_size from torch.utils.tensorboard import SummaryWriter self._writer = SummaryWriter(log_dir, **kwargs) self._last_write = -1 def write(self): storage = get_event_storage() new_last_write = self._last_write for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): if iter > self._last_write: self._writer.add_scalar(k, v, iter) new_last_write = max(new_last_write, iter) self._last_write = new_last_write # storage.put_{image,histogram} is only meant to be used by # tensorboard writer. So we access its internal fields directly from here. if len(storage._vis_data) >= 1: for img_name, img, step_num in storage._vis_data: self._writer.add_image(img_name, img, step_num) # Storage stores all image data and rely on this writer to clear them. # As a result it assumes only one writer will use its image data. # An alternative design is to let storage store limited recent # data (e.g. only the most recent image) that all writers can access. # In that case a writer may not see all image data if its period is long. storage.clear_images() if len(storage._histograms) >= 1: for params in storage._histograms: self._writer.add_histogram_raw(**params) storage.clear_histograms() def close(self): if hasattr(self, "_writer"): # doesn't exist when the code fails at import self._writer.close() class CommonMetricPrinter(EventWriter): """ Print **common** metrics to the terminal, including iteration time, ETA, memory, all losses, and the learning rate. It also applies smoothing using a window of 20 elements. It's meant to print common metrics in common ways. To print something in more customized ways, please implement a similar printer by yourself. """ def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): """ Args: max_iter: the maximum number of iterations to train. Used to compute ETA. If not given, ETA will not be printed. window_size (int): the losses will be median-smoothed by this window size """ self.logger = logging.getLogger(__name__) self._max_iter = max_iter self._window_size = window_size self._last_write = None # (step, time) of last call to write(). 
    def _get_eta(self, storage) -> Optional[str]:
        if self._max_iter is None:
            return ""
        iteration = storage.iter
        try:
            eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1)
            storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
            return str(datetime.timedelta(seconds=int(eta_seconds)))
        except KeyError:
            # estimate eta on our own - more noisy
            eta_string = None
            if self._last_write is not None:
                estimate_iter_time = (time.perf_counter() - self._last_write[1]) / (
                    iteration - self._last_write[0]
                )
                eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            self._last_write = (iteration, time.perf_counter())
            return eta_string

    def write(self):
        storage = get_event_storage()
        iteration = storage.iter
        if iteration == self._max_iter:
            # This hook only reports training progress (loss, ETA, etc) but not other data,
            # therefore do not write anything after training succeeds, even if this method
            # is called.
            return

        try:
            data_time = storage.history("data_time").avg(20)
        except KeyError:
            # they may not exist in the first few iterations (due to warmup)
            # or when SimpleTrainer is not used
            data_time = None
        try:
            iter_time = storage.history("time").global_avg()
        except KeyError:
            iter_time = None
        try:
            lr = "{:.5g}".format(storage.history("lr").latest())
        except KeyError:
            lr = "N/A"

        eta_string = self._get_eta(storage)

        if torch.cuda.is_available():
            max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
        else:
            max_mem_mb = None

        # NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
        self.logger.info(
            " {eta}iter: {iter}  {losses}  {time}{data_time}lr: {lr}  {memory}".format(
                eta=f"eta: {eta_string}  " if eta_string else "",
                iter=iteration,
                losses="  ".join(
                    [
                        "{}: {:.4g}".format(k, v.median(self._window_size))
                        for k, v in storage.histories().items()
                        if "loss" in k
                    ]
                ),
                time="time: {:.4f}  ".format(iter_time) if iter_time is not None else "",
                data_time="data_time: {:.4f}  ".format(data_time)
                if data_time is not None
                else "",
                lr=lr,
                memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
            )
        )


class EventStorage:
    """
    The user-facing class that provides metric storage functionalities.

    In the future we may add support for storing / logging other types of data if needed.
    """

    def __init__(self, start_iter=0):
        """
        Args:
            start_iter (int): the iteration number to start with
        """
        self._history = defaultdict(HistoryBuffer)
        self._smoothing_hints = {}
        self._latest_scalars = {}
        self._iter = start_iter
        self._current_prefix = ""
        self._vis_data = []
        self._histograms = []

    def put_image(self, img_name, img_tensor):
        """
        Add an `img_tensor` associated with `img_name`, to be shown on
        tensorboard.

        Args:
            img_name (str): The name of the image to put into tensorboard.
            img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
                Tensor of shape `[channel, height, width]` where `channel` is 3.
                The image format should be RGB. The elements in img_tensor can
                either have values in [0, 1] (float32) or [0, 255] (uint8).
                The `img_tensor` will be visualized in tensorboard.
        """
        self._vis_data.append((img_name, img_tensor, self._iter))

    def put_scalar(self, name, value, smoothing_hint=True):
        """
        Add a scalar `value` to the `HistoryBuffer` associated with `name`.

        Args:
            smoothing_hint (bool): a 'hint' on whether this scalar is noisy and
                should be smoothed when logged. The hint will be accessible
                through :meth:`EventStorage.smoothing_hints`. A writer may
                ignore the hint and apply custom smoothing rule.
                It defaults to True because most scalars we save need to be
                smoothed to provide any useful signal.
        """
        name = self._current_prefix + name
        history = self._history[name]
        value = float(value)
        history.update(value, self._iter)
        self._latest_scalars[name] = (value, self._iter)

        existing_hint = self._smoothing_hints.get(name)
        if existing_hint is not None:
            assert (
                existing_hint == smoothing_hint
            ), "Scalar {} was put with a different smoothing_hint!".format(name)
        else:
            self._smoothing_hints[name] = smoothing_hint

    def put_scalars(self, *, smoothing_hint=True, **kwargs):
        """
        Put multiple scalars from keyword arguments.

        Examples:

            storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)
        """
        for k, v in kwargs.items():
            self.put_scalar(k, v, smoothing_hint=smoothing_hint)

    def put_histogram(self, hist_name, hist_tensor, bins=1000):
        """
        Create a histogram from a tensor.

        Args:
            hist_name (str): The name of the histogram to put into tensorboard.
            hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be
                converted into a histogram.
            bins (int): Number of histogram bins.
        """
        ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item()

        # Create a histogram with PyTorch
        hist_counts = torch.histc(hist_tensor, bins=bins)
        hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32)

        # Parameter for the add_histogram_raw function of SummaryWriter
        hist_params = dict(
            tag=hist_name,
            min=ht_min,
            max=ht_max,
            num=len(hist_tensor),
            sum=float(hist_tensor.sum()),
            sum_squares=float(torch.sum(hist_tensor ** 2)),
            bucket_limits=hist_edges[1:].tolist(),
            bucket_counts=hist_counts.tolist(),
            global_step=self._iter,
        )
        self._histograms.append(hist_params)

    def history(self, name):
        """
        Returns:
            HistoryBuffer: the scalar history for name
        """
        ret = self._history.get(name, None)
        if ret is None:
            raise KeyError("No history metric available for {}!".format(name))
        return ret

    def histories(self):
        """
        Returns:
            dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars
        """
        return self._history

    def latest(self):
        """
        Returns:
            dict[str -> (float, int)]: mapping from the name of each scalar to the
                most recent value and the iteration number it was added at.
        """
        return self._latest_scalars

    def latest_with_smoothing_hint(self, window_size=20):
        """
        Similar to :meth:`latest`, but the returned values are either the
        un-smoothed original latest value, or a median of the given window_size,
        depending on whether the smoothing_hint is True.

        This provides a default behavior that other writers can use.
        """
        result = {}
        for k, (v, itr) in self._latest_scalars.items():
            result[k] = (
                self._history[k].median(window_size) if self._smoothing_hints[k] else v,
                itr,
            )
        return result

    def smoothing_hints(self):
        """
        Returns:
            dict[name -> bool]: the user-provided hint on whether the scalar
                is noisy and needs smoothing.
        """
        return self._smoothing_hints

    def step(self):
        """
        User should either: (1) Call this function to increment storage.iter when needed. Or
        (2) Set `storage.iter` to the correct iteration number before each iteration.

        The storage will then be able to associate the new data with an iteration number.
        """
        self._iter += 1

    @property
    def iter(self):
        """
        Returns:
            int: The current iteration number. When used together with a trainer,
            this is ensured to be the same as trainer.iter.
""" return self._iter def iter(self, val): self._iter = int(val) def iteration(self): # for backward compatibility return self._iter def __enter__(self): _CURRENT_STORAGE_STACK.append(self) return self def __exit__(self, exc_type, exc_val, exc_tb): assert _CURRENT_STORAGE_STACK[-1] == self _CURRENT_STORAGE_STACK.pop() def name_scope(self, name): """ Yields: A context within which all the events added to this storage will be prefixed by the name scope. """ old_prefix = self._current_prefix self._current_prefix = name.rstrip("/") + "/" yield self._current_prefix = old_prefix def clear_images(self): """ Delete all the stored images for visualization. This should be called after images are written to tensorboard. """ self._vis_data = [] def clear_histograms(self): """ Delete all the stored histograms for visualization. This should be called after histograms are written to tensorboard. """ self._histograms = [] class DatasetMapper: """ A callable which takes a dataset dict in Detectron2 Dataset format, and map it into a format used by the model. This is the default callable to be used to map your dataset dict into training data. You may need to follow it to implement your own one for customized logic, such as a different way to read or transform images. See :doc:`/tutorials/data_loading` for details. The callable currently does the following: 1. Read the image from "file_name" 2. Applies cropping/geometric transforms to the image and annotations 3. Prepare data and annotations to Tensor and :class:`Instances` """ def __init__( self, is_train: bool, *, augmentations: List[Union[T.Augmentation, T.Transform]], image_format: str, use_instance_mask: bool = False, use_keypoint: bool = False, instance_mask_format: str = "polygon", keypoint_hflip_indices: Optional[np.ndarray] = None, precomputed_proposal_topk: Optional[int] = None, recompute_boxes: bool = False, ): """ NOTE: this interface is experimental. Args: is_train: whether it's used in training or inference augmentations: a list of augmentations or deterministic transforms to apply image_format: an image format supported by :func:`detection_utils.read_image`. use_instance_mask: whether to process instance segmentation annotations, if available use_keypoint: whether to process keypoint annotations if available instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation masks into this format. keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices` precomputed_proposal_topk: if given, will load pre-computed proposals from dataset_dict and keep the top k proposals for each image. recompute_boxes: whether to overwrite bounding box annotations by computing tight bounding boxes from instance mask annotations. 
""" if recompute_boxes: assert use_instance_mask, "recompute_boxes requires instance masks" # fmt: off self.is_train = is_train self.augmentations = T.AugmentationList(augmentations) self.image_format = image_format self.use_instance_mask = use_instance_mask self.instance_mask_format = instance_mask_format self.use_keypoint = use_keypoint self.keypoint_hflip_indices = keypoint_hflip_indices self.proposal_topk = precomputed_proposal_topk self.recompute_boxes = recompute_boxes # fmt: on logger = logging.getLogger(__name__) mode = "training" if is_train else "inference" logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}") def from_config(cls, cfg, is_train: bool = True): augs = utils.build_augmentation(cfg, is_train) if cfg.INPUT.CROP.ENABLED and is_train: augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) recompute_boxes = cfg.MODEL.MASK_ON else: recompute_boxes = False ret = { "is_train": is_train, "augmentations": augs, "image_format": cfg.INPUT.FORMAT, "use_instance_mask": cfg.MODEL.MASK_ON, "instance_mask_format": cfg.INPUT.MASK_FORMAT, "use_keypoint": cfg.MODEL.KEYPOINT_ON, "recompute_boxes": recompute_boxes, } if cfg.MODEL.KEYPOINT_ON: ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) if cfg.MODEL.LOAD_PROPOSALS: ret["precomputed_proposal_topk"] = ( cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST ) return ret def _transform_annotations(self, dataset_dict, transforms, image_shape): # USER: Modify this if you want to keep them for some reason. for anno in dataset_dict["annotations"]: if not self.use_instance_mask: anno.pop("segmentation", None) if not self.use_keypoint: anno.pop("keypoints", None) # USER: Implement additional transformations if you have other types of data annos = [ utils.transform_instance_annotations( obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices ) for obj in dataset_dict.pop("annotations") if obj.get("iscrowd", 0) == 0 ] instances = utils.annotations_to_instances( annos, image_shape, mask_format=self.instance_mask_format ) # After transforms such as cropping are applied, the bounding box may no longer # tightly bound the object. As an example, imagine a triangle object # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to # the intersection of original bounding box and the cropping box. if self.recompute_boxes: instances.gt_boxes = instances.gt_masks.get_bounding_boxes() dataset_dict["instances"] = utils.filter_empty_instances(instances) def __call__(self, dataset_dict): """ Args: dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. Returns: dict: a format that builtin models in detectron2 accept """ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below # USER: Write your own image loading if it's not from a file image = utils.read_image(dataset_dict["file_name"], format=self.image_format) utils.check_image_size(dataset_dict, image) # USER: Remove if you don't do semantic/panoptic segmentation. 
if "sem_seg_file_name" in dataset_dict: sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) else: sem_seg_gt = None aug_input = T.AugInput(image, sem_seg=sem_seg_gt) transforms = self.augmentations(aug_input) image, sem_seg_gt = aug_input.image, aug_input.sem_seg image_shape = image.shape[:2] # h, w # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, # but not efficient on large generic data structures due to the use of pickle & mp.Queue. # Therefore it's important to use torch.Tensor. dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) if sem_seg_gt is not None: dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) # USER: Remove if you don't use pre-computed proposals. # Most users would not need this feature. if self.proposal_topk is not None: utils.transform_proposals( dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk ) if not self.is_train: # USER: Modify this if you want to keep them for some reason. dataset_dict.pop("annotations", None) dataset_dict.pop("sem_seg_file_name", None) return dataset_dict if "annotations" in dataset_dict: self._transform_annotations(dataset_dict, transforms, image_shape) return dataset_dict def build_detection_train_loader( dataset, *, mapper, sampler=None, total_batch_size, aspect_ratio_grouping=True, num_workers=0, collate_fn=None, ): """ Build a dataloader for object detection with some default features. Args: dataset (list or torch.utils.data.Dataset): a list of dataset dicts, or a pytorch dataset (either map-style or iterable). It can be obtained by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. mapper (callable): a callable which takes a sample (dict) from dataset and returns the format to be consumed by the model. When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices to be applied on ``dataset``. If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`, which coordinates an infinite random shuffle sequence across all workers. Sampler must be None if ``dataset`` is iterable. total_batch_size (int): total batch size across all workers. aspect_ratio_grouping (bool): whether to group images with similar aspect ratio for efficiency. When enabled, it requires each element in dataset be a dict with keys "width" and "height". num_workers (int): number of parallel data loading workers collate_fn: a function that determines how to do batching, same as the argument of `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of data. No collation is OK for small batch size and simple data structures. If your batch size is large and each sample contains too many small tensors, it's more efficient to collate them in data loader. Returns: torch.utils.data.DataLoader: a dataloader. Each output from it is a ``list[mapped_element]`` of length ``total_batch_size / num_workers``, where ``mapped_element`` is produced by the ``mapper``. 
""" if isinstance(dataset, list): dataset = DatasetFromList(dataset, copy=False) if mapper is not None: dataset = MapDataset(dataset, mapper) if isinstance(dataset, torchdata.IterableDataset): assert sampler is None, "sampler must be None if dataset is IterableDataset" else: if sampler is None: sampler = TrainingSampler(len(dataset)) assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}" return build_batch_data_loader( dataset, sampler, total_batch_size, aspect_ratio_grouping=aspect_ratio_grouping, num_workers=num_workers, collate_fn=collate_fn, ) def build_custom_augmentation(cfg, is_train): """ Create a list of default :class:`Augmentation` from config. Now it includes resizing and flipping. Returns: list[Augmentation] """ if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge': if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST sample_style = "choice" augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop': if is_train: scale = cfg.INPUT.SCALE_RANGE size = cfg.INPUT.TRAIN_SIZE else: scale = (1, 1) size = cfg.INPUT.TEST_SIZE augmentation = [EfficientDetResizeCrop(size, scale)] else: assert 0, cfg.INPUT.CUSTOM_AUG if is_train: augmentation.append(T.RandomFlip()) return augmentation def build_custom_train_loader(cfg, mapper=None): """ Modified from detectron2.data.build.build_custom_train_loader, but supports different samplers """ source_aware = cfg.DATALOADER.SOURCE_AWARE if source_aware: dataset_dicts = get_detection_dataset_dicts_with_source( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) sizes = [0 for _ in range(len(cfg.DATASETS.TRAIN))] for d in dataset_dicts: sizes[d['dataset_source']] += 1 print('dataset sizes', sizes) else: dataset_dicts = get_detection_dataset_dicts( cfg.DATASETS.TRAIN, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0, proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, ) dataset = DatasetFromList(dataset_dicts, copy=False) if mapper is None: assert 0 # mapper = DatasetMapper(cfg, True) dataset = MapDataset(dataset, mapper) sampler_name = cfg.DATALOADER.SAMPLER_TRAIN logger = logging.getLogger(__name__) logger.info("Using training sampler {}".format(sampler_name)) # TODO avoid if-else? 
    if sampler_name == "TrainingSampler":
        sampler = TrainingSampler(len(dataset))
    elif sampler_name == "MultiDatasetSampler":
        assert source_aware
        sampler = MultiDatasetSampler(cfg, sizes, dataset_dicts)
    elif sampler_name == "RepeatFactorTrainingSampler":
        repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
        sampler = RepeatFactorTrainingSampler(repeat_factors)
    elif sampler_name == "ClassAwareSampler":
        sampler = ClassAwareSampler(dataset_dicts)
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))

    return build_batch_data_loader(
        dataset,
        sampler,
        cfg.SOLVER.IMS_PER_BATCH,
        aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
    )


def do_train(cfg, model, resume=False):
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )

    start_iter = (
        checkpointer.resume_or_load(
            cfg.MODEL.WEIGHTS, resume=resume,
        ).get("iteration", -1) + 1
    )
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )

    mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
        DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']:
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        from centernet.data.custom_dataset_dataloader import build_custom_train_loader
        data_loader = build_custom_train_loader(cfg, mapper=mapper)

    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)

            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            loss_dict_reduced = {
                k: v.item() for k, v in comm.reduce_dict(loss_dict).items()
            }
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(
                    total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()

            storage.put_scalar(
                "lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)

            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()

            if (
                cfg.TEST.EVAL_PERIOD > 0
                and iteration % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter
            ):
                do_test(cfg, model)
                comm.synchronize()

            if iteration - start_iter > 5 and \
                    (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)

        total_time = time.perf_counter() - start_time
        logger.info(
            "Total training time: {}".format(
                str(datetime.timedelta(seconds=int(total_time)))))
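# The entry point below is a minimal sketch of how a script like this is
# typically wired up, following detectron2's plain_train_net pattern; the
# original script's exact setup()/main() are not shown here, so treat this
# structure as an assumption rather than the repository's verbatim code.
def setup(args):
    """
    Create configs and perform basic setups (sketch).
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)

    # Wrap in DDP when launched with more than one process.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)


if __name__ == "__main__":
    # Example launch:
    #   python train_net.py --num-gpus 8 --config-file <your_config>.yaml
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )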