code
stringlengths
1
1.05M
repo_name
stringlengths
6
83
path
stringlengths
3
242
language
stringclasses
222 values
license
stringclasses
20 values
size
int64
1
1.05M
# Ultralytics YOLO 🚀, AGPL-3.0 license import math from typing import Tuple, Type import torch from torch import Tensor, nn from ultralytics.nn.modules import MLPBlock class TwoWayTransformer(nn.Module): """ A Two-Way Transformer module that enables the simultaneous attention to both image and query points. This class serves as a specialized transformer decoder that attends to an input image using queries whose positional embedding is supplied. This is particularly useful for tasks like object detection, image segmentation, and point cloud processing. Attributes: depth (int): The number of layers in the transformer. embedding_dim (int): The channel dimension for the input embeddings. num_heads (int): The number of heads for multihead attention. mlp_dim (int): The internal channel dimension for the MLP block. layers (nn.ModuleList): The list of TwoWayAttentionBlock layers that make up the transformer. final_attn_token_to_image (Attention): The final attention layer applied from the queries to the image. norm_final_attn (nn.LayerNorm): The layer normalization applied to the final queries. """ def __init__( self, depth: int, embedding_dim: int, num_heads: int, mlp_dim: int, activation: Type[nn.Module] = nn.ReLU, attention_downsample_rate: int = 2, ) -> None: """ A transformer decoder that attends to an input image using queries whose positional embedding is supplied. Args: depth (int): number of layers in the transformer embedding_dim (int): the channel dimension for the input embeddings num_heads (int): the number of heads for multihead attention. 
Must divide embedding_dim mlp_dim (int): the channel dimension internal to the MLP block activation (nn.Module): the activation to use in the MLP block """ super().__init__() self.depth = depth self.embedding_dim = embedding_dim self.num_heads = num_heads self.mlp_dim = mlp_dim self.layers = nn.ModuleList() for i in range(depth): self.layers.append( TwoWayAttentionBlock( embedding_dim=embedding_dim, num_heads=num_heads, mlp_dim=mlp_dim, activation=activation, attention_downsample_rate=attention_downsample_rate, skip_first_layer_pe=(i == 0), ) ) self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.norm_final_attn = nn.LayerNorm(embedding_dim) def forward( self, image_embedding: Tensor, image_pe: Tensor, point_embedding: Tensor, ) -> Tuple[Tensor, Tensor]: """ Args: image_embedding (torch.Tensor): image to attend to. Should be shape B x embedding_dim x h x w for any h and w. image_pe (torch.Tensor): the positional encoding to add to the image. Must have same shape as image_embedding. point_embedding (torch.Tensor): the embedding to add to the query points. Must have shape B x N_points x embedding_dim for any N_points. 
Returns: (torch.Tensor): the processed point_embedding (torch.Tensor): the processed image_embedding """ # BxCxHxW -> BxHWxC == B x N_image_tokens x C bs, c, h, w = image_embedding.shape image_embedding = image_embedding.flatten(2).permute(0, 2, 1) image_pe = image_pe.flatten(2).permute(0, 2, 1) # Prepare queries queries = point_embedding keys = image_embedding # Apply transformer blocks and final layernorm for layer in self.layers: queries, keys = layer( queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe, ) # Apply the final attention layer from the points to the image q = queries + point_embedding k = keys + image_pe attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) queries = queries + attn_out queries = self.norm_final_attn(queries) return queries, keys class TwoWayAttentionBlock(nn.Module): """ An attention block that performs both self-attention and cross-attention in two directions: queries to keys and keys to queries. This block consists of four main layers: (1) self-attention on sparse inputs, (2) cross-attention of sparse inputs to dense inputs, (3) an MLP block on sparse inputs, and (4) cross-attention of dense inputs to sparse inputs. Attributes: self_attn (Attention): The self-attention layer for the queries. norm1 (nn.LayerNorm): Layer normalization following the first attention block. cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys. norm2 (nn.LayerNorm): Layer normalization following the second attention block. mlp (MLPBlock): MLP block that transforms the query embeddings. norm3 (nn.LayerNorm): Layer normalization following the MLP block. norm4 (nn.LayerNorm): Layer normalization following the third attention block. cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries. skip_first_layer_pe (bool): Whether to skip the positional encoding in the first layer. 
""" def __init__( self, embedding_dim: int, num_heads: int, mlp_dim: int = 2048, activation: Type[nn.Module] = nn.ReLU, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, ) -> None: """ A transformer block with four layers: (1) self-attention of sparse inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp block on sparse inputs, and (4) cross attention of dense inputs to sparse inputs. Args: embedding_dim (int): the channel dimension of the embeddings num_heads (int): the number of heads in the attention layers mlp_dim (int): the hidden dimension of the mlp block activation (nn.Module): the activation of the mlp block skip_first_layer_pe (bool): skip the PE on the first layer """ super().__init__() self.self_attn = Attention(embedding_dim, num_heads) self.norm1 = nn.LayerNorm(embedding_dim) self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.norm2 = nn.LayerNorm(embedding_dim) self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) self.norm3 = nn.LayerNorm(embedding_dim) self.norm4 = nn.LayerNorm(embedding_dim) self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.skip_first_layer_pe = skip_first_layer_pe def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]: """Apply self-attention and cross-attention to queries and keys and return the processed embeddings.""" # Self attention block if self.skip_first_layer_pe: queries = self.self_attn(q=queries, k=queries, v=queries) else: q = queries + query_pe attn_out = self.self_attn(q=q, k=q, v=queries) queries = queries + attn_out queries = self.norm1(queries) # Cross attention block, tokens attending to image embedding q = queries + query_pe k = keys + key_pe attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) queries = queries + attn_out queries = self.norm2(queries) # MLP block mlp_out = 
self.mlp(queries) queries = queries + mlp_out queries = self.norm3(queries) # Cross attention block, image embedding attending to tokens q = queries + query_pe k = keys + key_pe attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) keys = keys + attn_out keys = self.norm4(keys) return queries, keys class Attention(nn.Module): """An attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. """ def __init__( self, embedding_dim: int, num_heads: int, downsample_rate: int = 1, ) -> None: """ Initializes the Attention model with the given dimensions and settings. Args: embedding_dim (int): The dimensionality of the input embeddings. num_heads (int): The number of attention heads. downsample_rate (int, optional): The factor by which the internal dimensions are downsampled. Defaults to 1. Raises: AssertionError: If 'num_heads' does not evenly divide the internal dimension (embedding_dim / downsample_rate). """ super().__init__() self.embedding_dim = embedding_dim self.internal_dim = embedding_dim // downsample_rate self.num_heads = num_heads assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
self.q_proj = nn.Linear(embedding_dim, self.internal_dim) self.k_proj = nn.Linear(embedding_dim, self.internal_dim) self.v_proj = nn.Linear(embedding_dim, self.internal_dim) self.out_proj = nn.Linear(self.internal_dim, embedding_dim) @staticmethod def _separate_heads(x: Tensor, num_heads: int) -> Tensor: """Separate the input tensor into the specified number of attention heads.""" b, n, c = x.shape x = x.reshape(b, n, num_heads, c // num_heads) return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head @staticmethod def _recombine_heads(x: Tensor) -> Tensor: """Recombine the separated attention heads into a single tensor.""" b, n_heads, n_tokens, c_per_head = x.shape x = x.transpose(1, 2) return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: """Compute the attention output given the input query, key, and value tensors.""" # Input projections q = self.q_proj(q) k = self.k_proj(k) v = self.v_proj(v) # Separate into heads q = self._separate_heads(q, self.num_heads) k = self._separate_heads(k, self.num_heads) v = self._separate_heads(v, self.num_heads) # Attention _, _, _, c_per_head = q.shape attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens attn = attn / math.sqrt(c_per_head) attn = torch.softmax(attn, dim=-1) # Get output out = attn @ v out = self._recombine_heads(out) return self.out_proj(out)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/sam/modules/transformer.py
Python
unknown
11,170
# Ultralytics YOLO 🚀, AGPL-3.0 license """ Generate predictions using the Segment Anything Model (SAM). SAM is an advanced image segmentation model offering features like promptable segmentation and zero-shot performance. This module contains the implementation of the prediction logic and auxiliary utilities required to perform segmentation using SAM. It forms an integral part of the Ultralytics framework and is designed for high-performance, real-time image segmentation tasks. """ import numpy as np import torch import torch.nn.functional as F import torchvision from ultralytics.data.augment import LetterBox from ultralytics.engine.predictor import BasePredictor from ultralytics.engine.results import Results from ultralytics.utils import DEFAULT_CFG, ops from ultralytics.utils.torch_utils import select_device from .amg import ( batch_iterator, batched_mask_to_box, build_all_layer_point_grids, calculate_stability_score, generate_crop_boxes, is_box_near_crop_edge, remove_small_regions, uncrop_boxes_xyxy, uncrop_masks, ) from .build import build_sam class Predictor(BasePredictor): """ Predictor class for the Segment Anything Model (SAM), extending BasePredictor. The class provides an interface for model inference tailored to image segmentation tasks. With advanced architecture and promptable segmentation capabilities, it facilitates flexible and real-time mask generation. The class is capable of working with various types of prompts such as bounding boxes, points, and low-resolution masks. Attributes: cfg (dict): Configuration dictionary specifying model and task-related parameters. overrides (dict): Dictionary containing values that override the default configuration. _callbacks (dict): Dictionary of user-defined callback functions to augment behavior. args (namespace): Namespace to hold command-line arguments or other operational variables. im (torch.Tensor): Preprocessed input image tensor. features (torch.Tensor): Extracted image features used for inference. 
prompts (dict): Collection of various prompt types, such as bounding boxes and points. segment_all (bool): Flag to control whether to segment all objects in the image or only specified ones. """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): """ Initialize the Predictor with configuration, overrides, and callbacks. The method sets up the Predictor object and applies any configuration overrides or callbacks provided. It initializes task-specific settings for SAM, such as retina_masks being set to True for optimal results. Args: cfg (dict): Configuration dictionary. overrides (dict, optional): Dictionary of values to override default configuration. _callbacks (dict, optional): Dictionary of callback functions to customize behavior. """ if overrides is None: overrides = {} overrides.update(dict(task="segment", mode="predict", imgsz=1024)) super().__init__(cfg, overrides, _callbacks) self.args.retina_masks = True self.im = None self.features = None self.prompts = {} self.segment_all = False def preprocess(self, im): """ Preprocess the input image for model inference. The method prepares the input image by applying transformations and normalization. It supports both torch.Tensor and list of np.ndarray as input formats. Args: im (torch.Tensor | List[np.ndarray]): BCHW tensor format or list of HWC numpy arrays. Returns: (torch.Tensor): The preprocessed image tensor. """ if self.im is not None: return self.im not_tensor = not isinstance(im, torch.Tensor) if not_tensor: im = np.stack(self.pre_transform(im)) im = im[..., ::-1].transpose((0, 3, 1, 2)) im = np.ascontiguousarray(im) im = torch.from_numpy(im) im = im.to(self.device) im = im.half() if self.model.fp16 else im.float() if not_tensor: im = (im - self.mean) / self.std return im def pre_transform(self, im): """ Perform initial transformations on the input image for preprocessing. The method applies transformations such as resizing to prepare the image for further preprocessing. 
Currently, batched inference is not supported; hence the list length should be 1. Args: im (List[np.ndarray]): List containing images in HWC numpy array format. Returns: (List[np.ndarray]): List of transformed images. """ assert len(im) == 1, "SAM model does not currently support batched inference" letterbox = LetterBox(self.args.imgsz, auto=False, center=False) return [letterbox(image=x) for x in im] def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs): """ Perform image segmentation inference based on the given input cues, using the currently loaded image. This method leverages SAM's (Segment Anything Model) architecture consisting of image encoder, prompt encoder, and mask decoder for real-time and promptable segmentation tasks. Args: im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W). bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format. points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates. labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background. masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256. multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False. Returns: (tuple): Contains the following three elements. - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks. - np.ndarray: An array of length C containing quality scores predicted by the model for each mask. - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256. 
""" # Override prompts if any stored in self.prompts bboxes = self.prompts.pop("bboxes", bboxes) points = self.prompts.pop("points", points) masks = self.prompts.pop("masks", masks) if all(i is None for i in [bboxes, points, masks]): return self.generate(im, *args, **kwargs) return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output) def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False): """ Internal function for image segmentation inference based on cues like bounding boxes, points, and masks. Leverages SAM's specialized architecture for prompt-based, real-time segmentation. Args: im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W). bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format. points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates. labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background. masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256. multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False. Returns: (tuple): Contains the following three elements. - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks. - np.ndarray: An array of length C containing quality scores predicted by the model for each mask. - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256. 
""" features = self.model.image_encoder(im) if self.features is None else self.features src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:] r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1]) # Transform input prompts if points is not None: points = torch.as_tensor(points, dtype=torch.float32, device=self.device) points = points[None] if points.ndim == 1 else points # Assuming labels are all positive if users don't pass labels. if labels is None: labels = np.ones(points.shape[0]) labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device) points *= r # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1) points, labels = points[:, None, :], labels[:, None] if bboxes is not None: bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device) bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes bboxes *= r if masks is not None: masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1) points = (points, labels) if points is not None else None # Embed prompts sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks) # Predict masks pred_masks, pred_scores = self.model.mask_decoder( image_embeddings=features, image_pe=self.model.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, ) # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, ) # `d` could be 1 or 3 depends on `multimask_output`. return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1) def generate( self, im, crop_n_layers=0, crop_overlap_ratio=512 / 1500, crop_downscale_factor=1, point_grids=None, points_stride=32, points_batch_size=64, conf_thres=0.88, stability_score_thresh=0.95, stability_score_offset=0.95, crop_nms_thresh=0.7, ): """ Perform image segmentation using the Segment Anything Model (SAM). 
This function segments an entire image into constituent parts by leveraging SAM's advanced architecture and real-time performance capabilities. It can optionally work on image crops for finer segmentation. Args: im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W). crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops. Each layer produces 2**i_layer number of image crops. crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers. crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer. point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1]. Used in the nth crop layer. points_stride (int, optional): Number of points to sample along each side of the image. Exclusive with 'point_grids'. points_batch_size (int): Batch size for the number of points processed simultaneously. conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction. stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability. stability_score_offset (float): Offset value for calculating stability score. crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops. Returns: (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes. 
""" self.segment_all = True ih, iw = im.shape[2:] crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio) if point_grids is None: point_grids = build_all_layer_point_grids(points_stride, crop_n_layers, crop_downscale_factor) pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], [] for crop_region, layer_idx in zip(crop_regions, layer_idxs): x1, y1, x2, y2 = crop_region w, h = x2 - x1, y2 - y1 area = torch.tensor(w * h, device=im.device) points_scale = np.array([[w, h]]) # w, h # Crop image and interpolate to input size crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode="bilinear", align_corners=False) # (num_points, 2) points_for_image = point_grids[layer_idx] * points_scale crop_masks, crop_scores, crop_bboxes = [], [], [] for (points,) in batch_iterator(points_batch_size, points_for_image): pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True) # Interpolate predicted masks to input size pred_mask = F.interpolate(pred_mask[None], (h, w), mode="bilinear", align_corners=False)[0] idx = pred_score > conf_thres pred_mask, pred_score = pred_mask[idx], pred_score[idx] stability_score = calculate_stability_score( pred_mask, self.model.mask_threshold, stability_score_offset ) idx = stability_score > stability_score_thresh pred_mask, pred_score = pred_mask[idx], pred_score[idx] # Bool type is much more memory-efficient. 
pred_mask = pred_mask > self.model.mask_threshold # (N, 4) pred_bbox = batched_mask_to_box(pred_mask).float() keep_mask = ~is_box_near_crop_edge(pred_bbox, crop_region, [0, 0, iw, ih]) if not torch.all(keep_mask): pred_bbox, pred_mask, pred_score = pred_bbox[keep_mask], pred_mask[keep_mask], pred_score[keep_mask] crop_masks.append(pred_mask) crop_bboxes.append(pred_bbox) crop_scores.append(pred_score) # Do nms within this crop crop_masks = torch.cat(crop_masks) crop_bboxes = torch.cat(crop_bboxes) crop_scores = torch.cat(crop_scores) keep = torchvision.ops.nms(crop_bboxes, crop_scores, self.args.iou) # NMS crop_bboxes = uncrop_boxes_xyxy(crop_bboxes[keep], crop_region) crop_masks = uncrop_masks(crop_masks[keep], crop_region, ih, iw) crop_scores = crop_scores[keep] pred_masks.append(crop_masks) pred_bboxes.append(crop_bboxes) pred_scores.append(crop_scores) region_areas.append(area.expand(len(crop_masks))) pred_masks = torch.cat(pred_masks) pred_bboxes = torch.cat(pred_bboxes) pred_scores = torch.cat(pred_scores) region_areas = torch.cat(region_areas) # Remove duplicate masks between crops if len(crop_regions) > 1: scores = 1 / region_areas keep = torchvision.ops.nms(pred_bboxes, scores, crop_nms_thresh) pred_masks, pred_bboxes, pred_scores = pred_masks[keep], pred_bboxes[keep], pred_scores[keep] return pred_masks, pred_scores, pred_bboxes def setup_model(self, model, verbose=True): """ Initializes the Segment Anything Model (SAM) for inference. This method sets up the SAM model by allocating it to the appropriate device and initializing the necessary parameters for image normalization and other Ultralytics compatibility settings. Args: model (torch.nn.Module): A pre-trained SAM model. If None, a model will be built based on configuration. verbose (bool): If True, prints selected device information. Attributes: model (torch.nn.Module): The SAM model allocated to the chosen device for inference. 
device (torch.device): The device to which the model and tensors are allocated. mean (torch.Tensor): The mean values for image normalization. std (torch.Tensor): The standard deviation values for image normalization. """ device = select_device(self.args.device, verbose=verbose) if model is None: model = build_sam(self.args.model) model.eval() self.model = model.to(device) self.device = device self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device) self.std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device) # Ultralytics compatibility settings self.model.pt = False self.model.triton = False self.model.stride = 32 self.model.fp16 = False self.done_warmup = True def postprocess(self, preds, img, orig_imgs): """ Post-processes SAM's inference outputs to generate object detection masks and bounding boxes. The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. The SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance. Args: preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes. img (torch.Tensor): The processed input image tensor. orig_imgs (list | torch.Tensor): The original, unprocessed images. Returns: (list): List of Results objects containing detection masks, bounding boxes, and other metadata. 
""" # (N, 1, H, W), (N, 1) pred_masks, pred_scores = preds[:2] pred_bboxes = preds[2] if self.segment_all else None names = dict(enumerate(str(i) for i in range(len(pred_masks)))) if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) results = [] for i, masks in enumerate([pred_masks]): orig_img = orig_imgs[i] if pred_bboxes is not None: pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False) cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device) pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1) masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0] masks = masks > self.model.mask_threshold # to bool img_path = self.batch[0][i] results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes)) # Reset segment-all mode. self.segment_all = False return results def setup_source(self, source): """ Sets up the data source for inference. This method configures the data source from which images will be fetched for inference. The source could be a directory, a video file, or other types of image data sources. Args: source (str | Path): The path to the image data source for inference. """ if source is not None: super().setup_source(source) def set_image(self, image): """ Preprocesses and sets a single image for inference. This function sets up the model if not already initialized, configures the data source to the specified image, and preprocesses the image for feature extraction. Only one image can be set at a time. Args: image (str | np.ndarray): Image file path as a string, or a np.ndarray image read by cv2. Raises: AssertionError: If more than one image is set. 
""" if self.model is None: model = build_sam(self.args.model) self.setup_model(model) self.setup_source(image) assert len(self.dataset) == 1, "`set_image` only supports setting one image!" for batch in self.dataset: im = self.preprocess(batch[1]) self.features = self.model.image_encoder(im) self.im = im break def set_prompts(self, prompts): """Set prompts in advance.""" self.prompts = prompts def reset_image(self): """Resets the image and its features to None.""" self.im = None self.features = None @staticmethod def remove_small_regions(masks, min_area=0, nms_thresh=0.7): """ Perform post-processing on segmentation masks generated by the Segment Anything Model (SAM). Specifically, this function removes small disconnected regions and holes from the input masks, and then performs Non-Maximum Suppression (NMS) to eliminate any newly created duplicate boxes. Args: masks (torch.Tensor): A tensor containing the masks to be processed. Shape should be (N, H, W), where N is the number of masks, H is height, and W is width. min_area (int): The minimum area below which disconnected regions and holes will be removed. Defaults to 0. nms_thresh (float): The IoU threshold for the NMS algorithm. Defaults to 0.7. Returns: (tuple([torch.Tensor, List[int]])): - new_masks (torch.Tensor): The processed masks with small regions removed. Shape is (N, H, W). - keep (List[int]): The indices of the remaining masks post-NMS, which can be used to filter the boxes. 
""" if len(masks) == 0: return masks # Filter small disconnected regions and holes new_masks = [] scores = [] for mask in masks: mask = mask.cpu().numpy().astype(np.uint8) mask, changed = remove_small_regions(mask, min_area, mode="holes") unchanged = not changed mask, changed = remove_small_regions(mask, min_area, mode="islands") unchanged = unchanged and not changed new_masks.append(torch.as_tensor(mask).unsqueeze(0)) # Give score=0 to changed masks and 1 to unchanged masks so NMS prefers masks not needing postprocessing scores.append(float(unchanged)) # Recalculate boxes and remove any new duplicates new_masks = torch.cat(new_masks, dim=0) boxes = batched_mask_to_box(new_masks) keep = torchvision.ops.nms(boxes.float(), torch.as_tensor(scores), nms_thresh) return new_masks[keep].to(device=masks.device, dtype=masks.dtype), keep
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/sam/predict.py
Python
unknown
23,632
# Ultralytics YOLO 🚀, AGPL-3.0 license
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/utils/__init__.py
Python
unknown
42
# Ultralytics YOLO 🚀, AGPL-3.0 license

import torch
import torch.nn as nn
import torch.nn.functional as F

from ultralytics.utils.loss import FocalLoss, VarifocalLoss
from ultralytics.utils.metrics import bbox_iou

from .ops import HungarianMatcher


class DETRLoss(nn.Module):
    """
    DETR (DEtection TRansformer) Loss class.

    This class calculates and returns the different loss components for the DETR object detection model. It computes
    classification loss, bounding box loss, GIoU loss, and optionally auxiliary losses.

    Attributes:
        nc (int): The number of classes.
        loss_gain (dict): Coefficients for different loss components.
        aux_loss (bool): Whether to compute auxiliary losses.
        use_fl (bool): Use FocalLoss or not.
        use_vfl (bool): Use VarifocalLoss or not.
        use_uni_match (bool): Whether to use a fixed layer to assign labels for the auxiliary branch.
        uni_match_ind (int): The fixed indices of a layer to use if `use_uni_match` is True.
        matcher (HungarianMatcher): Object to compute matching cost and indices.
        fl (FocalLoss or None): Focal Loss object if `use_fl` is True, otherwise None.
        vfl (VarifocalLoss or None): Varifocal Loss object if `use_vfl` is True, otherwise None.
        device (torch.device): Device on which tensors are stored.
    """

    def __init__(
        self, nc=80, loss_gain=None, aux_loss=True, use_fl=True, use_vfl=False, use_uni_match=False, uni_match_ind=0
    ):
        """
        DETR loss function.

        Args:
            nc (int): The number of classes.
            loss_gain (dict): The coefficient of loss.
            aux_loss (bool): If 'aux_loss = True', loss at each decoder layer are to be used.
            use_fl (bool): Use FocalLoss or not.
            use_vfl (bool): Use VarifocalLoss or not.
            use_uni_match (bool): Whether to use a fixed layer to assign labels for auxiliary branch.
            uni_match_ind (int): The fixed indices of a layer.
        """
        super().__init__()

        if loss_gain is None:
            loss_gain = {"class": 1, "bbox": 5, "giou": 2, "no_object": 0.1, "mask": 1, "dice": 1}
        self.nc = nc
        # NOTE: the matcher uses its own cost coefficients, distinct from self.loss_gain
        self.matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
        self.loss_gain = loss_gain
        self.aux_loss = aux_loss
        self.fl = FocalLoss() if use_fl else None
        self.vfl = VarifocalLoss() if use_vfl else None

        self.use_uni_match = use_uni_match
        self.uni_match_ind = uni_match_ind
        self.device = None  # set lazily from pred_bboxes in forward()

    def _get_loss_class(self, pred_scores, targets, gt_scores, num_gts, postfix=""):
        """Computes the classification loss based on predictions, target values, and ground truth scores."""
        # Logits: [b, query, num_classes], gt_class: list[[n, 1]]
        name_class = f"loss_class{postfix}"
        bs, nq = pred_scores.shape[:2]
        # one_hot = F.one_hot(targets, self.nc + 1)[..., :-1]  # (bs, num_queries, num_classes)
        # Build one-hot over nc+1 classes (index nc == background) then drop the background column
        one_hot = torch.zeros((bs, nq, self.nc + 1), dtype=torch.int64, device=targets.device)
        one_hot.scatter_(2, targets.unsqueeze(-1), 1)
        one_hot = one_hot[..., :-1]
        gt_scores = gt_scores.view(bs, nq, 1) * one_hot

        if self.fl:
            if num_gts and self.vfl:
                loss_cls = self.vfl(pred_scores, gt_scores, one_hot)
            else:
                loss_cls = self.fl(pred_scores, one_hot.float())
            # Normalize by number of GTs (at least 1) scaled by query count
            loss_cls /= max(num_gts, 1) / nq
        else:
            loss_cls = nn.BCEWithLogitsLoss(reduction="none")(pred_scores, gt_scores).mean(1).sum()  # YOLO CLS loss

        return {name_class: loss_cls.squeeze() * self.loss_gain["class"]}

    def _get_loss_bbox(self, pred_bboxes, gt_bboxes, postfix=""):
        """Calculates and returns the bounding box loss and GIoU loss for the predicted and ground truth bounding
        boxes.
        """
        # Boxes: [b, query, 4], gt_bbox: list[[n, 4]]
        name_bbox = f"loss_bbox{postfix}"
        name_giou = f"loss_giou{postfix}"

        loss = {}
        if len(gt_bboxes) == 0:
            # No targets in the batch: both box losses are zero
            loss[name_bbox] = torch.tensor(0.0, device=self.device)
            loss[name_giou] = torch.tensor(0.0, device=self.device)
            return loss

        loss[name_bbox] = self.loss_gain["bbox"] * F.l1_loss(pred_bboxes, gt_bboxes, reduction="sum") / len(gt_bboxes)
        loss[name_giou] = 1.0 - bbox_iou(pred_bboxes, gt_bboxes, xywh=True, GIoU=True)
        loss[name_giou] = loss[name_giou].sum() / len(gt_bboxes)
        loss[name_giou] = self.loss_gain["giou"] * loss[name_giou]
        return {k: v.squeeze() for k, v in loss.items()}

    # This function is for future RT-DETR Segment models
    # def _get_loss_mask(self, masks, gt_mask, match_indices, postfix=''):
    #     # masks: [b, query, h, w], gt_mask: list[[n, H, W]]
    #     name_mask = f'loss_mask{postfix}'
    #     name_dice = f'loss_dice{postfix}'
    #
    #     loss = {}
    #     if sum(len(a) for a in gt_mask) == 0:
    #         loss[name_mask] = torch.tensor(0., device=self.device)
    #         loss[name_dice] = torch.tensor(0., device=self.device)
    #         return loss
    #
    #     num_gts = len(gt_mask)
    #     src_masks, target_masks = self._get_assigned_bboxes(masks, gt_mask, match_indices)
    #     src_masks = F.interpolate(src_masks.unsqueeze(0), size=target_masks.shape[-2:], mode='bilinear')[0]
    #     # TODO: torch does not have `sigmoid_focal_loss`, but it's not urgent since we don't use mask branch for now.
    #     loss[name_mask] = self.loss_gain['mask'] * F.sigmoid_focal_loss(src_masks, target_masks,
    #                                                                     torch.tensor([num_gts], dtype=torch.float32))
    #     loss[name_dice] = self.loss_gain['dice'] * self._dice_loss(src_masks, target_masks, num_gts)
    #     return loss

    # This function is for future RT-DETR Segment models
    # @staticmethod
    # def _dice_loss(inputs, targets, num_gts):
    #     inputs = F.sigmoid(inputs).flatten(1)
    #     targets = targets.flatten(1)
    #     numerator = 2 * (inputs * targets).sum(1)
    #     denominator = inputs.sum(-1) + targets.sum(-1)
    #     loss = 1 - (numerator + 1) / (denominator + 1)
    #     return loss.sum() / num_gts

    def _get_loss_aux(
        self,
        pred_bboxes,
        pred_scores,
        gt_bboxes,
        gt_cls,
        gt_groups,
        match_indices=None,
        postfix="",
        masks=None,
        gt_mask=None,
    ):
        """Get auxiliary losses."""
        # NOTE: loss class, bbox, giou, mask, dice
        loss = torch.zeros(5 if masks is not None else 3, device=pred_bboxes.device)
        if match_indices is None and self.use_uni_match:
            # Use a single fixed decoder layer's matching for all auxiliary layers
            match_indices = self.matcher(
                pred_bboxes[self.uni_match_ind],
                pred_scores[self.uni_match_ind],
                gt_bboxes,
                gt_cls,
                gt_groups,
                masks=masks[self.uni_match_ind] if masks is not None else None,
                gt_mask=gt_mask,
            )
        for i, (aux_bboxes, aux_scores) in enumerate(zip(pred_bboxes, pred_scores)):
            aux_masks = masks[i] if masks is not None else None
            loss_ = self._get_loss(
                aux_bboxes,
                aux_scores,
                gt_bboxes,
                gt_cls,
                gt_groups,
                masks=aux_masks,
                gt_mask=gt_mask,
                postfix=postfix,
                match_indices=match_indices,
            )
            loss[0] += loss_[f"loss_class{postfix}"]
            loss[1] += loss_[f"loss_bbox{postfix}"]
            loss[2] += loss_[f"loss_giou{postfix}"]
            # if masks is not None and gt_mask is not None:
            #     loss_ = self._get_loss_mask(aux_masks, gt_mask, match_indices, postfix)
            #     loss[3] += loss_[f'loss_mask{postfix}']
            #     loss[4] += loss_[f'loss_dice{postfix}']

        loss = {
            f"loss_class_aux{postfix}": loss[0],
            f"loss_bbox_aux{postfix}": loss[1],
            f"loss_giou_aux{postfix}": loss[2],
        }
        # if masks is not None and gt_mask is not None:
        #     loss[f'loss_mask_aux{postfix}'] = loss[3]
        #     loss[f'loss_dice_aux{postfix}'] = loss[4]
        return loss

    @staticmethod
    def _get_index(match_indices):
        """Returns batch indices, source indices, and destination indices from provided match indices."""
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(match_indices)])
        src_idx = torch.cat([src for (src, _) in match_indices])
        dst_idx = torch.cat([dst for (_, dst) in match_indices])
        return (batch_idx, src_idx), dst_idx

    def _get_assigned_bboxes(self, pred_bboxes, gt_bboxes, match_indices):
        """Assigns predicted bounding boxes to ground truth bounding boxes based on the match indices."""
        pred_assigned = torch.cat(
            [
                t[i] if len(i) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
                for t, (i, _) in zip(pred_bboxes, match_indices)
            ]
        )
        gt_assigned = torch.cat(
            [
                t[j] if len(j) > 0 else torch.zeros(0, t.shape[-1], device=self.device)
                for t, (_, j) in zip(gt_bboxes, match_indices)
            ]
        )
        return pred_assigned, gt_assigned

    def _get_loss(
        self,
        pred_bboxes,
        pred_scores,
        gt_bboxes,
        gt_cls,
        gt_groups,
        masks=None,
        gt_mask=None,
        postfix="",
        match_indices=None,
    ):
        """Get losses."""
        if match_indices is None:
            match_indices = self.matcher(
                pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=masks, gt_mask=gt_mask
            )

        idx, gt_idx = self._get_index(match_indices)
        pred_bboxes, gt_bboxes = pred_bboxes[idx], gt_bboxes[gt_idx]

        bs, nq = pred_scores.shape[:2]
        # Unmatched queries get the background class index self.nc
        targets = torch.full((bs, nq), self.nc, device=pred_scores.device, dtype=gt_cls.dtype)
        targets[idx] = gt_cls[gt_idx]

        # IoU of each matched prediction against its GT box is used as a soft classification target score
        gt_scores = torch.zeros([bs, nq], device=pred_scores.device)
        if len(gt_bboxes):
            gt_scores[idx] = bbox_iou(pred_bboxes.detach(), gt_bboxes, xywh=True).squeeze(-1)

        loss = {}
        loss.update(self._get_loss_class(pred_scores, targets, gt_scores, len(gt_bboxes), postfix))
        loss.update(self._get_loss_bbox(pred_bboxes, gt_bboxes, postfix))
        # if masks is not None and gt_mask is not None:
        #     loss.update(self._get_loss_mask(masks, gt_mask, match_indices, postfix))
        return loss

    def forward(self, pred_bboxes, pred_scores, batch, postfix="", **kwargs):
        """
        Compute the total loss over the last decoder layer plus optional auxiliary layers.

        Args:
            pred_bboxes (torch.Tensor): [l, b, query, 4]
            pred_scores (torch.Tensor): [l, b, query, num_classes]
            batch (dict): A dict includes:
                gt_cls (torch.Tensor) with shape [num_gts, ],
                gt_bboxes (torch.Tensor): [num_gts, 4],
                gt_groups (List(int)): a list of batch size length includes the number of gts of each image.
            postfix (str): postfix of loss name.
        """
        self.device = pred_bboxes.device
        match_indices = kwargs.get("match_indices", None)
        gt_cls, gt_bboxes, gt_groups = batch["cls"], batch["bboxes"], batch["gt_groups"]

        # Main loss from the final decoder layer predictions
        total_loss = self._get_loss(
            pred_bboxes[-1], pred_scores[-1], gt_bboxes, gt_cls, gt_groups, postfix=postfix, match_indices=match_indices
        )

        if self.aux_loss:
            total_loss.update(
                self._get_loss_aux(
                    pred_bboxes[:-1], pred_scores[:-1], gt_bboxes, gt_cls, gt_groups, match_indices, postfix
                )
            )

        return total_loss


class RTDETRDetectionLoss(DETRLoss):
    """
    Real-Time DEtection TRansformer (RT-DETR) Detection Loss class that extends the DETRLoss.

    This class computes the detection loss for the RT-DETR model, which includes the standard detection loss as well
    as an additional denoising training loss when provided with denoising metadata.
    """

    def forward(self, preds, batch, dn_bboxes=None, dn_scores=None, dn_meta=None):
        """
        Forward pass to compute the detection loss.

        Args:
            preds (tuple): Predicted bounding boxes and scores.
            batch (dict): Batch data containing ground truth information.
            dn_bboxes (torch.Tensor, optional): Denoising bounding boxes. Default is None.
            dn_scores (torch.Tensor, optional): Denoising scores. Default is None.
            dn_meta (dict, optional): Metadata for denoising. Default is None.

        Returns:
            (dict): Dictionary containing the total loss and, if applicable, the denoising loss.
        """
        pred_bboxes, pred_scores = preds
        total_loss = super().forward(pred_bboxes, pred_scores, batch)

        # Check for denoising metadata to compute denoising training loss
        if dn_meta is not None:
            dn_pos_idx, dn_num_group = dn_meta["dn_pos_idx"], dn_meta["dn_num_group"]
            assert len(batch["gt_groups"]) == len(dn_pos_idx)

            # Get the match indices for denoising
            match_indices = self.get_dn_match_indices(dn_pos_idx, dn_num_group, batch["gt_groups"])

            # Compute the denoising training loss
            dn_loss = super().forward(dn_bboxes, dn_scores, batch, postfix="_dn", match_indices=match_indices)
            total_loss.update(dn_loss)
        else:
            # If no denoising metadata is provided, set denoising loss to zero
            total_loss.update({f"{k}_dn": torch.tensor(0.0, device=self.device) for k in total_loss.keys()})

        return total_loss

    @staticmethod
    def get_dn_match_indices(dn_pos_idx, dn_num_group, gt_groups):
        """
        Get the match indices for denoising.

        Args:
            dn_pos_idx (List[torch.Tensor]): List of tensors containing positive indices for denoising.
            dn_num_group (int): Number of denoising groups.
            gt_groups (List[int]): List of integers representing the number of ground truths for each image.

        Returns:
            (List[tuple]): List of tuples containing matched indices for denoising.
        """
        dn_match_indices = []
        # Cumulative offsets so each image's GT indices are global within the batch
        idx_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0)
        for i, num_gt in enumerate(gt_groups):
            if num_gt > 0:
                gt_idx = torch.arange(end=num_gt, dtype=torch.long) + idx_groups[i]
                gt_idx = gt_idx.repeat(dn_num_group)
                assert len(dn_pos_idx[i]) == len(gt_idx), "Expected the same length, "
                f"but got {len(dn_pos_idx[i])} and {len(gt_idx)} respectively."
                dn_match_indices.append((dn_pos_idx[i], gt_idx))
            else:
                dn_match_indices.append((torch.zeros([0], dtype=torch.long), torch.zeros([0], dtype=torch.long)))
        return dn_match_indices
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/utils/loss.py
Python
unknown
15,134
# Ultralytics YOLO 🚀, AGPL-3.0 license

import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment

from ultralytics.utils.metrics import bbox_iou
from ultralytics.utils.ops import xywh2xyxy, xyxy2xywh


class HungarianMatcher(nn.Module):
    """
    A module implementing the HungarianMatcher, which is a differentiable module to solve the assignment problem in an
    end-to-end fashion.

    HungarianMatcher performs optimal assignment over the predicted and ground truth bounding boxes using a cost
    function that considers classification scores, bounding box coordinates, and optionally, mask predictions.

    Attributes:
        cost_gain (dict): Dictionary of cost coefficients: 'class', 'bbox', 'giou', 'mask', and 'dice'.
        use_fl (bool): Indicates whether to use Focal Loss for the classification cost calculation.
        with_mask (bool): Indicates whether the model makes mask predictions.
        num_sample_points (int): The number of sample points used in mask cost calculation.
        alpha (float): The alpha factor in Focal Loss calculation.
        gamma (float): The gamma factor in Focal Loss calculation.

    Methods:
        forward(pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None): Computes the
            assignment between predictions and ground truths for a batch.
        _cost_mask(bs, num_gts, masks=None, gt_mask=None): Computes the mask cost and dice cost if masks are predicted.
    """

    def __init__(self, cost_gain=None, use_fl=True, with_mask=False, num_sample_points=12544, alpha=0.25, gamma=2.0):
        """Initializes HungarianMatcher with cost coefficients, Focal Loss, mask prediction, sample points, and alpha
        gamma factors.
        """
        super().__init__()
        if cost_gain is None:
            cost_gain = {"class": 1, "bbox": 5, "giou": 2, "mask": 1, "dice": 1}
        self.cost_gain = cost_gain
        self.use_fl = use_fl
        self.with_mask = with_mask
        self.num_sample_points = num_sample_points
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, pred_bboxes, pred_scores, gt_bboxes, gt_cls, gt_groups, masks=None, gt_mask=None):
        """
        Forward pass for HungarianMatcher. This function computes costs based on prediction and ground truth
        (classification cost, L1 cost between boxes and GIoU cost between boxes) and finds the optimal matching between
        predictions and ground truth based on these costs.

        Args:
            pred_bboxes (Tensor): Predicted bounding boxes with shape [batch_size, num_queries, 4].
            pred_scores (Tensor): Predicted scores with shape [batch_size, num_queries, num_classes].
            gt_cls (torch.Tensor): Ground truth classes with shape [num_gts, ].
            gt_bboxes (torch.Tensor): Ground truth bounding boxes with shape [num_gts, 4].
            gt_groups (List[int]): List of length equal to batch size, containing the number of ground truths for each
                image.
            masks (Tensor, optional): Predicted masks with shape [batch_size, num_queries, height, width].
                Defaults to None.
            gt_mask (List[Tensor], optional): List of ground truth masks, each with shape [num_masks, Height, Width].
                Defaults to None.

        Returns:
            (List[Tuple[Tensor, Tensor]]): A list of size batch_size, each element is a tuple (index_i, index_j), where:
                - index_i is the tensor of indices of the selected predictions (in order)
                - index_j is the tensor of indices of the corresponding selected ground truth targets (in order)
                For each batch element, it holds:
                    len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, nq, nc = pred_scores.shape

        if sum(gt_groups) == 0:
            # No ground truths anywhere in the batch: every image gets an empty match
            return [(torch.tensor([], dtype=torch.long), torch.tensor([], dtype=torch.long)) for _ in range(bs)]

        # We flatten to compute the cost matrices in a batch
        # [batch_size * num_queries, num_classes]
        pred_scores = pred_scores.detach().view(-1, nc)
        pred_scores = F.sigmoid(pred_scores) if self.use_fl else F.softmax(pred_scores, dim=-1)
        # [batch_size * num_queries, 4]
        pred_bboxes = pred_bboxes.detach().view(-1, 4)

        # Compute the classification cost
        pred_scores = pred_scores[:, gt_cls]
        if self.use_fl:
            # Focal-loss-style classification cost (alpha/gamma weighted log terms)
            neg_cost_class = (1 - self.alpha) * (pred_scores**self.gamma) * (-(1 - pred_scores + 1e-8).log())
            pos_cost_class = self.alpha * ((1 - pred_scores) ** self.gamma) * (-(pred_scores + 1e-8).log())
            cost_class = pos_cost_class - neg_cost_class
        else:
            cost_class = -pred_scores

        # Compute the L1 cost between boxes
        cost_bbox = (pred_bboxes.unsqueeze(1) - gt_bboxes.unsqueeze(0)).abs().sum(-1)  # (bs*num_queries, num_gt)

        # Compute the GIoU cost between boxes, (bs*num_queries, num_gt)
        cost_giou = 1.0 - bbox_iou(pred_bboxes.unsqueeze(1), gt_bboxes.unsqueeze(0), xywh=True, GIoU=True).squeeze(-1)

        # Final cost matrix
        C = (
            self.cost_gain["class"] * cost_class
            + self.cost_gain["bbox"] * cost_bbox
            + self.cost_gain["giou"] * cost_giou
        )
        # Compute the mask cost and dice cost
        if self.with_mask:
            C += self._cost_mask(bs, gt_groups, masks, gt_mask)

        # Set invalid values (NaNs and infinities) to 0 (fixes ValueError: matrix contains invalid numeric entries)
        C[C.isnan() | C.isinf()] = 0.0

        C = C.view(bs, nq, -1).cpu()
        # Split per image (columns grouped by gt_groups) and solve each assignment with scipy's Hungarian algorithm
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(gt_groups, -1))]
        gt_groups = torch.as_tensor([0, *gt_groups[:-1]]).cumsum_(0)  # offsets to make GT indices global
        # (idx for queries, idx for gt)
        return [
            (torch.tensor(i, dtype=torch.long), torch.tensor(j, dtype=torch.long) + gt_groups[k])
            for k, (i, j) in enumerate(indices)
        ]

    # This function is for future RT-DETR Segment models
    # def _cost_mask(self, bs, num_gts, masks=None, gt_mask=None):
    #     assert masks is not None and gt_mask is not None, 'Make sure the input has `mask` and `gt_mask`'
    #     # all masks share the same set of points for efficient matching
    #     sample_points = torch.rand([bs, 1, self.num_sample_points, 2])
    #     sample_points = 2.0 * sample_points - 1.0
    #
    #     out_mask = F.grid_sample(masks.detach(), sample_points, align_corners=False).squeeze(-2)
    #     out_mask = out_mask.flatten(0, 1)
    #
    #     tgt_mask = torch.cat(gt_mask).unsqueeze(1)
    #     sample_points = torch.cat([a.repeat(b, 1, 1, 1) for a, b in zip(sample_points, num_gts) if b > 0])
    #     tgt_mask = F.grid_sample(tgt_mask, sample_points, align_corners=False).squeeze([1, 2])
    #
    #     with torch.cuda.amp.autocast(False):
    #         # binary cross entropy cost
    #         pos_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.ones_like(out_mask), reduction='none')
    #         neg_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.zeros_like(out_mask), reduction='none')
    #         cost_mask = torch.matmul(pos_cost_mask, tgt_mask.T) + torch.matmul(neg_cost_mask, 1 - tgt_mask.T)
    #         cost_mask /= self.num_sample_points
    #
    #         # dice cost
    #         out_mask = F.sigmoid(out_mask)
    #         numerator = 2 * torch.matmul(out_mask, tgt_mask.T)
    #         denominator = out_mask.sum(-1, keepdim=True) + tgt_mask.sum(-1).unsqueeze(0)
    #         cost_dice = 1 - (numerator + 1) / (denominator + 1)
    #
    #         C = self.cost_gain['mask'] * cost_mask + self.cost_gain['dice'] * cost_dice
    #     return C


def get_cdn_group(
    batch, num_classes, num_queries, class_embed, num_dn=100, cls_noise_ratio=0.5, box_noise_scale=1.0, training=False
):
    """
    Get contrastive denoising training group. This function creates a contrastive denoising training group with positive
    and negative samples from the ground truths (gt). It applies noise to the class labels and bounding box coordinates,
    and returns the modified labels, bounding boxes, attention mask and meta information.

    Args:
        batch (dict): A dict that includes 'gt_cls' (torch.Tensor with shape [num_gts, ]), 'gt_bboxes'
            (torch.Tensor with shape [num_gts, 4]), 'gt_groups' (List(int)) which is a list of batch size length
            indicating the number of gts of each image.
        num_classes (int): Number of classes.
        num_queries (int): Number of queries.
        class_embed (torch.Tensor): Embedding weights to map class labels to embedding space.
        num_dn (int, optional): Number of denoising. Defaults to 100.
        cls_noise_ratio (float, optional): Noise ratio for class labels. Defaults to 0.5.
        box_noise_scale (float, optional): Noise scale for bounding box coordinates. Defaults to 1.0.
        training (bool, optional): If it's in training mode. Defaults to False.

    Returns:
        (Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Dict]]): The modified class embeddings,
            bounding boxes, attention mask and meta information for denoising. If not in training mode or 'num_dn'
            is less than or equal to 0, the function returns None for all elements in the tuple.
    """
    if (not training) or num_dn <= 0:
        return None, None, None, None
    gt_groups = batch["gt_groups"]
    total_num = sum(gt_groups)
    max_nums = max(gt_groups)
    if max_nums == 0:
        return None, None, None, None

    num_group = num_dn // max_nums
    num_group = 1 if num_group == 0 else num_group
    # Pad gt to max_num of a batch
    bs = len(gt_groups)
    gt_cls = batch["cls"]  # (bs*num, )
    gt_bbox = batch["bboxes"]  # bs*num, 4
    b_idx = batch["batch_idx"]

    # Each group has positive and negative queries.
    dn_cls = gt_cls.repeat(2 * num_group)  # (2*num_group*bs*num, )
    dn_bbox = gt_bbox.repeat(2 * num_group, 1)  # 2*num_group*bs*num, 4
    dn_b_idx = b_idx.repeat(2 * num_group).view(-1)  # (2*num_group*bs*num, )

    # Positive and negative mask
    # (bs*num*num_group, ), the second total_num*num_group part as negative samples
    neg_idx = torch.arange(total_num * num_group, dtype=torch.long, device=gt_bbox.device) + num_group * total_num

    if cls_noise_ratio > 0:
        # Half of bbox prob
        mask = torch.rand(dn_cls.shape) < (cls_noise_ratio * 0.5)
        idx = torch.nonzero(mask).squeeze(-1)
        # Randomly put a new one here
        new_label = torch.randint_like(idx, 0, num_classes, dtype=dn_cls.dtype, device=dn_cls.device)
        dn_cls[idx] = new_label

    if box_noise_scale > 0:
        known_bbox = xywh2xyxy(dn_bbox)

        diff = (dn_bbox[..., 2:] * 0.5).repeat(1, 2) * box_noise_scale  # 2*num_group*bs*num, 4

        rand_sign = torch.randint_like(dn_bbox, 0, 2) * 2.0 - 1.0
        rand_part = torch.rand_like(dn_bbox)
        # Negative samples get extra displacement (in [1, 2)) so they move clearly off the GT box
        rand_part[neg_idx] += 1.0
        rand_part *= rand_sign
        known_bbox += rand_part * diff
        known_bbox.clip_(min=0.0, max=1.0)
        dn_bbox = xyxy2xywh(known_bbox)
        dn_bbox = torch.logit(dn_bbox, eps=1e-6)  # inverse sigmoid

    num_dn = int(max_nums * 2 * num_group)  # total denoising queries
    # class_embed = torch.cat([class_embed, torch.zeros([1, class_embed.shape[-1]], device=class_embed.device)])
    dn_cls_embed = class_embed[dn_cls]  # bs*num * 2 * num_group, 256
    padding_cls = torch.zeros(bs, num_dn, dn_cls_embed.shape[-1], device=gt_cls.device)
    padding_bbox = torch.zeros(bs, num_dn, 4, device=gt_bbox.device)

    map_indices = torch.cat([torch.tensor(range(num), dtype=torch.long) for num in gt_groups])
    pos_idx = torch.stack([map_indices + max_nums * i for i in range(num_group)], dim=0)

    map_indices = torch.cat([map_indices + max_nums * i for i in range(2 * num_group)])
    padding_cls[(dn_b_idx, map_indices)] = dn_cls_embed
    padding_bbox[(dn_b_idx, map_indices)] = dn_bbox

    tgt_size = num_dn + num_queries
    attn_mask = torch.zeros([tgt_size, tgt_size], dtype=torch.bool)
    # Match query cannot see the reconstruct
    attn_mask[num_dn:, :num_dn] = True
    # Reconstruct cannot see each other
    for i in range(num_group):
        if i == 0:
            attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True
        if i == num_group - 1:
            attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * i * 2] = True
        else:
            attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), max_nums * 2 * (i + 1) : num_dn] = True
            attn_mask[max_nums * 2 * i : max_nums * 2 * (i + 1), : max_nums * 2 * i] = True
    dn_meta = {
        "dn_pos_idx": [p.reshape(-1) for p in pos_idx.cpu().split(list(gt_groups), dim=1)],
        "dn_num_group": num_group,
        "dn_num_split": [num_dn, num_queries],
    }

    return (
        padding_cls.to(class_embed.device),
        padding_bbox.to(class_embed.device),
        attn_mask.to(class_embed.device),
        dn_meta,
    )
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/utils/ops.py
Python
unknown
13,244
# Ultralytics YOLO 🚀, AGPL-3.0 license from ultralytics.models.yolo import classify, detect, obb, pose, segment from .model import YOLO __all__ = "classify", "segment", "detect", "pose", "obb", "YOLO"
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/__init__.py
Python
unknown
207
# Ultralytics YOLO 🚀, AGPL-3.0 license from ultralytics.models.yolo.classify.predict import ClassificationPredictor from ultralytics.models.yolo.classify.train import ClassificationTrainer from ultralytics.models.yolo.classify.val import ClassificationValidator __all__ = "ClassificationPredictor", "ClassificationTrainer", "ClassificationValidator"
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/classify/__init__.py
Python
unknown
355
# Ultralytics YOLO 🚀, AGPL-3.0 license

import cv2
import torch
from PIL import Image

from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import DEFAULT_CFG, ops


class ClassificationPredictor(BasePredictor):
    """
    A class extending the BasePredictor class for prediction based on a classification model.

    Notes:
        - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.

    Example:
        ```python
        from ultralytics.utils import ASSETS
        from ultralytics.models.yolo.classify import ClassificationPredictor

        args = dict(model='yolov8n-cls.pt', source=ASSETS)
        predictor = ClassificationPredictor(overrides=args)
        predictor.predict_cli()
        ```
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """Initializes ClassificationPredictor setting the task to 'classify'."""
        super().__init__(cfg, overrides, _callbacks)
        self.args.task = "classify"
        # Fully-qualified name of the legacy ToTensor transform, used to detect old-format pipelines in preprocess()
        self._legacy_transform_name = "ultralytics.yolo.data.augment.ToTensor"

    def preprocess(self, img):
        """Converts input image to model-compatible data type."""
        if not isinstance(img, torch.Tensor):
            is_legacy_transform = any(
                self._legacy_transform_name in str(transform) for transform in self.transforms.transforms
            )
            if is_legacy_transform:  # to handle legacy transforms
                # Legacy pipeline consumes raw numpy arrays directly
                img = torch.stack([self.transforms(im) for im in img], dim=0)
            else:
                # Current pipeline expects RGB PIL images, so convert BGR numpy frames first
                img = torch.stack(
                    [self.transforms(Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))) for im in img], dim=0
                )
        img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
        return img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32

    def postprocess(self, preds, img, orig_imgs):
        """Post-processes predictions to return Results objects."""
        if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

        results = []
        for i, pred in enumerate(preds):
            orig_img = orig_imgs[i]
            img_path = self.batch[0][i]
            results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
        return results
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/classify/predict.py
Python
unknown
2,513
# Ultralytics YOLO 🚀, AGPL-3.0 license

import torch
import torchvision

from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
from ultralytics.utils.plotting import plot_images, plot_results
from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first


class ClassificationTrainer(BaseTrainer):
    """
    A class extending the BaseTrainer class for training based on a classification model.

    Notes:
        - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.

    Example:
        ```python
        from ultralytics.models.yolo.classify import ClassificationTrainer

        args = dict(model='yolov8n-cls.pt', data='imagenet10', epochs=3)
        trainer = ClassificationTrainer(overrides=args)
        trainer.train()
        ```
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """Initialize a ClassificationTrainer object with optional configuration overrides and callbacks."""
        if overrides is None:
            overrides = {}
        overrides["task"] = "classify"
        if overrides.get("imgsz") is None:
            overrides["imgsz"] = 224  # classification default input size
        super().__init__(cfg, overrides, _callbacks)

    def set_model_attributes(self):
        """Set the YOLO model's class names from the loaded dataset."""
        self.model.names = self.data["names"]

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Returns a modified PyTorch model configured for training YOLO."""
        model = ClassificationModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
        if weights:
            model.load(weights)

        for m in model.modules():
            if not self.args.pretrained and hasattr(m, "reset_parameters"):
                m.reset_parameters()
            if isinstance(m, torch.nn.Dropout) and self.args.dropout:
                m.p = self.args.dropout  # set dropout
        for p in model.parameters():
            p.requires_grad = True  # for training
        return model

    def setup_model(self):
        """
        Load, create or download model for any task.

        Returns:
            (dict | None): Checkpoint dict when loading from a *.pt file, otherwise None.

        Raises:
            FileNotFoundError: If the model name is not a local file, a YAML config, or a torchvision model.
        """
        if isinstance(self.model, torch.nn.Module):  # if model is loaded beforehand. No setup needed
            return

        model, ckpt = str(self.model), None
        # Load a YOLO model locally, from torchvision, or from Ultralytics assets
        if model.endswith(".pt"):
            self.model, ckpt = attempt_load_one_weight(model, device="cpu")
            for p in self.model.parameters():
                p.requires_grad = True  # for training
        elif model.split(".")[-1] in ("yaml", "yml"):
            self.model = self.get_model(cfg=model)
        elif model in torchvision.models.__dict__:
            self.model = torchvision.models.__dict__[model](weights="IMAGENET1K_V1" if self.args.pretrained else None)
        else:
            # BUGFIX: the exception was previously constructed but never raised, so an unknown
            # model name silently fell through to reshape_outputs() with an unloaded model
            raise FileNotFoundError(f"ERROR: model={model} not found locally or online. Please check model name.")
        ClassificationModel.reshape_outputs(self.model, self.data["nc"])

        return ckpt

    def build_dataset(self, img_path, mode="train", batch=None):
        """Creates a ClassificationDataset instance given an image path, and mode (train/test etc.)."""
        return ClassificationDataset(root=img_path, args=self.args, augment=mode == "train", prefix=mode)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        """Returns PyTorch DataLoader with transforms to preprocess images for inference."""
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode)

        loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank)
        # Attach inference transforms
        if mode != "train":
            if is_parallel(self.model):
                self.model.module.transforms = loader.dataset.torch_transforms
            else:
                self.model.transforms = loader.dataset.torch_transforms
        return loader

    def preprocess_batch(self, batch):
        """Preprocesses a batch of images and classes."""
        batch["img"] = batch["img"].to(self.device)
        batch["cls"] = batch["cls"].to(self.device)
        return batch

    def progress_string(self):
        """Returns a formatted string showing training progress."""
        return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
            "Epoch",
            "GPU_mem",
            *self.loss_names,
            "Instances",
            "Size",
        )

    def get_validator(self):
        """Returns an instance of ClassificationValidator for validation."""
        self.loss_names = ["loss"]
        return yolo.classify.ClassificationValidator(self.test_loader, self.save_dir, _callbacks=self.callbacks)

    def label_loss_items(self, loss_items=None, prefix="train"):
        """
        Returns a loss dict with labelled training loss items tensor.

        Not needed for classification but necessary for segmentation & detection
        """
        keys = [f"{prefix}/{x}" for x in self.loss_names]
        if loss_items is None:
            return keys
        loss_items = [round(float(loss_items), 5)]
        return dict(zip(keys, loss_items))

    def plot_metrics(self):
        """Plots metrics from a CSV file."""
        plot_results(file=self.csv, classify=True, on_plot=self.on_plot)  # save results.png

    def final_eval(self):
        """Evaluate trained model and save validation results."""
        for f in self.last, self.best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is self.best:
                    LOGGER.info(f"\nValidating {f}...")
                    self.validator.args.data = self.args.data
                    self.validator.args.plots = self.args.plots
                    self.metrics = self.validator(model=f)
                    self.metrics.pop("fitness", None)
                    self.run_callbacks("on_fit_epoch_end")
        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")

    def plot_training_samples(self, batch, ni):
        """Plots training samples with their annotations."""
        plot_images(
            images=batch["img"],
            batch_idx=torch.arange(len(batch["img"])),
            cls=batch["cls"].view(-1),  # warning: use .view(), not .squeeze() for Classify models
            fname=self.save_dir / f"train_batch{ni}.jpg",
            on_plot=self.on_plot,
        )
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/classify/train.py
Python
unknown
6,832
# Ultralytics YOLO 🚀, AGPL-3.0 license

import torch

from ultralytics.data import ClassificationDataset, build_dataloader
from ultralytics.engine.validator import BaseValidator
from ultralytics.utils import LOGGER
from ultralytics.utils.metrics import ClassifyMetrics, ConfusionMatrix
from ultralytics.utils.plotting import plot_images


class ClassificationValidator(BaseValidator):
    """
    A class extending the BaseValidator class for validation based on a classification model.

    Notes:
        - Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.

    Example:
        ```python
        from ultralytics.models.yolo.classify import ClassificationValidator

        args = dict(model='yolov8n-cls.pt', data='imagenet10')
        validator = ClassificationValidator(args=args)
        validator()
        ```
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """Initializes ClassificationValidator instance with args, dataloader, save_dir, and progress bar."""
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.targets = None  # accumulated ground-truth labels, filled by update_metrics()
        self.pred = None  # accumulated top-k predictions, filled by update_metrics()
        self.args.task = "classify"
        self.metrics = ClassifyMetrics()

    def get_desc(self):
        """Returns a formatted string summarizing classification metrics."""
        return ("%22s" + "%11s" * 2) % ("classes", "top1_acc", "top5_acc")

    def init_metrics(self, model):
        """Initialize confusion matrix, class names, and top-1 and top-5 accuracy."""
        self.names = model.names
        self.nc = len(model.names)
        self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf, task="classify")
        self.pred = []
        self.targets = []

    def preprocess(self, batch):
        """Preprocesses input batch and returns it."""
        batch["img"] = batch["img"].to(self.device, non_blocking=True)
        batch["img"] = batch["img"].half() if self.args.half else batch["img"].float()
        batch["cls"] = batch["cls"].to(self.device)
        return batch

    def update_metrics(self, preds, batch):
        """Updates running metrics with model predictions and batch targets."""
        # Keep at most the top-5 classes (fewer if the dataset has < 5 classes)
        n5 = min(len(self.names), 5)
        self.pred.append(preds.argsort(1, descending=True)[:, :n5])
        self.targets.append(batch["cls"])

    def finalize_metrics(self, *args, **kwargs):
        """Finalizes metrics of the model such as confusion_matrix and speed."""
        self.confusion_matrix.process_cls_preds(self.pred, self.targets)
        if self.args.plots:
            for normalize in True, False:
                self.confusion_matrix.plot(
                    save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot
                )
        self.metrics.speed = self.speed
        self.metrics.confusion_matrix = self.confusion_matrix
        self.metrics.save_dir = self.save_dir

    def get_stats(self):
        """Returns a dictionary of metrics obtained by processing targets and predictions."""
        self.metrics.process(self.targets, self.pred)
        return self.metrics.results_dict

    def build_dataset(self, img_path):
        """Creates and returns a ClassificationDataset instance using given image path and preprocessing parameters."""
        return ClassificationDataset(root=img_path, args=self.args, augment=False, prefix=self.args.split)

    def get_dataloader(self, dataset_path, batch_size):
        """Builds and returns a data loader for classification tasks with given parameters."""
        dataset = self.build_dataset(dataset_path)
        return build_dataloader(dataset, batch_size, self.args.workers, rank=-1)

    def print_results(self):
        """Prints evaluation metrics for YOLO object detection model."""
        pf = "%22s" + "%11.3g" * len(self.metrics.keys)  # print format
        LOGGER.info(pf % ("all", self.metrics.top1, self.metrics.top5))

    def plot_val_samples(self, batch, ni):
        """Plot validation image samples."""
        plot_images(
            images=batch["img"],
            batch_idx=torch.arange(len(batch["img"])),
            cls=batch["cls"].view(-1),  # warning: use .view(), not .squeeze() for Classify models
            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def plot_predictions(self, batch, preds, ni):
        """Plots predicted bounding boxes on input images and saves the result."""
        plot_images(
            batch["img"],
            batch_idx=torch.arange(len(batch["img"])),
            cls=torch.argmax(preds, dim=1),
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )  # pred
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/classify/val.py
Python
unknown
4,861
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Public interface of the YOLO detection sub-package."""

from .predict import DetectionPredictor
from .train import DetectionTrainer
from .val import DetectionValidator

__all__ = ("DetectionPredictor", "DetectionTrainer", "DetectionValidator")
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/detect/__init__.py
Python
unknown
229
# Ultralytics YOLO 🚀, AGPL-3.0 license from ultralytics.engine.predictor import BasePredictor from ultralytics.engine.results import Results from ultralytics.utils import ops class DetectionPredictor(BasePredictor): """ A class extending the BasePredictor class for prediction based on a detection model. Example: ```python from ultralytics.utils import ASSETS from ultralytics.models.yolo.detect import DetectionPredictor args = dict(model='yolov8n.pt', source=ASSETS) predictor = DetectionPredictor(overrides=args) predictor.predict_cli() ``` """ def postprocess(self, preds, img, orig_imgs): """Post-processes predictions and returns a list of Results objects.""" preds = ops.non_max_suppression( preds, self.args.conf, self.args.iou, agnostic=self.args.agnostic_nms, max_det=self.args.max_det, classes=self.args.classes, ) if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) results = [] for i, pred in enumerate(preds): orig_img = orig_imgs[i] pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) img_path = self.batch[0][i] results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred)) return results
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/detect/predict.py
Python
unknown
1,510
# Ultralytics YOLO 🚀, AGPL-3.0 license

import math
import random
from copy import copy

import numpy as np
import torch.nn as nn

from ultralytics.data import build_dataloader, build_yolo_dataset
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel
from ultralytics.utils import LOGGER, RANK
from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first


class DetectionTrainer(BaseTrainer):
    """
    A class extending the BaseTrainer class for training based on a detection model.

    Example:
        ```python
        from ultralytics.models.yolo.detect import DetectionTrainer

        args = dict(model='yolov8n.pt', data='coco8.yaml', epochs=3)
        trainer = DetectionTrainer(overrides=args)
        trainer.train()
        ```
    """

    def build_dataset(self, img_path, mode="train", batch=None):
        """
        Build YOLO Dataset.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
        """
        gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)  # grid size (max stride)
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
        """Construct and return dataloader for the given mode ('train' or 'val')."""
        assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode, batch_size)
        shuffle = mode == "train"
        if getattr(dataset, "rect", False) and shuffle:
            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
            shuffle = False
        workers = self.args.workers if mode == "train" else self.args.workers * 2
        return build_dataloader(dataset, batch_size, workers, shuffle, rank)  # return dataloader

    def preprocess_batch(self, batch):
        """Preprocesses a batch of images by scaling to [0, 1] and (optionally) multi-scale resizing."""
        batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
        if self.args.multi_scale:
            imgs = batch["img"]
            # BUGFIX: random.randrange() requires integer arguments (raises ValueError on Python >= 3.10
            # when given floats), so cast the 0.5x-1.5x bounds to int before sampling.
            sz = (
                random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1.5 + self.stride))
                // self.stride
                * self.stride
            )  # size
            sf = sz / max(imgs.shape[2:])  # scale factor
            if sf != 1:
                ns = [
                    math.ceil(x * sf / self.stride) * self.stride for x in imgs.shape[2:]
                ]  # new shape (stretched to gs-multiple)
                imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
            batch["img"] = imgs
        return batch

    def set_model_attributes(self):
        """Attach dataset-derived attributes (class count, names, hyperparameters) to the model."""
        # nl = de_parallel(self.model).model[-1].nl  # number of detection layers (to scale hyps)
        # self.args.box *= 3 / nl  # scale to layers
        # self.args.cls *= self.data["nc"] / 80 * 3 / nl  # scale to classes and layers
        # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
        self.model.nc = self.data["nc"]  # attach number of classes to model
        self.model.names = self.data["names"]  # attach class names to model
        self.model.args = self.args  # attach hyperparameters to model
        # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a YOLO detection model, optionally loading pretrained weights."""
        model = DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
        if weights:
            model.load(weights)
        return model

    def get_validator(self):
        """Returns a DetectionValidator for YOLO model validation."""
        self.loss_names = "box_loss", "cls_loss", "dfl_loss"
        return yolo.detect.DetectionValidator(
            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
        )

    def label_loss_items(self, loss_items=None, prefix="train"):
        """
        Returns a loss dict with labelled training loss items tensor.

        Not needed for classification but necessary for segmentation & detection
        """
        keys = [f"{prefix}/{x}" for x in self.loss_names]
        if loss_items is not None:
            loss_items = [round(float(x), 5) for x in loss_items]  # convert tensors to 5 decimal place floats
            return dict(zip(keys, loss_items))
        else:
            return keys

    def progress_string(self):
        """Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size."""
        return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
            "Epoch",
            "GPU_mem",
            *self.loss_names,
            "Instances",
            "Size",
        )

    def plot_training_samples(self, batch, ni):
        """Plots training samples with their annotations."""
        plot_images(
            images=batch["img"],
            batch_idx=batch["batch_idx"],
            cls=batch["cls"].squeeze(-1),
            bboxes=batch["bboxes"],
            paths=batch["im_file"],
            fname=self.save_dir / f"train_batch{ni}.jpg",
            on_plot=self.on_plot,
        )

    def plot_metrics(self):
        """Plots metrics from a CSV file."""
        plot_results(file=self.csv, on_plot=self.on_plot)  # save results.png

    def plot_training_labels(self):
        """Create a labeled training plot of the YOLO model."""
        boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0)
        cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0)
        plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/detect/train.py
Python
unknown
6,306
# Ultralytics YOLO 🚀, AGPL-3.0 license

import os
from pathlib import Path

import numpy as np
import torch

from ultralytics.data import build_dataloader, build_yolo_dataset, converter
from ultralytics.engine.validator import BaseValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
from ultralytics.utils.plotting import output_to_target, plot_images


class DetectionValidator(BaseValidator):
    """
    A class extending the BaseValidator class for validation based on a detection model.

    Example:
        ```python
        from ultralytics.models.yolo.detect import DetectionValidator

        args = dict(model='yolov8n.pt', data='coco8.yaml')
        validator = DetectionValidator(args=args)
        validator()
        ```
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """Initialize detection model with necessary variables and settings."""
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.nt_per_class = None  # number of targets per class; set in get_stats()
        self.is_coco = False
        self.class_map = None
        self.args.task = "detect"
        self.metrics = DetMetrics(save_dir=self.save_dir, on_plot=self.on_plot)
        self.iouv = torch.linspace(0.5, 0.95, 10)  # iou vector for mAP@0.5:0.95
        self.niou = self.iouv.numel()
        self.lb = []  # for autolabelling

    def preprocess(self, batch):
        """Preprocesses batch of images for YOLO training."""
        batch["img"] = batch["img"].to(self.device, non_blocking=True)
        batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
        for k in ["batch_idx", "cls", "bboxes"]:
            batch[k] = batch[k].to(self.device)

        if self.args.save_hybrid:
            height, width = batch["img"].shape[2:]
            nb = len(batch["img"])
            # Denormalize boxes to pixel coordinates before grouping per-image labels
            bboxes = batch["bboxes"] * torch.tensor((width, height, width, height), device=self.device)
            self.lb = (
                [
                    torch.cat([batch["cls"][batch["batch_idx"] == i], bboxes[batch["batch_idx"] == i]], dim=-1)
                    for i in range(nb)
                ]
                if self.args.save_hybrid
                else []
            )  # for autolabelling

        return batch

    def init_metrics(self, model):
        """Initialize evaluation metrics for YOLO."""
        val = self.data.get(self.args.split, "")  # validation path
        self.is_coco = isinstance(val, str) and "coco" in val and val.endswith(f"{os.sep}val2017.txt")  # is COCO
        self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(1000))
        self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
        self.names = model.names
        self.nc = len(model.names)
        self.metrics.names = self.names
        self.metrics.plot = self.args.plots
        self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf)
        self.seen = 0
        self.jdict = []
        self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[])

    def get_desc(self):
        """Return a formatted string summarizing class metrics of YOLO model."""
        return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)")

    def postprocess(self, preds):
        """Apply Non-maximum suppression to prediction outputs."""
        return ops.non_max_suppression(
            preds,
            self.args.conf,
            self.args.iou,
            labels=self.lb,
            multi_label=True,
            agnostic=self.args.single_cls,
            max_det=self.args.max_det,
        )

    def _prepare_batch(self, si, batch):
        """Prepares a batch of images and annotations for validation."""
        idx = batch["batch_idx"] == si  # select labels belonging to image si
        cls = batch["cls"][idx].squeeze(-1)
        bbox = batch["bboxes"][idx]
        ori_shape = batch["ori_shape"][si]
        imgsz = batch["img"].shape[2:]
        ratio_pad = batch["ratio_pad"][si]
        if len(cls):
            bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]  # target boxes
            ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad)  # native-space labels
        return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)

    def _prepare_pred(self, pred, pbatch):
        """Prepares a batch of images and annotations for validation."""
        predn = pred.clone()
        ops.scale_boxes(
            pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
        )  # native-space pred
        return predn

    def update_metrics(self, preds, batch):
        """Metrics."""
        for si, pred in enumerate(preds):
            self.seen += 1
            npr = len(pred)
            stat = dict(
                conf=torch.zeros(0, device=self.device),
                pred_cls=torch.zeros(0, device=self.device),
                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            )
            pbatch = self._prepare_batch(si, batch)
            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
            nl = len(cls)
            stat["target_cls"] = cls
            if npr == 0:
                # No predictions for this image: record targets only (keeps recall accounting correct)
                if nl:
                    for k in self.stats.keys():
                        self.stats[k].append(stat[k])
                    # TODO: obb has not supported confusion_matrix yet.
                    if self.args.plots and self.args.task != "obb":
                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                continue

            # Predictions
            if self.args.single_cls:
                pred[:, 5] = 0
            predn = self._prepare_pred(pred, pbatch)
            stat["conf"] = predn[:, 4]
            stat["pred_cls"] = predn[:, 5]

            # Evaluate
            if nl:
                stat["tp"] = self._process_batch(predn, bbox, cls)
                # TODO: obb has not supported confusion_matrix yet.
                if self.args.plots and self.args.task != "obb":
                    self.confusion_matrix.process_batch(predn, bbox, cls)
            for k in self.stats.keys():
                self.stats[k].append(stat[k])

            # Save
            if self.args.save_json:
                self.pred_to_json(predn, batch["im_file"][si])
            if self.args.save_txt:
                file = self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt'
                self.save_one_txt(predn, self.args.save_conf, pbatch["ori_shape"], file)

    def finalize_metrics(self, *args, **kwargs):
        """Set final values for metrics speed and confusion matrix."""
        self.metrics.speed = self.speed
        self.metrics.confusion_matrix = self.confusion_matrix

    def get_stats(self):
        """Returns metrics statistics and results dictionary."""
        stats = {k: torch.cat(v, 0).cpu().numpy() for k, v in self.stats.items()}  # to numpy
        if len(stats) and stats["tp"].any():
            self.metrics.process(**stats)
        self.nt_per_class = np.bincount(
            stats["target_cls"].astype(int), minlength=self.nc
        )  # number of targets per class
        return self.metrics.results_dict

    def print_results(self):
        """Prints training/validation set metrics per class."""
        pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys)  # print format
        LOGGER.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
        if self.nt_per_class.sum() == 0:
            LOGGER.warning(f"WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels")

        # Print results per class
        if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
            for i, c in enumerate(self.metrics.ap_class_index):
                LOGGER.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))

        if self.args.plots:
            for normalize in True, False:
                self.confusion_matrix.plot(
                    save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot
                )

    def _process_batch(self, detections, gt_bboxes, gt_cls):
        """
        Return correct prediction matrix.

        Args:
            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
                Each detection is of the format: x1, y1, x2, y2, conf, class.
            gt_bboxes (torch.Tensor): Tensor of shape [M, 4] representing ground-truth boxes.
                Each box is of the format: x1, y1, x2, y2.
            gt_cls (torch.Tensor): Tensor of shape [M] of ground-truth class indices.

        Returns:
            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
        """
        iou = box_iou(gt_bboxes, detections[:, :4])
        return self.match_predictions(detections[:, 5], gt_cls, iou)

    def build_dataset(self, img_path, mode="val", batch=None):
        """
        Build YOLO Dataset.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
        """
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride)

    def get_dataloader(self, dataset_path, batch_size):
        """Construct and return dataloader."""
        dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
        return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1)  # return dataloader

    def plot_val_samples(self, batch, ni):
        """Plot validation image samples."""
        plot_images(
            batch["img"],
            batch["batch_idx"],
            batch["cls"].squeeze(-1),
            batch["bboxes"],
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def plot_predictions(self, batch, preds, ni):
        """Plots predicted bounding boxes on input images and saves the result."""
        plot_images(
            batch["img"],
            *output_to_target(preds, max_det=self.args.max_det),
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )  # pred

    def save_one_txt(self, predn, save_conf, shape, file):
        """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
        gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
        for *xyxy, conf, cls in predn.tolist():
            xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
            with open(file, "a") as f:
                f.write(("%g " * len(line)).rstrip() % line + "\n")

    def pred_to_json(self, predn, filename):
        """Serialize YOLO predictions to COCO json format."""
        stem = Path(filename).stem
        image_id = int(stem) if stem.isnumeric() else stem  # COCO image ids are numeric stems
        box = ops.xyxy2xywh(predn[:, :4])  # xywh
        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
        for p, b in zip(predn.tolist(), box.tolist()):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(p[5])],
                    "bbox": [round(x, 3) for x in b],
                    "score": round(p[4], 5),
                }
            )

    def eval_json(self, stats):
        """Evaluates YOLO output in JSON format and returns performance statistics."""
        if self.args.save_json and self.is_coco and len(self.jdict):
            anno_json = self.data["path"] / "annotations/instances_val2017.json"  # annotations
            pred_json = self.save_dir / "predictions.json"  # predictions
            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                check_requirements("pycocotools>=2.0.6")
                from pycocotools.coco import COCO  # noqa
                from pycocotools.cocoeval import COCOeval  # noqa

                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                eval = COCOeval(anno, pred, "bbox")
                if self.is_coco:
                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2]  # update mAP50-95 and mAP50
            except Exception as e:
                LOGGER.warning(f"pycocotools unable to run: {e}")
        return stats
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/detect/val.py
Python
unknown
13,591
# Ultralytics YOLO 🚀, AGPL-3.0 license from ultralytics.engine.model import Model from ultralytics.models import yolo from ultralytics.nn.tasks import ClassificationModel, DetectionModel, OBBModel, PoseModel, SegmentationModel class YOLO(Model): """YOLO (You Only Look Once) object detection model.""" @property def task_map(self): """Map head to model, trainer, validator, and predictor classes.""" return { "classify": { "model": ClassificationModel, "trainer": yolo.classify.ClassificationTrainer, "validator": yolo.classify.ClassificationValidator, "predictor": yolo.classify.ClassificationPredictor, }, "detect": { "model": DetectionModel, "trainer": yolo.detect.DetectionTrainer, "validator": yolo.detect.DetectionValidator, "predictor": yolo.detect.DetectionPredictor, }, "segment": { "model": SegmentationModel, "trainer": yolo.segment.SegmentationTrainer, "validator": yolo.segment.SegmentationValidator, "predictor": yolo.segment.SegmentationPredictor, }, "pose": { "model": PoseModel, "trainer": yolo.pose.PoseTrainer, "validator": yolo.pose.PoseValidator, "predictor": yolo.pose.PosePredictor, }, "obb": { "model": OBBModel, "trainer": yolo.obb.OBBTrainer, "validator": yolo.obb.OBBValidator, "predictor": yolo.obb.OBBPredictor, }, }
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/model.py
Python
unknown
1,729
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Public interface of the YOLO oriented-bounding-box (OBB) sub-package."""

from .predict import OBBPredictor
from .train import OBBTrainer
from .val import OBBValidator

__all__ = ("OBBPredictor", "OBBTrainer", "OBBValidator")
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/obb/__init__.py
Python
unknown
193
# Ultralytics YOLO 🚀, AGPL-3.0 license import torch from ultralytics.engine.results import Results from ultralytics.models.yolo.detect.predict import DetectionPredictor from ultralytics.utils import DEFAULT_CFG, ops class OBBPredictor(DetectionPredictor): """ A class extending the DetectionPredictor class for prediction based on an Oriented Bounding Box (OBB) model. Example: ```python from ultralytics.utils import ASSETS from ultralytics.models.yolo.obb import OBBPredictor args = dict(model='yolov8n-obb.pt', source=ASSETS) predictor = OBBPredictor(overrides=args) predictor.predict_cli() ``` """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): """Initializes OBBPredictor with optional model and data configuration overrides.""" super().__init__(cfg, overrides, _callbacks) self.args.task = "obb" def postprocess(self, preds, img, orig_imgs): """Post-processes predictions and returns a list of Results objects.""" preds = ops.non_max_suppression( preds, self.args.conf, self.args.iou, agnostic=self.args.agnostic_nms, max_det=self.args.max_det, nc=len(self.model.names), classes=self.args.classes, rotated=True, ) if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) results = [] for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]): rboxes = ops.regularize_rboxes(torch.cat([pred[:, :4], pred[:, -1:]], dim=-1)) rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True) # xywh, r, conf, cls obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1) results.append(Results(orig_img, path=img_path, names=self.model.names, obb=obb)) return results
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/obb/predict.py
Python
unknown
2,037
# Ultralytics YOLO 🚀, AGPL-3.0 license from copy import copy from ultralytics.models import yolo from ultralytics.nn.tasks import OBBModel from ultralytics.utils import DEFAULT_CFG, RANK class OBBTrainer(yolo.detect.DetectionTrainer): """ A class extending the DetectionTrainer class for training based on an Oriented Bounding Box (OBB) model. Example: ```python from ultralytics.models.yolo.obb import OBBTrainer args = dict(model='yolov8n-seg.pt', data='coco8-seg.yaml', epochs=3) trainer = OBBTrainer(overrides=args) trainer.train() ``` """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): """Initialize a OBBTrainer object with given arguments.""" if overrides is None: overrides = {} overrides["task"] = "obb" super().__init__(cfg, overrides, _callbacks) def get_model(self, cfg=None, weights=None, verbose=True): """Return OBBModel initialized with specified config and weights.""" model = OBBModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1) if weights: model.load(weights) return model def get_validator(self): """Return an instance of OBBValidator for validation of YOLO model.""" self.loss_names = "box_loss", "cls_loss", "dfl_loss" return yolo.obb.OBBValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/obb/train.py
Python
unknown
1,477
# Ultralytics YOLO 🚀, AGPL-3.0 license

from pathlib import Path

import torch

from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.metrics import OBBMetrics, batch_probiou
from ultralytics.utils.plotting import output_to_rotated_target, plot_images


class OBBValidator(DetectionValidator):
    """
    A class extending the DetectionValidator class for validation based on an Oriented Bounding Box (OBB) model.

    Example:
        ```python
        from ultralytics.models.yolo.obb import OBBValidator

        args = dict(model='yolov8n-obb.pt', data='dota8.yaml')
        validator = OBBValidator(args=args)
        validator(model=args['model'])
        ```
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics."""
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.args.task = "obb"
        self.metrics = OBBMetrics(save_dir=self.save_dir, plot=True, on_plot=self.on_plot)

    def init_metrics(self, model):
        """Initialize evaluation metrics for YOLO."""
        super().init_metrics(model)
        val = self.data.get(self.args.split, "")  # validation path
        self.is_dota = isinstance(val, str) and "DOTA" in val  # is DOTA dataset

    def postprocess(self, preds):
        """Apply Non-maximum suppression to prediction outputs."""
        return ops.non_max_suppression(
            preds,
            self.args.conf,
            self.args.iou,
            labels=self.lb,
            nc=self.nc,
            multi_label=True,
            agnostic=self.args.single_cls,
            max_det=self.args.max_det,
            rotated=True,
        )

    def _process_batch(self, detections, gt_bboxes, gt_cls):
        """
        Return correct prediction matrix.

        Args:
            detections (torch.Tensor): Tensor of shape [N, 7] representing rotated detections.
                Columns 0-3 are box parameters, column 4 is conf, column 5 is class, the last
                column is the rotation angle.
            gt_bboxes (torch.Tensor): Tensor of shape [M, 5] of ground-truth rotated boxes (xywhr).
            gt_cls (torch.Tensor): Tensor of shape [M] of ground-truth class indices.

        Returns:
            (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
        """
        iou = batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
        return self.match_predictions(detections[:, 5], gt_cls, iou)

    def _prepare_batch(self, si, batch):
        """Prepares and returns a batch for OBB validation."""
        idx = batch["batch_idx"] == si  # select labels belonging to image si
        cls = batch["cls"][idx].squeeze(-1)
        bbox = batch["bboxes"][idx]
        ori_shape = batch["ori_shape"][si]
        imgsz = batch["img"].shape[2:]
        ratio_pad = batch["ratio_pad"][si]
        if len(cls):
            bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]])  # target boxes
            ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True)  # native-space labels
        return dict(cls=cls, bbox=bbox, ori_shape=ori_shape, imgsz=imgsz, ratio_pad=ratio_pad)

    def _prepare_pred(self, pred, pbatch):
        """Prepares and returns a batch for OBB validation with scaled and padded bounding boxes."""
        predn = pred.clone()
        ops.scale_boxes(
            pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
        )  # native-space pred
        return predn

    def plot_predictions(self, batch, preds, ni):
        """Plots predicted bounding boxes on input images and saves the result."""
        plot_images(
            batch["img"],
            *output_to_rotated_target(preds, max_det=self.args.max_det),
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )  # pred

    def pred_to_json(self, predn, filename):
        """Serialize YOLO predictions to COCO json format."""
        stem = Path(filename).stem
        image_id = int(stem) if stem.isnumeric() else stem
        rbox = torch.cat([predn[:, :4], predn[:, -1:]], dim=-1)  # xywh + angle
        poly = ops.xywhr2xyxyxyxy(rbox).view(-1, 8)  # 4-corner polygon representation
        for i, (r, b) in enumerate(zip(rbox.tolist(), poly.tolist())):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(predn[i, 5].item())],
                    "score": round(predn[i, 4].item(), 5),
                    "rbox": [round(x, 3) for x in r],
                    "poly": [round(x, 3) for x in b],
                }
            )

    def save_one_txt(self, predn, save_conf, shape, file):
        """Save YOLO detections to a txt file in normalized coordinates in a specific format."""
        gn = torch.tensor(shape)[[1, 0]]  # normalization gain whwh
        for *xywh, conf, cls, angle in predn.tolist():
            xywha = torch.tensor([*xywh, angle]).view(1, 5)
            xyxyxyxy = (ops.xywhr2xyxyxyxy(xywha) / gn).view(-1).tolist()  # normalized 4-corner polygon
            line = (cls, *xyxyxyxy, conf) if save_conf else (cls, *xyxyxyxy)  # label format
            with open(file, "a") as f:
                f.write(("%g " * len(line)).rstrip() % line + "\n")

    def eval_json(self, stats):
        """Evaluates YOLO output in JSON format and returns performance statistics."""
        if self.args.save_json and self.is_dota and len(self.jdict):
            import json
            import re
            from collections import defaultdict

            pred_json = self.save_dir / "predictions.json"  # predictions
            pred_txt = self.save_dir / "predictions_txt"  # predictions
            pred_txt.mkdir(parents=True, exist_ok=True)
            data = json.load(open(pred_json))
            # Save split results
            LOGGER.info(f"Saving predictions with DOTA format to {pred_txt}...")
            for d in data:
                image_id = d["image_id"]
                score = d["score"]
                classname = self.names[d["category_id"]].replace(" ", "-")
                p = d["poly"]

                with open(f'{pred_txt / f"Task1_{classname}"}.txt', "a") as f:
                    f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
            # Save merged results, this could result slightly lower map than using official merging script,
            # because of the probiou calculation.
            pred_merged_txt = self.save_dir / "predictions_merged_txt"  # predictions
            pred_merged_txt.mkdir(parents=True, exist_ok=True)
            merged_results = defaultdict(list)
            LOGGER.info(f"Saving merged predictions with DOTA format to {pred_merged_txt}...")
            for d in data:
                # DOTA tiles are named "<image>__<scale>__<x>___<y>"; recover the source image and tile offset
                image_id = d["image_id"].split("__")[0]
                pattern = re.compile(r"\d+___\d+")
                x, y = (int(c) for c in re.findall(pattern, d["image_id"])[0].split("___"))
                bbox, score, cls = d["rbox"], d["score"], d["category_id"]
                bbox[0] += x
                bbox[1] += y
                bbox.extend([score, cls])
                merged_results[image_id].append(bbox)
            for image_id, bbox in merged_results.items():
                bbox = torch.tensor(bbox)
                max_wh = torch.max(bbox[:, :2]).item() * 2
                c = bbox[:, 6:7] * max_wh  # classes
                scores = bbox[:, 5]  # scores
                b = bbox[:, :5].clone()
                b[:, :2] += c  # offset boxes by class so NMS is per-class
                # 0.3 could get results close to the ones from official merging script, even slightly better.
                i = ops.nms_rotated(b, scores, 0.3)
                bbox = bbox[i]

                b = ops.xywhr2xyxyxyxy(bbox[:, :5]).view(-1, 8)
                for x in torch.cat([b, bbox[:, 5:7]], dim=-1).tolist():
                    classname = self.names[int(x[-1])].replace(" ", "-")
                    p = [round(i, 3) for i in x[:-2]]  # poly
                    score = round(x[-2], 3)

                    with open(f'{pred_merged_txt / f"Task1_{classname}"}.txt', "a") as f:
                        f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")

        return stats
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/obb/val.py
Python
unknown
8,409
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Public interface of the YOLO pose-estimation sub-package."""

from .predict import PosePredictor
from .train import PoseTrainer
from .val import PoseValidator

__all__ = ("PoseTrainer", "PoseValidator", "PosePredictor")
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/pose/__init__.py
Python
unknown
199
# Ultralytics YOLO 🚀, AGPL-3.0 license from ultralytics.engine.results import Results from ultralytics.models.yolo.detect.predict import DetectionPredictor from ultralytics.utils import DEFAULT_CFG, LOGGER, ops class PosePredictor(DetectionPredictor): """ A class extending the DetectionPredictor class for prediction based on a pose model. Example: ```python from ultralytics.utils import ASSETS from ultralytics.models.yolo.pose import PosePredictor args = dict(model='yolov8n-pose.pt', source=ASSETS) predictor = PosePredictor(overrides=args) predictor.predict_cli() ``` """ def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None): """Initializes PosePredictor, sets task to 'pose' and logs a warning for using 'mps' as device.""" super().__init__(cfg, overrides, _callbacks) self.args.task = "pose" if isinstance(self.args.device, str) and self.args.device.lower() == "mps": LOGGER.warning( "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. " "See https://github.com/ultralytics/ultralytics/issues/4031." ) def postprocess(self, preds, img, orig_imgs): """Return detection results for a given input image or list of images.""" preds = ops.non_max_suppression( preds, self.args.conf, self.args.iou, agnostic=self.args.agnostic_nms, max_det=self.args.max_det, classes=self.args.classes, nc=len(self.model.names), ) if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list orig_imgs = ops.convert_torch2numpy_batch(orig_imgs) results = [] for i, pred in enumerate(preds): orig_img = orig_imgs[i] pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round() pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:] pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape) img_path = self.batch[0][i] results.append( Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts) ) return results
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/pose/predict.py
Python
unknown
2,404
# Ultralytics YOLO 🚀, AGPL-3.0 license

from copy import copy

from ultralytics.models import yolo
from ultralytics.nn.tasks import PoseModel
from ultralytics.utils import DEFAULT_CFG, LOGGER
from ultralytics.utils.plotting import plot_images, plot_results


class PoseTrainer(yolo.detect.DetectionTrainer):
    """
    A class extending the DetectionTrainer class for training based on a pose model.

    Example:
        ```python
        from ultralytics.models.yolo.pose import PoseTrainer

        args = dict(model='yolov8n-pose.pt', data='coco8-pose.yaml', epochs=3)
        trainer = PoseTrainer(overrides=args)
        trainer.train()
        ```
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """Initialize a PoseTrainer object with specified configurations and overrides."""
        if overrides is None:
            overrides = {}
        overrides["task"] = "pose"  # force the pose task regardless of any caller-supplied value
        super().__init__(cfg, overrides, _callbacks)

        # A known Apple MPS backend issue degrades pose results, so steer users to CPU (see linked issue)
        if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
            LOGGER.warning(
                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
                "See https://github.com/ultralytics/ultralytics/issues/4031."
            )

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Get pose estimation model with specified configuration and weights."""
        model = PoseModel(cfg, ch=3, nc=self.data["nc"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose)
        if weights:
            model.load(weights)

        return model

    def set_model_attributes(self):
        """Sets keypoints shape attribute of PoseModel."""
        super().set_model_attributes()
        # Propagate the dataset's keypoint shape (e.g. [17, 3]) onto the model
        self.model.kpt_shape = self.data["kpt_shape"]

    def get_validator(self):
        """Returns an instance of the PoseValidator class for validation."""
        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
        # copy(self.args) so validator-side mutations cannot affect training args
        return yolo.pose.PoseValidator(
            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
        )

    def plot_training_samples(self, batch, ni):
        """Plot a batch of training samples with annotated class labels, bounding boxes, and keypoints."""
        images = batch["img"]
        kpts = batch["keypoints"]
        cls = batch["cls"].squeeze(-1)
        bboxes = batch["bboxes"]
        paths = batch["im_file"]
        batch_idx = batch["batch_idx"]
        plot_images(
            images,
            batch_idx,
            cls,
            bboxes,
            kpts=kpts,
            paths=paths,
            fname=self.save_dir / f"train_batch{ni}.jpg",
            on_plot=self.on_plot,
        )

    def plot_metrics(self):
        """Plots training/val metrics."""
        plot_results(file=self.csv, pose=True, on_plot=self.on_plot)  # save results.png
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/pose/train.py
Python
unknown
2,926
# Ultralytics YOLO 🚀, AGPL-3.0 license

from pathlib import Path

import numpy as np
import torch

from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import OKS_SIGMA, PoseMetrics, box_iou, kpt_iou
from ultralytics.utils.plotting import output_to_target, plot_images


class PoseValidator(DetectionValidator):
    """
    A class extending the DetectionValidator class for validation based on a pose model.

    Example:
        ```python
        from ultralytics.models.yolo.pose import PoseValidator

        args = dict(model='yolov8n-pose.pt', data='coco8-pose.yaml')
        validator = PoseValidator(args=args)
        validator()
        ```
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """Initialize a 'PoseValidator' object with custom parameters and assigned attributes."""
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.sigma = None  # per-keypoint OKS sigmas, set in init_metrics()
        self.kpt_shape = None  # (num_keypoints, dims), set in init_metrics()
        self.args.task = "pose"
        self.metrics = PoseMetrics(save_dir=self.save_dir, on_plot=self.on_plot)
        # A known Apple MPS backend issue degrades pose results, so steer users to CPU (see linked issue)
        if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
            LOGGER.warning(
                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
                "See https://github.com/ultralytics/ultralytics/issues/4031."
            )

    def preprocess(self, batch):
        """Preprocesses the batch by converting the 'keypoints' data into a float and moving it to the device."""
        batch = super().preprocess(batch)
        batch["keypoints"] = batch["keypoints"].to(self.device).float()
        return batch

    def get_desc(self):
        """Returns description of evaluation metrics in string format."""
        return ("%22s" + "%11s" * 10) % (
            "Class",
            "Images",
            "Instances",
            "Box(P",
            "R",
            "mAP50",
            "mAP50-95)",
            "Pose(P",
            "R",
            "mAP50",
            "mAP50-95)",
        )

    def postprocess(self, preds):
        """Apply non-maximum suppression and return detections with high confidence scores."""
        # NOTE(review): agnostic is driven by single_cls here (not args.agnostic_nms) — matches the
        # detection validator convention; confirm against DetectionValidator.postprocess if changed
        return ops.non_max_suppression(
            preds,
            self.args.conf,
            self.args.iou,
            labels=self.lb,
            multi_label=True,
            agnostic=self.args.single_cls,
            max_det=self.args.max_det,
            nc=self.nc,
        )

    def init_metrics(self, model):
        """Initiate pose estimation metrics for YOLO model."""
        super().init_metrics(model)
        self.kpt_shape = self.data["kpt_shape"]
        is_pose = self.kpt_shape == [17, 3]  # COCO human-pose layout uses the standard OKS sigmas
        nkpt = self.kpt_shape[0]
        # Non-COCO layouts fall back to uniform sigmas that sum to 1
        self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
        self.stats = dict(tp_p=[], tp=[], conf=[], pred_cls=[], target_cls=[])

    def _prepare_batch(self, si, batch):
        """Prepares a batch for processing by converting keypoints to float and moving to device."""
        pbatch = super()._prepare_batch(si, batch)
        kpts = batch["keypoints"][batch["batch_idx"] == si]
        h, w = pbatch["imgsz"]
        kpts = kpts.clone()  # avoid mutating the cached batch tensors
        # Keypoints are stored normalized; scale to inference pixels, then map to original image space
        kpts[..., 0] *= w
        kpts[..., 1] *= h
        kpts = ops.scale_coords(pbatch["imgsz"], kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
        pbatch["kpts"] = kpts
        return pbatch

    def _prepare_pred(self, pred, pbatch):
        """Prepares and scales keypoints in a batch for pose processing."""
        predn = super()._prepare_pred(pred, pbatch)
        nk = pbatch["kpts"].shape[1]
        pred_kpts = predn[:, 6:].view(len(predn), nk, -1)
        # scale_coords modifies pred_kpts in place (it is a view into predn)
        ops.scale_coords(pbatch["imgsz"], pred_kpts, pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])
        return predn, pred_kpts

    def update_metrics(self, preds, batch):
        """Metrics."""
        for si, pred in enumerate(preds):
            self.seen += 1
            npr = len(pred)
            stat = dict(
                conf=torch.zeros(0, device=self.device),
                pred_cls=torch.zeros(0, device=self.device),
                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
                tp_p=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            )
            pbatch = self._prepare_batch(si, batch)
            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
            nl = len(cls)
            stat["target_cls"] = cls
            if npr == 0:
                # No predictions: record empty stats only when labels exist (counts as misses)
                if nl:
                    for k in self.stats.keys():
                        self.stats[k].append(stat[k])
                    if self.args.plots:
                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                continue

            # Predictions
            if self.args.single_cls:
                pred[:, 5] = 0  # collapse all predictions to class 0
            predn, pred_kpts = self._prepare_pred(pred, pbatch)
            stat["conf"] = predn[:, 4]
            stat["pred_cls"] = predn[:, 5]

            # Evaluate
            if nl:
                stat["tp"] = self._process_batch(predn, bbox, cls)  # box IoU matching
                stat["tp_p"] = self._process_batch(predn, bbox, cls, pred_kpts, pbatch["kpts"])  # OKS matching
                if self.args.plots:
                    self.confusion_matrix.process_batch(predn, bbox, cls)

            for k in self.stats.keys():
                self.stats[k].append(stat[k])

            # Save
            if self.args.save_json:
                self.pred_to_json(predn, batch["im_file"][si])
            # if self.args.save_txt:
            #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):
        """
        Return correct prediction matrix.

        Args:
            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
                Each detection is of the format: x1, y1, x2, y2, conf, class.
            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
                Each label is of the format: class, x1, y1, x2, y2.
            pred_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing predicted keypoints.
                51 corresponds to 17 keypoints each with 3 values.
            gt_kpts (torch.Tensor, optional): Tensor of shape [N, 51] representing ground truth keypoints.

        Returns:
            torch.Tensor: Correct prediction matrix of shape [N, 10] for 10 IoU levels.
        """
        if pred_kpts is not None and gt_kpts is not None:
            # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
            area = ops.xyxy2xywh(gt_bboxes)[:, 2:].prod(1) * 0.53
            iou = kpt_iou(gt_kpts, pred_kpts, sigma=self.sigma, area=area)
        else:  # boxes
            iou = box_iou(gt_bboxes, detections[:, :4])

        return self.match_predictions(detections[:, 5], gt_cls, iou)

    def plot_val_samples(self, batch, ni):
        """Plots and saves validation set samples with predicted bounding boxes and keypoints."""
        plot_images(
            batch["img"],
            batch["batch_idx"],
            batch["cls"].squeeze(-1),
            batch["bboxes"],
            kpts=batch["keypoints"],
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def plot_predictions(self, batch, preds, ni):
        """Plots predictions for YOLO model."""
        pred_kpts = torch.cat([p[:, 6:].view(-1, *self.kpt_shape) for p in preds], 0)
        plot_images(
            batch["img"],
            *output_to_target(preds, max_det=self.args.max_det),
            kpts=pred_kpts,
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )  # pred

    def pred_to_json(self, predn, filename):
        """Converts YOLO predictions to COCO JSON format."""
        stem = Path(filename).stem
        image_id = int(stem) if stem.isnumeric() else stem  # COCO image ids are numeric stems
        box = ops.xyxy2xywh(predn[:, :4])  # xywh
        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
        for p, b in zip(predn.tolist(), box.tolist()):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(p[5])],
                    "bbox": [round(x, 3) for x in b],
                    "keypoints": p[6:],
                    "score": round(p[4], 5),
                }
            )

    def eval_json(self, stats):
        """Evaluates object detection model using COCO JSON format."""
        if self.args.save_json and self.is_coco and len(self.jdict):
            anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json"  # annotations
            pred_json = self.save_dir / "predictions.json"  # predictions
            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                check_requirements("pycocotools>=2.0.6")
                from pycocotools.coco import COCO  # noqa
                from pycocotools.cocoeval import COCOeval  # noqa

                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "keypoints")]):
                    if self.is_coco:
                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                    eval.evaluate()
                    eval.accumulate()
                    eval.summarize()
                    # eval.stats[:2] is (mAP50-95, mAP50); idx maps bbox vs keypoints into metrics.keys
                    idx = i * 4 + 2
                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
                        :2
                    ]  # update mAP50-95 and mAP50
            except Exception as e:
                LOGGER.warning(f"pycocotools unable to run: {e}")
        return stats
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/pose/val.py
Python
unknown
10,607
# Ultralytics YOLO 🚀, AGPL-3.0 license from .predict import SegmentationPredictor from .train import SegmentationTrainer from .val import SegmentationValidator __all__ = "SegmentationPredictor", "SegmentationTrainer", "SegmentationValidator"
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/segment/__init__.py
Python
unknown
247
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.engine.results import Results
from ultralytics.models.yolo.detect.predict import DetectionPredictor
from ultralytics.utils import DEFAULT_CFG, ops


class SegmentationPredictor(DetectionPredictor):
    """
    A class extending the DetectionPredictor class for prediction based on a segmentation model.

    Example:
        ```python
        from ultralytics.utils import ASSETS
        from ultralytics.models.yolo.segment import SegmentationPredictor

        args = dict(model='yolov8n-seg.pt', source=ASSETS)
        predictor = SegmentationPredictor(overrides=args)
        predictor.predict_cli()
        ```
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """Initializes the SegmentationPredictor with the provided configuration, overrides, and callbacks."""
        super().__init__(cfg, overrides, _callbacks)
        self.args.task = "segment"

    def postprocess(self, preds, img, orig_imgs):
        """Applies non-max suppression and processes detections for each image in an input batch."""
        # preds[0] holds detections; NMS keeps mask coefficients in columns 6+ via nc
        p = ops.non_max_suppression(
            preds[0],
            self.args.conf,
            self.args.iou,
            agnostic=self.args.agnostic_nms,
            max_det=self.args.max_det,
            nc=len(self.model.names),
            classes=self.args.classes,
        )

        if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

        results = []
        proto = preds[1][-1] if isinstance(preds[1], tuple) else preds[1]  # tuple if PyTorch model or array if exported
        for i, pred in enumerate(p):
            orig_img = orig_imgs[i]
            img_path = self.batch[0][i]
            if not len(pred):  # save empty boxes
                masks = None
            elif self.args.retina_masks:
                # Retina masks: scale boxes first, then build masks at native image resolution
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
                masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2])  # HWC
            else:
                # Standard path: build masks at inference resolution, then scale boxes
                masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True)  # HWC
                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
            results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks))
        return results
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/segment/predict.py
Python
unknown
2,491
# Ultralytics YOLO 🚀, AGPL-3.0 license

from copy import copy

from ultralytics.models import yolo
from ultralytics.nn.tasks import SegmentationModel
from ultralytics.utils import DEFAULT_CFG, RANK
from ultralytics.utils.plotting import plot_images, plot_results


class SegmentationTrainer(yolo.detect.DetectionTrainer):
    """
    A class extending the DetectionTrainer class for training based on a segmentation model.

    Example:
        ```python
        from ultralytics.models.yolo.segment import SegmentationTrainer

        args = dict(model='yolov8n-seg.pt', data='coco8-seg.yaml', epochs=3)
        trainer = SegmentationTrainer(overrides=args)
        trainer.train()
        ```
    """

    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
        """Initialize a SegmentationTrainer object with given arguments."""
        overrides = {} if overrides is None else overrides
        overrides["task"] = "segment"  # pin the task to segmentation before delegating
        super().__init__(cfg, overrides, _callbacks)

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return SegmentationModel initialized with specified config and weights."""
        # Verbose output only from the main process (RANK == -1 means non-DDP / single process)
        seg_model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
        if weights:
            seg_model.load(weights)
        return seg_model

    def get_validator(self):
        """Return an instance of SegmentationValidator for validation of YOLO model."""
        self.loss_names = "box_loss", "seg_loss", "cls_loss", "dfl_loss"
        # copy(self.args) so validator-side mutations cannot affect training args
        return yolo.segment.SegmentationValidator(
            self.test_loader,
            save_dir=self.save_dir,
            args=copy(self.args),
            _callbacks=self.callbacks,
        )

    def plot_training_samples(self, batch, ni):
        """Creates a plot of training sample images with labels and box coordinates."""
        out_file = self.save_dir / f"train_batch{ni}.jpg"
        plot_images(
            batch["img"],
            batch["batch_idx"],
            batch["cls"].squeeze(-1),
            batch["bboxes"],
            masks=batch["masks"],
            paths=batch["im_file"],
            fname=out_file,
            on_plot=self.on_plot,
        )

    def plot_metrics(self):
        """Plots training/val metrics."""
        plot_results(file=self.csv, segment=True, on_plot=self.on_plot)  # save results.png
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/segment/train.py
Python
unknown
2,298
# Ultralytics YOLO 🚀, AGPL-3.0 license

from multiprocessing.pool import ThreadPool
from pathlib import Path

import numpy as np
import torch
import torch.nn.functional as F

from ultralytics.models.yolo.detect import DetectionValidator
from ultralytics.utils import LOGGER, NUM_THREADS, ops
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.metrics import SegmentMetrics, box_iou, mask_iou
from ultralytics.utils.plotting import output_to_target, plot_images


class SegmentationValidator(DetectionValidator):
    """
    A class extending the DetectionValidator class for validation based on a segmentation model.

    Example:
        ```python
        from ultralytics.models.yolo.segment import SegmentationValidator

        args = dict(model='yolov8n-seg.pt', data='coco8-seg.yaml')
        validator = SegmentationValidator(args=args)
        validator()
        ```
    """

    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
        """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics."""
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.plot_masks = None  # list of mask tensors collected for plotting, set in init_metrics()
        self.process = None  # mask post-processing function, chosen in init_metrics()
        self.args.task = "segment"
        self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot)

    def preprocess(self, batch):
        """Preprocesses batch by converting masks to float and sending to device."""
        batch = super().preprocess(batch)
        batch["masks"] = batch["masks"].to(self.device).float()
        return batch

    def init_metrics(self, model):
        """Initialize metrics and select mask processing function based on save_json flag."""
        super().init_metrics(model)
        self.plot_masks = []
        if self.args.save_json:
            check_requirements("pycocotools>=2.0.6")
            self.process = ops.process_mask_upsample  # more accurate
        else:
            self.process = ops.process_mask  # faster
        self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[])

    def get_desc(self):
        """Return a formatted description of evaluation metrics."""
        return ("%22s" + "%11s" * 10) % (
            "Class",
            "Images",
            "Instances",
            "Box(P",
            "R",
            "mAP50",
            "mAP50-95)",
            "Mask(P",
            "R",
            "mAP50",
            "mAP50-95)",
        )

    def postprocess(self, preds):
        """Post-processes YOLO predictions and returns output detections with proto."""
        # NOTE(review): agnostic is driven by single_cls here (not args.agnostic_nms) — matches the
        # detection validator convention; confirm against DetectionValidator.postprocess if changed
        p = ops.non_max_suppression(
            preds[0],
            self.args.conf,
            self.args.iou,
            labels=self.lb,
            multi_label=True,
            agnostic=self.args.single_cls,
            max_det=self.args.max_det,
            nc=self.nc,
        )
        proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
        return p, proto

    def _prepare_batch(self, si, batch):
        """Prepares a batch for training or inference by processing images and targets."""
        prepared_batch = super()._prepare_batch(si, batch)
        # With overlap_mask, all instances share one mask plane per image; otherwise select per-instance masks
        midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
        prepared_batch["masks"] = batch["masks"][midx]
        return prepared_batch

    def _prepare_pred(self, pred, pbatch, proto):
        """Prepares a batch for training or inference by processing images and targets."""
        predn = super()._prepare_pred(pred, pbatch)
        # Columns 6+ carry mask coefficients; combine with proto to produce per-instance masks
        pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=pbatch["imgsz"])
        return predn, pred_masks

    def update_metrics(self, preds, batch):
        """Metrics."""
        for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
            self.seen += 1
            npr = len(pred)
            stat = dict(
                conf=torch.zeros(0, device=self.device),
                pred_cls=torch.zeros(0, device=self.device),
                tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
                tp_m=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            )
            pbatch = self._prepare_batch(si, batch)
            cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
            nl = len(cls)
            stat["target_cls"] = cls
            if npr == 0:
                # No predictions: record empty stats only when labels exist (counts as misses)
                if nl:
                    for k in self.stats.keys():
                        self.stats[k].append(stat[k])
                    if self.args.plots:
                        self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                continue

            # Masks
            gt_masks = pbatch.pop("masks")
            # Predictions
            if self.args.single_cls:
                pred[:, 5] = 0  # collapse all predictions to class 0
            predn, pred_masks = self._prepare_pred(pred, pbatch, proto)
            stat["conf"] = predn[:, 4]
            stat["pred_cls"] = predn[:, 5]

            # Evaluate
            if nl:
                stat["tp"] = self._process_batch(predn, bbox, cls)  # box IoU matching
                stat["tp_m"] = self._process_batch(
                    predn, bbox, cls, pred_masks, gt_masks, self.args.overlap_mask, masks=True
                )  # mask IoU matching
                if self.args.plots:
                    self.confusion_matrix.process_batch(predn, bbox, cls)

            for k in self.stats.keys():
                self.stats[k].append(stat[k])

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if self.args.plots and self.batch_i < 3:
                self.plot_masks.append(pred_masks[:15].cpu())  # filter top 15 to plot

            # Save
            if self.args.save_json:
                # Rescale masks to original image shape before RLE encoding
                pred_masks = ops.scale_image(
                    pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
                    pbatch["ori_shape"],
                    ratio_pad=batch["ratio_pad"][si],
                )
                self.pred_to_json(predn, batch["im_file"][si], pred_masks)
            # if self.args.save_txt:
            #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

    def finalize_metrics(self, *args, **kwargs):
        """Sets speed and confusion matrix for evaluation metrics."""
        self.metrics.speed = self.speed
        self.metrics.confusion_matrix = self.confusion_matrix

    def _process_batch(self, detections, gt_bboxes, gt_cls, pred_masks=None, gt_masks=None, overlap=False, masks=False):
        """
        Return correct prediction matrix.

        Args:
            detections (array[N, 6]), x1, y1, x2, y2, conf, class
            labels (array[M, 5]), class, x1, y1, x2, y2

        Returns:
            correct (array[N, 10]), for 10 IoU levels
        """
        if masks:
            if overlap:
                # Overlap encoding stores instance i as pixel value i+1 in a single plane;
                # expand to one binary mask per instance
                nl = len(gt_cls)
                index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
                gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
                gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
            if gt_masks.shape[1:] != pred_masks.shape[1:]:
                # Resize GT masks to prediction resolution, then re-binarize
                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
                gt_masks = gt_masks.gt_(0.5)
            iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
        else:  # boxes
            iou = box_iou(gt_bboxes, detections[:, :4])

        return self.match_predictions(detections[:, 5], gt_cls, iou)

    def plot_val_samples(self, batch, ni):
        """Plots validation samples with bounding box labels."""
        plot_images(
            batch["img"],
            batch["batch_idx"],
            batch["cls"].squeeze(-1),
            batch["bboxes"],
            masks=batch["masks"],
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_labels.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )

    def plot_predictions(self, batch, preds, ni):
        """Plots batch predictions with masks and bounding boxes."""
        plot_images(
            batch["img"],
            *output_to_target(preds[0], max_det=15),  # not set to self.args.max_det due to slow plotting speed
            torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
            names=self.names,
            on_plot=self.on_plot,
        )  # pred
        self.plot_masks.clear()  # reset accumulator for the next plotted batch

    def pred_to_json(self, predn, filename, pred_masks):
        """
        Save one JSON result.

        Examples:
            >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
        """
        from pycocotools.mask import encode  # noqa

        def single_encode(x):
            """Encode predicted masks as RLE and append results to jdict."""
            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
            rle["counts"] = rle["counts"].decode("utf-8")  # JSON requires str, not bytes
            return rle

        stem = Path(filename).stem
        image_id = int(stem) if stem.isnumeric() else stem  # COCO image ids are numeric stems
        box = ops.xyxy2xywh(predn[:, :4])  # xywh
        box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
        pred_masks = np.transpose(pred_masks, (2, 0, 1))  # HWC -> CHW for per-instance encoding
        with ThreadPool(NUM_THREADS) as pool:
            rles = pool.map(single_encode, pred_masks)  # RLE encoding is slow; parallelize across masks
        for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
            self.jdict.append(
                {
                    "image_id": image_id,
                    "category_id": self.class_map[int(p[5])],
                    "bbox": [round(x, 3) for x in b],
                    "score": round(p[4], 5),
                    "segmentation": rles[i],
                }
            )

    def eval_json(self, stats):
        """Return COCO-style object detection evaluation metrics."""
        if self.args.save_json and self.is_coco and len(self.jdict):
            anno_json = self.data["path"] / "annotations/instances_val2017.json"  # annotations
            pred_json = self.save_dir / "predictions.json"  # predictions
            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                check_requirements("pycocotools>=2.0.6")
                from pycocotools.coco import COCO  # noqa
                from pycocotools.cocoeval import COCOeval  # noqa

                for x in anno_json, pred_json:
                    assert x.is_file(), f"{x} file not found"
                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]):
                    if self.is_coco:
                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                    eval.evaluate()
                    eval.accumulate()
                    eval.summarize()
                    # eval.stats[:2] is (mAP50-95, mAP50); idx maps bbox vs segm into metrics.keys
                    idx = i * 4 + 2
                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
                        :2
                    ]  # update mAP50-95 and mAP50
            except Exception as e:
                LOGGER.warning(f"pycocotools unable to run: {e}")
        return stats
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/models/yolo/segment/val.py
Python
unknown
11,745
# Ultralytics YOLO 🚀, AGPL-3.0 license from .tasks import ( BaseModel, ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight, attempt_load_weights, guess_model_scale, guess_model_task, parse_model, torch_safe_load, yaml_model_load, ) __all__ = ( "attempt_load_one_weight", "attempt_load_weights", "parse_model", "yaml_model_load", "guess_model_task", "guess_model_scale", "torch_safe_load", "DetectionModel", "SegmentationModel", "ClassificationModel", "BaseModel", )
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/__init__.py
Python
unknown
587
# Ultralytics YOLO 🚀, AGPL-3.0 license import ast import contextlib import json import platform import zipfile from collections import OrderedDict, namedtuple from pathlib import Path import cv2 import numpy as np import torch import torch.nn as nn from PIL import Image from ultralytics.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml from ultralytics.utils.downloads import attempt_download_asset, is_url def check_class_names(names): """ Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts. """ if isinstance(names, list): # names is a list names = dict(enumerate(names)) # convert to dict if isinstance(names, dict): # Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True' names = {int(k): str(v) for k, v in names.items()} n = len(names) if max(names.keys()) >= n: raise KeyError( f"{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices " f"{min(names.keys())}-{max(names.keys())} defined in your dataset YAML." ) if isinstance(names[0], str) and names[0].startswith("n0"): # imagenet class codes, i.e. 'n01440764' names_map = yaml_load(ROOT / "cfg/datasets/ImageNet.yaml")["map"] # human-readable names names = {k: names_map[v] for k, v in names.items()} return names def default_class_names(data=None): """Applies default class names to an input YAML file or returns numerical class names.""" if data: with contextlib.suppress(Exception): return yaml_load(check_yaml(data))["names"] return {i: f"class{i}" for i in range(999)} # return default if above errors class AutoBackend(nn.Module): """ Handles dynamic backend selection for running inference using Ultralytics YOLO models. The AutoBackend class is designed to provide an abstraction layer for various inference engines. 
It supports a wide range of formats, each with specific naming conventions as outlined below: Supported Formats and Naming Conventions: | Format | File Suffix | |-----------------------|------------------| | PyTorch | *.pt | | TorchScript | *.torchscript | | ONNX Runtime | *.onnx | | ONNX OpenCV DNN | *.onnx (dnn=True)| | OpenVINO | *openvino_model/ | | CoreML | *.mlpackage | | TensorRT | *.engine | | TensorFlow SavedModel | *_saved_model | | TensorFlow GraphDef | *.pb | | TensorFlow Lite | *.tflite | | TensorFlow Edge TPU | *_edgetpu.tflite | | PaddlePaddle | *_paddle_model | | ncnn | *_ncnn_model | This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy models across various platforms. """ @torch.no_grad() def __init__( self, weights="yolov8n.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True, verbose=True, ): """ Initialize the AutoBackend for inference. Args: weights (str): Path to the model weights file. Defaults to 'yolov8n.pt'. device (torch.device): Device to run the model on. Defaults to CPU. dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False. data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional. fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False. fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True. verbose (bool): Enable verbose logging. Defaults to True. 
""" super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) nn_module = isinstance(weights, torch.nn.Module) ( pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn, triton, ) = self._model_type(w) fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16 nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride model, metadata = None, None # Set device cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA if cuda and not any([nn_module, pt, jit, engine, onnx]): # GPU dataloader formats device = torch.device("cpu") cuda = False # Download if not local if not (pt or triton or nn_module): w = attempt_download_asset(w) # Load model if nn_module: # in-memory PyTorch model model = weights.to(device) model = model.fuse(verbose=verbose) if fuse else model if hasattr(model, "kpt_shape"): kpt_shape = model.kpt_shape # pose-only stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, "module") else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() pt = True elif pt: # PyTorch from ultralytics.nn.tasks import attempt_load_weights model = attempt_load_weights( weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse ) if hasattr(model, "kpt_shape"): kpt_shape = model.kpt_shape # pose-only stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, "module") else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f"Loading {w} for TorchScript inference...") extra_files = {"config.txt": ""} # model metadata model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else 
model.float() if extra_files["config.txt"]: # load metadata dict metadata = json.loads(extra_files["config.txt"], object_hook=lambda x: dict(x.items())) elif dnn: # ONNX OpenCV DNN LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...") check_requirements("opencv-python>=4.5.4") net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f"Loading {w} for ONNX Runtime inference...") check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime")) import onnxruntime providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if cuda else ["CPUExecutionProvider"] session = onnxruntime.InferenceSession(w, providers=providers) output_names = [x.name for x in session.get_outputs()] metadata = session.get_modelmeta().custom_metadata_map # metadata elif xml: # OpenVINO LOGGER.info(f"Loading {w} for OpenVINO inference...") check_requirements("openvino>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch # noqa core = Core() w = Path(w) if not w.is_file(): # if not *.xml w = next(w.glob("*.xml")) # get *.xml file from *_openvino_model dir ov_model = core.read_model(model=str(w), weights=w.with_suffix(".bin")) if ov_model.get_parameters()[0].get_layout().empty: ov_model.get_parameters()[0].set_layout(Layout("NCHW")) batch_dim = get_batch(ov_model) if batch_dim.is_static: batch_size = batch_dim.get_length() ov_compiled_model = core.compile_model(ov_model, device_name="AUTO") # AUTO selects best available device metadata = w.parent / "metadata.yaml" elif engine: # TensorRT LOGGER.info(f"Loading {w} for TensorRT inference...") try: import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download except ImportError: if LINUX: check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com") import tensorrt as trt # noqa check_version(trt.__version__, "7.0.0", hard=True) # require tensorrt>=7.0.0 if device.type == "cpu": device = 
torch.device("cuda:0") Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr")) logger = trt.Logger(trt.Logger.INFO) # Read file with open(w, "rb") as f, trt.Runtime(logger) as runtime: meta_len = int.from_bytes(f.read(4), byteorder="little") # read metadata length metadata = json.loads(f.read(meta_len).decode("utf-8")) # read metadata model = runtime.deserialize_cuda_engine(f.read()) # read engine context = model.create_execution_context() bindings = OrderedDict() output_names = [] fp16 = False # default updated below dynamic = False for i in range(model.num_bindings): name = model.get_binding_name(i) dtype = trt.nptype(model.get_binding_dtype(i)) if model.binding_is_input(i): if -1 in tuple(model.get_binding_shape(i)): # dynamic dynamic = True context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True else: # output output_names.append(name) shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size elif coreml: # CoreML LOGGER.info(f"Loading {w} for CoreML inference...") import coremltools as ct model = ct.models.MLModel(w) metadata = dict(model.user_defined_metadata) elif saved_model: # TF SavedModel LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...") import tensorflow as tf keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) metadata = Path(w) / "metadata.yaml" elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...") import tensorflow as tf from ultralytics.engine.exporter import gd_outputs def wrap_frozen_graph(gd, inputs, outputs): """Wrap frozen graphs for 
deployment.""" x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, "rb") as f: gd.ParseFromString(f.read()) frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...") delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[ platform.system() ] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # TFLite LOGGER.info(f"Loading {w} for TensorFlow Lite inference...") interpreter = Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs # Load metadata with contextlib.suppress(zipfile.BadZipFile): with zipfile.ZipFile(w, "r") as model: meta_file = model.namelist()[0] metadata = ast.literal_eval(model.read(meta_file).decode("utf-8")) elif tfjs: # TF.js raise NotImplementedError("YOLOv8 TF.js inference is not currently supported.") elif paddle: # PaddlePaddle LOGGER.info(f"Loading {w} for PaddlePaddle inference...") check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle") import paddle.inference as pdi # noqa w = Path(w) if not w.is_file(): # if not *.pdmodel w = 
next(w.rglob("*.pdmodel")) # get *.pdmodel file from *_paddle_model dir config = pdi.Config(str(w), str(w.with_suffix(".pdiparams"))) if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() metadata = w.parents[1] / "metadata.yaml" elif ncnn: # ncnn LOGGER.info(f"Loading {w} for ncnn inference...") check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires ncnn import ncnn as pyncnn net = pyncnn.Net() net.opt.use_vulkan_compute = cuda w = Path(w) if not w.is_file(): # if not *.param w = next(w.glob("*.param")) # get *.param file from *_ncnn_model dir net.load_param(str(w)) net.load_model(str(w.with_suffix(".bin"))) metadata = w.parent / "metadata.yaml" elif triton: # NVIDIA Triton Inference Server check_requirements("tritonclient[all]") from ultralytics.utils.triton import TritonRemoteModel model = TritonRemoteModel(w) else: from ultralytics.engine.exporter import export_formats raise TypeError( f"model='{w}' is not a supported model format. " "See https://docs.ultralytics.com/modes/predict for help." 
f"\n\n{export_formats()}" ) # Load external metadata YAML if isinstance(metadata, (str, Path)) and Path(metadata).exists(): metadata = yaml_load(metadata) if metadata: for k, v in metadata.items(): if k in ("stride", "batch"): metadata[k] = int(v) elif k in ("imgsz", "names", "kpt_shape") and isinstance(v, str): metadata[k] = eval(v) stride = metadata["stride"] task = metadata["task"] batch = metadata["batch"] imgsz = metadata["imgsz"] names = metadata["names"] kpt_shape = metadata.get("kpt_shape") elif not (pt or triton or nn_module): LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'") # Check names if "names" not in locals(): # names missing names = default_class_names(data) names = check_class_names(names) # Disable gradients if pt: for p in model.parameters(): p.requires_grad = False self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False, embed=None): """ Runs inference on the YOLOv8 MultiBackend model. Args: im (torch.Tensor): The image tensor to perform inference on. augment (bool): whether to perform data augmentation during inference, defaults to False visualize (bool): whether to visualize the output predictions, defaults to False embed (list, optional): A list of feature vectors/embeddings to return. 
Returns: (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True) """ b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != torch.float16: im = im.half() # to FP16 if self.nhwc: im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pt or self.nn_module: # PyTorch y = self.model(im, augment=augment, visualize=visualize, embed=embed) elif self.jit: # TorchScript y = self.model(im) elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = list(self.ov_compiled_model(im).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings["images"].shape: i = self.model.get_binding_index("images") self.context.set_binding_shape(i, im.shape) # reshape if dynamic self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape) for name in self.output_names: i = self.model.get_binding_index(name) self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) s = self.bindings["images"].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs["images"] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML im = im[0].cpu().numpy() im_pil = Image.fromarray((im * 255).astype("uint8")) # im = im.resize((192, 320), Image.BILINEAR) y = self.model.predict({"image": im_pil}) # coordinates are xywh normalized if "confidence" in y: raise TypeError( "Ultralytics only supports inference of non-pipelined CoreML models exported with " f"'nms=False', but 'model={w}' has an NMS pipeline 
created by an 'nms=True' export." ) # TODO: CoreML NMS inference handling # from ultralytics.utils.ops import xywh2xyxy # box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels # conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32) # y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) elif len(y) == 1: # classification model y = list(y.values()) elif len(y) == 2: # segmentation model y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) self.predictor.run() y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] elif self.ncnn: # ncnn mat_in = self.pyncnn.Mat(im[0].cpu().numpy()) ex = self.net.create_extractor() input_names, output_names = self.net.input_names(), self.net.output_names() ex.input(input_names[0], mat_in) y = [] for output_name in output_names: mat_out = self.pyncnn.Mat() ex.extract(output_name, mat_out) y.append(np.array(mat_out)[None]) elif self.triton: # NVIDIA Triton Inference Server im = im.cpu().numpy() # torch to numpy y = self.model(im) else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) if not isinstance(y, list): y = [y] elif self.pb: # GraphDef y = self.frozen_func(x=self.tf.constant(im)) if len(y) == 2 and len(self.names) == 999: # segments and names not defined ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) self.names = {i: f"class{i}" for i in range(nc)} else: # Lite or Edge TPU details = self.input_details[0] integer = details["dtype"] in (np.int8, np.int16) # is TFLite quantized int8 or int16 model if integer: scale, zero_point = details["quantization"] im = (im / scale + 
zero_point).astype(details["dtype"]) # de-scale self.interpreter.set_tensor(details["index"], im) self.interpreter.invoke() y = [] for output in self.output_details: x = self.interpreter.get_tensor(output["index"]) if integer: scale, zero_point = output["quantization"] x = (x.astype(np.float32) - zero_point) * scale # re-scale if x.ndim > 2: # if task is not classification # Denormalize xywh by image size. See https://github.com/ultralytics/ultralytics/pull/1695 # xywh are normalized in TFLite/EdgeTPU to mitigate quantization error of integer models x[:, [0, 2]] *= w x[:, [1, 3]] *= h y.append(x) # TF segment fixes: export is reversed vs ONNX export and protos are transposed if len(y) == 2: # segment with (det, proto) output order reversed if len(y[1].shape) != 4: y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160) y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] # for x in y: # print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes if isinstance(y, (list, tuple)): return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] else: return self.from_numpy(y) def from_numpy(self, x): """ Convert a numpy array to a tensor. Args: x (np.ndarray): The array to be converted. Returns: (torch.Tensor): The converted tensor """ return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=(1, 3, 640, 640)): """ Warm up the model by running one forward pass with a dummy input. 
Args: imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width) """ warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module if any(warmup_types) and (self.device.type != "cpu" or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): self.forward(im) # warmup @staticmethod def _model_type(p="path/to/model.pt"): """ This function takes a path to a model file and returns the model type. Possibles types are pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, ncnn or paddle. Args: p: path to the model file. Defaults to path/to/model.pt Examples: >>> model = AutoBackend(weights="path/to/model.onnx") >>> model_type = model._model_type() # returns "onnx" """ from ultralytics.engine.exporter import export_formats sf = list(export_formats().Suffix) # export suffixes if not is_url(p, check=False) and not isinstance(p, str): check_suffix(p, sf) # checks name = Path(p).name types = [s in name for s in sf] types[5] |= name.endswith(".mlmodel") # retain support for older Apple CoreML *.mlmodel formats types[8] &= not types[9] # tflite &= not edgetpu if any(types): triton = False else: from urllib.parse import urlsplit url = urlsplit(p) triton = url.netloc and url.path and url.scheme in {"http", "grpc"} return types + [triton]
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/autobackend.py
Python
unknown
27,069
# Ultralytics YOLO 🚀, AGPL-3.0 license """ Ultralytics modules. Example: Visualize a module with Netron. ```python from ultralytics.nn.modules import * import torch import os x = torch.ones(1, 128, 40, 40) m = Conv(128, 128) f = f'{m._get_name()}.onnx' torch.onnx.export(m, x, f) os.system(f'onnxsim {f} {f} && open {f}') ``` """ from .block import ( C1, C2, C3, C3TR, DFL, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, GhostBottleneck, HGBlock, HGStem, Proto, RepC3, ResNetLayer, ) from .conv import ( CBAM, ChannelAttention, Concat, Conv, Conv2, ConvTranspose, DWConv, DWConvTranspose2d, Focus, GhostConv, LightConv, RepConv, SpatialAttention, ) from .head import OBB, Classify, Detect, Pose, RTDETRDecoder, Segment from .transformer import ( AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d, MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer, ) __all__ = ( "Conv", "Conv2", "LightConv", "RepConv", "DWConv", "DWConvTranspose2d", "ConvTranspose", "Focus", "GhostConv", "ChannelAttention", "SpatialAttention", "CBAM", "Concat", "TransformerLayer", "TransformerBlock", "MLPBlock", "LayerNorm2d", "DFL", "HGBlock", "HGStem", "SPP", "SPPF", "C1", "C2", "C3", "C2f", "C3x", "C3TR", "C3Ghost", "GhostBottleneck", "Bottleneck", "BottleneckCSP", "Proto", "Detect", "Segment", "Pose", "Classify", "TransformerEncoderLayer", "RepC3", "RTDETRDecoder", "AIFI", "DeformableTransformerDecoder", "DeformableTransformerDecoderLayer", "MSDeformAttn", "MLP", "ResNetLayer", "OBB", )
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/__init__.py
Python
unknown
1,954
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Block modules: building blocks (DFL, HG*, SPP*, C*, Bottleneck*, ResNet*) used by Ultralytics model heads/backbones."""

import torch
import torch.nn as nn
import torch.nn.functional as F

from .conv import Conv, DWConv, GhostConv, LightConv, RepConv
from .transformer import TransformerBlock

# NOTE(review): ResNetBlock is defined below but absent from __all__ — confirm whether it should be exported.
__all__ = (
    "DFL",
    "HGBlock",
    "HGStem",
    "SPP",
    "SPPF",
    "C1",
    "C2",
    "C3",
    "C2f",
    "C3x",
    "C3TR",
    "C3Ghost",
    "GhostBottleneck",
    "Bottleneck",
    "BottleneckCSP",
    "Proto",
    "RepC3",
    "ResNetLayer",
)


class DFL(nn.Module):
    """
    Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
    """

    def __init__(self, c1=16):
        """Initialize a fixed (non-trainable) 1x1 conv whose weights 0..c1-1 compute the distribution expectation."""
        super().__init__()
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        x = torch.arange(c1, dtype=torch.float)
        # Weights are the bin indices, so conv(softmax(...)) is a weighted mean (expectation) over the c1 bins.
        self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
        self.c1 = c1  # number of DFL bins per box side

    def forward(self, x):
        """Decode per-side bin distributions of shape (b, 4*c1, a) into expected box offsets of shape (b, 4, a)."""
        b, c, a = x.shape  # batch, channels, anchors
        return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)
        # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a)


class Proto(nn.Module):
    """YOLOv8 mask Proto module for segmentation models."""

    def __init__(self, c1, c_=256, c2=32):
        """
        Initializes the YOLOv8 mask Proto module with specified number of protos and masks.

        Input arguments are ch_in, number of protos, number of masks.
        """
        super().__init__()
        self.cv1 = Conv(c1, c_, k=3)
        self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True)  # nn.Upsample(scale_factor=2, mode='nearest')
        self.cv2 = Conv(c_, c_, k=3)
        self.cv3 = Conv(c_, c2)

    def forward(self, x):
        """Return c2 prototype masks at 2x spatial resolution: cv1 -> 2x upsample -> cv2 -> cv3."""
        return self.cv3(self.cv2(self.upsample(self.cv1(x))))


class HGStem(nn.Module):
    """
    StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2):
        """Initialize the PPHGNetV2 stem with input, middle and output channels (5 convs + one maxpool)."""
        super().__init__()
        self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU())
        self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU())
        self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU())
        self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU())
        self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU())
        self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True)

    def forward(self, x):
        """Forward pass of the PPHGNetV2 stem: parallel conv/pool branches are concatenated then fused."""
        x = self.stem1(x)
        # Asymmetric right/bottom padding compensates for the even (2x2) kernels of stem2a/stem2b and the pool.
        x = F.pad(x, [0, 1, 0, 1])
        x2 = self.stem2a(x)
        x2 = F.pad(x2, [0, 1, 0, 1])
        x2 = self.stem2b(x2)
        x1 = self.pool(x)
        x = torch.cat([x1, x2], dim=1)
        x = self.stem3(x)
        x = self.stem4(x)
        return x


class HGBlock(nn.Module):
    """
    HG_Block of PPHGNetV2 with 2 convolutions and LightConv.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
        """Initialize HGBlock with n chained LightConv/Conv blocks, squeeze/excitation 1x1 convs, optional shortcut."""
        super().__init__()
        block = LightConv if lightconv else Conv
        self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
        self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
        self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
        self.add = shortcut and c1 == c2  # residual add only when channel counts match

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer: concat input + all intermediate outputs, then squeeze/excite."""
        y = [x]
        y.extend(m(y[-1]) for m in self.m)
        y = self.ec(self.sc(torch.cat(y, 1)))
        return y + x if self.add else y


class SPP(nn.Module):
    """Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        """Initialize the SPP layer with input/output channels and pooling kernel sizes."""
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        """Forward pass of the SPP layer: concat the identity branch with one max-pool branch per kernel size."""
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher."""

    def __init__(self, c1, c2, k=5):
        """
        Initializes the SPPF layer with given input/output channels and kernel size.

        This module is equivalent to SPP(k=(5, 9, 13)).
        """
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        """Apply three cascaded max-pools (equivalent to one 5/9/13 pyramid) and fuse with the identity branch."""
        x = self.cv1(x)
        y1 = self.m(x)
        y2 = self.m(y1)
        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))


class C1(nn.Module):
    """CSP Bottleneck with 1 convolution."""

    def __init__(self, c1, c2, n=1):
        """Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number."""
        super().__init__()
        self.cv1 = Conv(c1, c2, 1, 1)
        self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))

    def forward(self, x):
        """Apply 1x1 conv then n 3x3 convs with a residual connection around the 3x3 stack."""
        y = self.cv1(x)
        return self.m(y) + y


class C2(nn.Module):
    """CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """
        Initializes the CSP Bottleneck with 2 convolutions module with arguments ch_in, ch_out, number, shortcut,
        groups, expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c2, 1)  # optional act=FReLU(c2)
        # self.attention = ChannelAttention(2 * self.c)  # or SpatialAttention()
        self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 2 convolutions: split, process half, concat, fuse."""
        a, b = self.cv1(x).chunk(2, 1)
        return self.cv2(torch.cat((self.m(a), b), 1))


class C2f(nn.Module):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """
        Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
        expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))

    def forward(self, x):
        """Forward pass through C2f layer: every intermediate bottleneck output is kept and concatenated."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))

    def forward_split(self, x):
        """Forward pass using split() instead of chunk(); same result, kept for export compatibility."""
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))


class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 3 convolutions (bottleneck branch + bypass, fused by cv3)."""
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


class C3x(C3):
    """C3 module with cross-convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3x, replacing the bottleneck kernels with cross-convolutions (1x3 then 3x1)."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.c_ = int(c2 * e)
        self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))


class RepC3(nn.Module):
    """Rep C3."""

    def __init__(self, c1, c2, n=3, e=1.0):
        """Initialize RepC3 with two 1x1 input convolutions and a stack of n RepConv blocks."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c2, 1, 1)
        self.cv2 = Conv(c1, c2, 1, 1)
        self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
        self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()

    def forward(self, x):
        """Forward pass of RT-DETR neck layer: RepConv branch plus identity-conv branch, summed."""
        return self.cv3(self.m(self.cv1(x)) + self.cv2(x))


class C3TR(C3):
    """C3 module with TransformerBlock()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3TR, replacing the C3 bottleneck stack with a TransformerBlock of n layers."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)


class C3Ghost(C3):
    """C3 module with GhostBottleneck()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3Ghost, replacing the C3 bottleneck stack with n GhostBottleneck blocks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))


class GhostBottleneck(nn.Module):
    """Ghost Bottleneck https://github.com/huawei-noah/ghostnet."""

    def __init__(self, c1, c2, k=3, s=1):
        """Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride."""
        super().__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, c_, 1, 1),  # pw
            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
            GhostConv(c_, c2, 1, 1, act=False),  # pw-linear
        )
        # Shortcut must downsample/expand when stride is 2 so shapes match for the residual add.
        self.shortcut = (
            nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
        )

    def forward(self, x):
        """Applies skip connection and summation of main and shortcut branches."""
        return self.conv(x) + self.shortcut(x)


class Bottleneck(nn.Module):
    """Standard bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """
        Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and
        expansion.
        """
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, k[0], 1)
        self.cv2 = Conv(c_, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2  # residual add only when channel counts match

    def forward(self, x):
        """Apply the two bottleneck convolutions, with a residual add when enabled and shapes allow."""
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    """CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass of the CSP bottleneck: bottleneck branch and bypass branch concatenated, BN+SiLU, fused."""
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))


class ResNetBlock(nn.Module):
    """ResNet block with standard convolution layers."""

    def __init__(self, c1, c2, s=1, e=4):
        """Initialize a 1x1 -> 3x3 -> 1x1 ResNet block; e is the output channel expansion factor."""
        super().__init__()
        c3 = e * c2
        self.cv1 = Conv(c1, c2, k=1, s=1, act=True)
        self.cv2 = Conv(c2, c2, k=3, s=s, p=1, act=True)
        self.cv3 = Conv(c2, c3, k=1, act=False)
        # Projection shortcut only when the stride or channel count changes the residual's shape.
        self.shortcut = nn.Sequential(Conv(c1, c3, k=1, s=s, act=False)) if s != 1 or c1 != c3 else nn.Identity()

    def forward(self, x):
        """Forward pass through the ResNet block."""
        return F.relu(self.cv3(self.cv2(self.cv1(x))) + self.shortcut(x))


class ResNetLayer(nn.Module):
    """ResNet layer with multiple ResNet blocks."""

    def __init__(self, c1, c2, s=1, is_first=False, n=1, e=4):
        """Initializes the ResNetLayer: a 7x7 stem when is_first, otherwise n chained ResNetBlocks."""
        super().__init__()
        self.is_first = is_first

        if self.is_first:
            self.layer = nn.Sequential(
                Conv(c1, c2, k=7, s=2, p=3, act=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
        else:
            blocks = [ResNetBlock(c1, c2, s, e=e)]
            blocks.extend([ResNetBlock(e * c2, c2, 1, e=e) for _ in range(n - 1)])
            self.layer = nn.Sequential(*blocks)

    def forward(self, x):
        """Forward pass through the ResNet layer."""
        return self.layer(x)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/block.py
Python
unknown
14,488
# Ultralytics YOLO 🚀, AGPL-3.0 license """Convolution modules.""" import math import numpy as np import torch import torch.nn as nn __all__ = ( "Conv", "Conv2", "LightConv", "DWConv", "DWConvTranspose2d", "ConvTranspose", "Focus", "GhostConv", "ChannelAttention", "SpatialAttention", "CBAM", "Concat", "RepConv", ) def autopad(k, p=None, d=1): # kernel, padding, dilation """Pad to 'same' shape outputs.""" if d > 1: k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p class Conv(nn.Module): """Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation).""" default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): """Initialize Conv layer with given arguments including activation.""" super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): """Apply convolution, batch normalization and activation to input tensor.""" return self.act(self.bn(self.conv(x))) def forward_fuse(self, x): """Perform transposed convolution of 2D data.""" return self.act(self.conv(x)) class Conv2(Conv): """Simplified RepConv module with Conv fusing.""" def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True): """Initialize Conv layer with given arguments including activation.""" super().__init__(c1, c2, k, s, p, g=g, d=d, act=act) self.cv2 = nn.Conv2d(c1, c2, 1, s, autopad(1, p, d), groups=g, dilation=d, bias=False) # add 1x1 conv def forward(self, x): """Apply convolution, batch normalization and activation to input tensor.""" return self.act(self.bn(self.conv(x) + self.cv2(x))) def forward_fuse(self, x): """Apply fused convolution, batch normalization and 
activation to input tensor."""
        return self.act(self.bn(self.conv(x)))

    def fuse_convs(self):
        """Fuse parallel convolutions."""
        # Fold the parallel 1x1 branch (cv2) into the center of the kxk kernel, then drop cv2
        # and reroute forward() to the fused single-conv path.
        w = torch.zeros_like(self.conv.weight.data)
        i = [x // 2 for x in w.shape[2:]]  # spatial center indices of the kernel
        w[:, :, i[0] : i[0] + 1, i[1] : i[1] + 1] = self.cv2.weight.data.clone()
        self.conv.weight.data += w
        self.__delattr__("cv2")
        self.forward = self.forward_fuse


class LightConv(nn.Module):
    """
    Light convolution with args(ch_in, ch_out, kernel).

    A 1x1 pointwise Conv followed by a depth-wise Conv, as used in HGNetV2.
    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, c2, k=1, act=nn.ReLU()):
        """Initialize LightConv: 1x1 conv (no activation) followed by depth-wise conv with activation."""
        super().__init__()
        self.conv1 = Conv(c1, c2, 1, act=False)
        self.conv2 = DWConv(c2, c2, k, act=act)

    def forward(self, x):
        """Apply the pointwise then depth-wise convolution to the input tensor."""
        return self.conv2(self.conv1(x))


class DWConv(Conv):
    """Depth-wise convolution."""

    def __init__(self, c1, c2, k=1, s=1, d=1, act=True):  # ch_in, ch_out, kernel, stride, dilation, activation
        """Initialize depth-wise convolution; groups = gcd(c1, c2) so each group convolves independently."""
        super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)


class DWConvTranspose2d(nn.ConvTranspose2d):
    """Depth-wise transpose convolution."""

    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0):  # ch_in, ch_out, kernel, stride, padding, padding_out
        """Initialize depth-wise transpose convolution; groups = gcd(c1, c2)."""
        super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))


class ConvTranspose(nn.Module):
    """Convolution transpose 2d layer with optional BatchNorm and activation."""

    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True):
        """Initialize ConvTranspose2d layer with optional batch normalization and activation function."""
        super().__init__()
        # bias is disabled when BN follows, since BN's beta subsumes it
        self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn)
        self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity()
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

    def forward(self, x):
        """Applies transposed convolution, batch normalization and activation to input."""
        return self.act(self.bn(self.conv_transpose(x)))

    def forward_fuse(self, x):
        """Applies activation and convolution transpose operation to input (BN already fused)."""
        return self.act(self.conv_transpose(x))


class Focus(nn.Module):
    """Focus wh information into c-space: pixel-unshuffle by 2 then convolve."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        """Initializes Focus object with user defined channel, convolution, padding, group and activation values."""
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
        # self.contract = Contract(gain=2)

    def forward(self, x):
        """
        Applies convolution to concatenated tensor and returns the output.

        Input shape is (b,c,w,h); the four 2-strided spatial samplings are stacked channel-wise
        to (b,4c,w/2,h/2) before the convolution.
        """
        return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
        # return self.conv(self.contract(x))


class GhostConv(nn.Module):
    """Ghost Convolution https://github.com/huawei-noah/ghostnet."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
        """Initializes GhostConv: a primary conv producing c2/2 channels plus a cheap 5x5 depth-wise
        conv generating the remaining "ghost" channels.
        """
        super().__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)

    def forward(self, x):
        """Apply the primary convolution, then concatenate its output with the cheap ghost features."""
        y = self.cv1(x)
        return torch.cat((y, self.cv2(y)), 1)


class RepConv(nn.Module):
    """
    RepConv is a basic rep-style block, including training and deploy status.

    This module is used in RT-DETR.
    Based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
    """

    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=3, s=1, p=1, g=1, d=1, act=True, bn=False, deploy=False):
        """Initialize RepConv with parallel 3x3 and 1x1 branches plus an optional identity BN branch."""
        super().__init__()
        assert k == 3 and p == 1
        self.g = g
        self.c1 = c1
        self.c2 = c2
        self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()

        # Identity BN branch only valid when shapes are preserved (c1 == c2, stride 1)
        self.bn = nn.BatchNorm2d(num_features=c1) if bn and c2 == c1 and s == 1 else None
        self.conv1 = Conv(c1, c2, k, s, p=p, g=g, act=False)
        self.conv2 = Conv(c1, c2, 1, s, p=(p - k // 2), g=g, act=False)

    def forward_fuse(self, x):
        """Forward process using the single fused convolution (after fuse_convs())."""
        return self.act(self.conv(x))

    def forward(self, x):
        """Forward process: sum of 3x3, 1x1 and (optional) identity branches, then activation."""
        id_out = 0 if self.bn is None else self.bn(x)
        return self.act(self.conv1(x) + self.conv2(x) + id_out)

    def get_equivalent_kernel_bias(self):
        """Returns equivalent kernel and bias by adding 3x3 kernel, 1x1 kernel and identity kernel with their biases."""
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
        kernelid, biasid = self._fuse_bn_tensor(self.bn)
        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid

    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        """Pads a 1x1 kernel to 3x3 by zero-padding one pixel on each spatial side."""
        if kernel1x1 is None:
            return 0
        else:
            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])

    def _fuse_bn_tensor(self, branch):
        """Fuse a Conv+BN branch (or a bare BN identity branch) into an equivalent (kernel, bias) pair.

        Returns (0, 0) for an absent branch so the sums in get_equivalent_kernel_bias stay valid.
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, Conv):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        elif isinstance(branch, nn.BatchNorm2d):
            # Express the identity mapping as a grouped 3x3 kernel with a single center 1
            if not hasattr(self, "id_tensor"):
                input_dim = self.c1 // self.g
                kernel_value = np.zeros((self.c1, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.c1):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std

    def fuse_convs(self):
        """Combines the parallel branches into a single conv layer and removes the training-time attributes."""
        if hasattr(self, "conv"):
            return  # already fused
        kernel, bias = self.get_equivalent_kernel_bias()
        self.conv = nn.Conv2d(
            in_channels=self.conv1.conv.in_channels,
            out_channels=self.conv1.conv.out_channels,
            kernel_size=self.conv1.conv.kernel_size,
            stride=self.conv1.conv.stride,
            padding=self.conv1.conv.padding,
            dilation=self.conv1.conv.dilation,
            groups=self.conv1.conv.groups,
            bias=True,
        ).requires_grad_(False)
        self.conv.weight.data = kernel
        self.conv.bias.data = bias
        for para in self.parameters():
            para.detach_()
        self.__delattr__("conv1")
        self.__delattr__("conv2")
        if hasattr(self, "nm"):
            self.__delattr__("nm")
        if hasattr(self, "bn"):
            self.__delattr__("bn")
        if hasattr(self, "id_tensor"):
            self.__delattr__("id_tensor")


class ChannelAttention(nn.Module):
    """Channel-attention module https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet."""

    def __init__(self, channels: int) -> None:
        """Initialize channel attention: global average pool -> 1x1 conv -> sigmoid gate."""
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True)
        self.act = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Scale each channel of the input by a sigmoid weight computed from its pooled response."""
        return x * self.act(self.fc(self.pool(x)))


class SpatialAttention(nn.Module):
    """Spatial-attention module."""

    def __init__(self, kernel_size=7):
        """Initialize Spatial-attention module with kernel size argument."""
        super().__init__()
        assert kernel_size in (3, 7), "kernel size must be 3 or 7"
        padding = 3 if kernel_size == 7 else 1
        self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.act = nn.Sigmoid()

    def forward(self, x):
        """Scale the input by a spatial sigmoid map computed from channel-wise mean and max."""
        return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1)))


class CBAM(nn.Module):
    """Convolutional Block Attention Module."""

    def __init__(self, c1, kernel_size=7):
        """Initialize CBAM with given input channel (c1) and kernel size."""
        super().__init__()
        self.channel_attention = ChannelAttention(c1)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        """Apply channel attention followed by spatial attention to the input tensor."""
        return self.spatial_attention(self.channel_attention(x))


class Concat(nn.Module):
    """Concatenate a list of tensors along dimension."""

    def __init__(self, dimension=1):
        """Initialize Concat with the dimension along which to concatenate."""
        super().__init__()
        self.d = dimension

    def forward(self, x):
        """Concatenate the list of input tensors along dimension `self.d`."""
        return torch.cat(x, self.d)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/conv.py
Python
unknown
12,722
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Model head modules."""

import math

import torch
import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_

from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors

from .block import DFL, Proto
from .conv import Conv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init

__all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder"


class Detect(nn.Module):
    """YOLOv8 Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    shape = None  # cached input shape; anchors are rebuilt when it changes
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc=80, ch=()):
        """Initializes the YOLOv8 detection layer with specified number of classes and channels.

        Args:
            nc (int): number of classes.
            ch (tuple): input channels, one per detection layer.
        """
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection layers
        self.reg_max = 16  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
        self.no = nc + self.reg_max * 4  # number of outputs per anchor
        self.stride = torch.zeros(self.nl)  # strides computed during build
        c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
        self.cv2 = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
        )
        self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities.

        NOTE: mutates the input list `x` in place (each level becomes the concatenated box+cls map).
        """
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return x

        # Inference path
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        if self.dynamic or self.shape != shape:
            # Rebuild anchor grid only when the input shape changes
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        # NOTE(review): `self.format` is not defined in this class — presumably set on the instance
        # by the exporter before export-mode inference; confirm against the export pipeline.
        if self.export and self.format in ("saved_model", "pb", "tflite", "edgetpu", "tfjs"):  # avoid TF FlexSplitV ops
            box = x_cat[:, : self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4 :]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

        if self.export and self.format in ("tflite", "edgetpu"):
            # Precompute normalization factor to increase numerical stability
            # See https://github.com/ultralytics/ultralytics/issues/7371
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)

    def decode_bboxes(self, bboxes, anchors):
        """Decode DFL distance predictions into xywh bounding boxes."""
        return dist2bbox(bboxes, anchors, xywh=True, dim=1)


class Segment(Detect):
    """YOLOv8 Segment head for segmentation models."""

    def __init__(self, nc=80, nm=32, npr=256, ch=()):
        """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
        super().__init__(nc, ch)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.nm)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)

    def forward(self, x):
        """Run detection plus mask branch; returns (detect outputs, mask coefficients, protos) in training,
        otherwise detections with mask coefficients appended (plus protos for the non-export path).
        """
        p = self.proto(x[0])  # mask protos
        bs = p.shape[0]  # batch size

        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
        x = self.detect(self, x)
        if self.training:
            return x, mc, p
        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))


class OBB(Detect):
    """YOLOv8 OBB detection head for detection with rotation models."""

    def __init__(self, nc=80, ne=1, ch=()):
        """Initialize OBB with number of classes `nc` and layer channels `ch`."""
        super().__init__(nc, ch)
        self.ne = ne  # number of extra parameters (rotation angle)
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.ne)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes, angles and class probabilities."""
        bs = x[0].shape[0]  # batch size
        angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2)  # OBB theta logits
        # NOTE: set `angle` as an attribute so that `decode_bboxes` could use it.
        angle = (angle.sigmoid() - 0.25) * math.pi  # [-pi/4, 3pi/4]
        # angle = angle.sigmoid() * math.pi / 2  # [0, pi/2]
        if not self.training:
            self.angle = angle
        x = self.detect(self, x)
        if self.training:
            return x, angle
        return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))

    def decode_bboxes(self, bboxes, anchors):
        """Decode rotated bounding boxes using the angle stashed on `self.angle` during forward()."""
        return dist2rbox(bboxes, self.angle, anchors, dim=1)


class Pose(Detect):
    """YOLOv8 Pose head for keypoints models."""

    def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
        """Initialize YOLO network with default parameters and Convolutional Layers."""
        super().__init__(nc, ch)
        self.kpt_shape = kpt_shape  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
        self.nk = kpt_shape[0] * kpt_shape[1]  # number of keypoints total
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.nk)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)

    def forward(self, x):
        """Perform forward pass through YOLO model and return predictions with decoded keypoints."""
        bs = x[0].shape[0]  # batch size
        kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
        x = self.detect(self, x)
        if self.training:
            return x, kpt
        pred_kpt = self.kpts_decode(bs, kpt)
        return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

    def kpts_decode(self, bs, kpts):
        """Decodes raw keypoint predictions into image-space coordinates (and visibility if ndim == 3)."""
        ndim = self.kpt_shape[1]
        if self.export:  # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
            y = kpts.view(bs, *self.kpt_shape, -1)
            a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
            if ndim == 3:
                a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
            return a.view(bs, self.nk, -1)
        else:
            y = kpts.clone()
            if ndim == 3:
                y[:, 2::3] = y[:, 2::3].sigmoid()  # sigmoid (WARNING: inplace .sigmoid_() Apple MPS bug)
            y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
            y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
            return y


class Classify(nn.Module):
    """YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        """Initializes YOLOv8 classification head with specified input and output channels, kernel size, stride,
        padding, and groups.
        """
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, p, g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        """Conv -> pool -> dropout -> linear; returns logits in training, softmax probabilities otherwise."""
        if isinstance(x, list):
            x = torch.cat(x, 1)
        x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
        return x if self.training else x.softmax(1)


class RTDETRDecoder(nn.Module):
    """
    Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.

    This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
    and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
    Transformer decoder layers to output the final predictions.
    """

    export = False  # export mode

    def __init__(
        self,
        nc=80,
        ch=(512, 1024, 2048),
        hd=256,  # hidden dim
        nq=300,  # num queries
        ndp=4,  # num decoder points
        nh=8,  # num head
        ndl=6,  # num decoder layers
        d_ffn=1024,  # dim of feedforward
        dropout=0.0,
        act=nn.ReLU(),
        eval_idx=-1,
        # Training args
        nd=100,  # num denoising
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learnt_init_query=False,
    ):
        """
        Initializes the RTDETRDecoder module with the given parameters.

        Args:
            nc (int): Number of classes. Default is 80.
            ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
            hd (int): Dimension of hidden layers. Default is 256.
            nq (int): Number of query points. Default is 300.
            ndp (int): Number of decoder points. Default is 4.
            nh (int): Number of heads in multi-head attention. Default is 8.
            ndl (int): Number of decoder layers. Default is 6.
            d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
            dropout (float): Dropout rate. Default is 0.
            act (nn.Module): Activation function. Default is nn.ReLU.
            eval_idx (int): Evaluation index. Default is -1.
            nd (int): Number of denoising. Default is 100.
            label_noise_ratio (float): Label noise ratio. Default is 0.5.
            box_noise_scale (float): Box noise scale. Default is 1.0.
            learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
        """
        super().__init__()
        self.hidden_dim = hd
        self.nhead = nh
        self.nl = len(ch)  # num level
        self.nc = nc
        self.num_queries = nq
        self.num_decoder_layers = ndl

        # Backbone feature projection
        self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)
        # NOTE: simplified version but it's not consistent with .pt weights.
        # self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch)

        # Transformer module
        decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
        self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)

        # Denoising part
        self.denoising_class_embed = nn.Embedding(nc, hd)
        self.num_denoising = nd
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale

        # Decoder embedding
        self.learnt_init_query = learnt_init_query
        if learnt_init_query:
            self.tgt_embed = nn.Embedding(nq, hd)
        self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)

        # Encoder head
        self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
        self.enc_score_head = nn.Linear(hd, nc)
        self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)

        # Decoder head
        self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
        self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])

        self._reset_parameters()

    def forward(self, x, batch=None):
        """Runs the forward pass of the module, returning bounding box and classification scores for the input."""
        from ultralytics.models.utils.ops import get_cdn_group

        # Input projection and embedding
        feats, shapes = self._get_encoder_input(x)

        # Prepare denoising training
        dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
            batch,
            self.nc,
            self.num_queries,
            self.denoising_class_embed.weight,
            self.num_denoising,
            self.label_noise_ratio,
            self.box_noise_scale,
            self.training,
        )

        embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)

        # Decoder
        dec_bboxes, dec_scores = self.decoder(
            embed,
            refer_bbox,
            feats,
            shapes,
            self.dec_bbox_head,
            self.dec_score_head,
            self.query_pos_head,
            attn_mask=attn_mask,
        )
        x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
        if self.training:
            return x
        # (bs, 300, 4+nc)
        y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
        return y if self.export else (y, x)

    def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device="cpu", eps=1e-2):
        """Generates anchor bounding boxes for given shapes with specific grid size and validates them.

        Anchors are returned in inverse-sigmoid (logit) space; out-of-range anchors are masked to inf.
        """
        anchors = []
        for i, (h, w) in enumerate(shapes):
            sy = torch.arange(end=h, dtype=dtype, device=device)
            sx = torch.arange(end=w, dtype=dtype, device=device)
            grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
            grid_xy = torch.stack([grid_x, grid_y], -1)  # (h, w, 2)

            valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
            grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH  # (1, h, w, 2)
            wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)  # wh doubles per level
            anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4))  # (1, h*w, 4)

        anchors = torch.cat(anchors, 1)  # (1, h*w*nl, 4)
        valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)  # 1, h*w*nl, 1
        anchors = torch.log(anchors / (1 - anchors))  # inverse sigmoid
        anchors = anchors.masked_fill(~valid_mask, float("inf"))
        return anchors, valid_mask

    def _get_encoder_input(self, x):
        """Processes and returns encoder inputs by getting projection features from input and concatenating them."""
        # Get projection features
        x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
        # Get encoder inputs
        feats = []
        shapes = []
        for feat in x:
            h, w = feat.shape[2:]
            # [b, c, h, w] -> [b, h*w, c]
            feats.append(feat.flatten(2).permute(0, 2, 1))
            # [nl, 2]
            shapes.append([h, w])

        # [b, h*w, c]
        feats = torch.cat(feats, 1)
        return feats, shapes

    def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None):
        """Generates and prepares the input required for the decoder from the provided features and shapes."""
        bs = feats.shape[0]
        # Prepare input for decoder
        anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
        features = self.enc_output(valid_mask * feats)  # bs, h*w, 256

        enc_outputs_scores = self.enc_score_head(features)  # (bs, h*w, nc)

        # Query selection: keep the num_queries positions with the highest best-class score
        # (bs, num_queries)
        topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
        # (bs, num_queries)
        batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)

        # (bs, num_queries, 256)
        top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
        # (bs, num_queries, 4)
        top_k_anchors = anchors[:, topk_ind].view(bs, self.num_queries, -1)

        # Dynamic anchors + static content
        refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors

        enc_bboxes = refer_bbox.sigmoid()
        if dn_bbox is not None:
            refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
        enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)

        embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
        if self.training:
            # Detach so encoder query selection is not trained through the decoder
            refer_bbox = refer_bbox.detach()
            if not self.learnt_init_query:
                embeddings = embeddings.detach()
        if dn_embed is not None:
            embeddings = torch.cat([dn_embed, embeddings], 1)

        return embeddings, refer_bbox, enc_bboxes, enc_scores

    # TODO
    def _reset_parameters(self):
        """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
        # Class and bbox head init
        bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
        # NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
        # linear_init(self.enc_score_head)
        constant_(self.enc_score_head.bias, bias_cls)
        constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
        constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
        for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
            # linear_init(cls_)
            constant_(cls_.bias, bias_cls)
            constant_(reg_.layers[-1].weight, 0.0)
            constant_(reg_.layers[-1].bias, 0.0)

        linear_init(self.enc_output[0])
        xavier_uniform_(self.enc_output[0].weight)
        if self.learnt_init_query:
            xavier_uniform_(self.tgt_embed.weight)
        xavier_uniform_(self.query_pos_head.layers[0].weight)
        xavier_uniform_(self.query_pos_head.layers[1].weight)
        for layer in self.input_proj:
            xavier_uniform_(layer[0].weight)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/head.py
Python
unknown
19,535
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Transformer modules."""

import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_

from .conv import Conv
from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch

__all__ = (
    "TransformerEncoderLayer",
    "TransformerLayer",
    "TransformerBlock",
    "MLPBlock",
    "LayerNorm2d",
    "AIFI",
    "DeformableTransformerDecoder",
    "DeformableTransformerDecoderLayer",
    "MSDeformAttn",
    "MLP",
)


class TransformerEncoderLayer(nn.Module):
    """Defines a single layer of the transformer encoder."""

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
        """Initialize the TransformerEncoderLayer with specified parameters.

        Args:
            c1 (int): embedding dimension.
            cm (int): hidden dimension of the feed-forward network.
            num_heads (int): number of attention heads.
            dropout (float): dropout probability.
            act (nn.Module): activation module instance for the FFN.
            normalize_before (bool): use pre-norm (True) or post-norm (False) layout.
        """
        super().__init__()
        from ...utils.torch_utils import TORCH_1_9

        if not TORCH_1_9:
            raise ModuleNotFoundError(
                "TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
            )
        self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.fc1 = nn.Linear(c1, cm)
        self.fc2 = nn.Linear(cm, c1)

        self.norm1 = nn.LayerNorm(c1)
        self.norm2 = nn.LayerNorm(c1)
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.act = act
        self.normalize_before = normalize_before

    @staticmethod
    def with_pos_embed(tensor, pos=None):
        """Add position embeddings to the tensor if provided."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with post-normalization (norm after each residual add)."""
        q = k = self.with_pos_embed(src, pos)
        src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
        src = src + self.dropout2(src2)
        return self.norm2(src)

    def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with pre-normalization (norm before attention/FFN)."""
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
        return src + self.dropout2(src2)

    def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Forward propagates the input through the encoder module."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class AIFI(TransformerEncoderLayer):
    """Defines the AIFI transformer layer (attention-based intra-scale feature interaction, used in RT-DETR)."""

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
        """Initialize the AIFI instance with specified parameters."""
        super().__init__(c1, cm, num_heads, dropout, act, normalize_before)

    def forward(self, x):
        """Forward pass: flatten a [B, C, H, W] map to a sequence, run the encoder layer with a
        2D sin-cos positional embedding, then restore the spatial layout."""
        c, h, w = x.shape[1:]
        pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
        # Flatten [B, C, H, W] to [B, HxW, C]
        x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
        return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous()

    @staticmethod
    def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0):
        """Builds 2D sine-cosine position embedding of shape (1, w*h, embed_dim)."""
        assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1.0 / (temperature**omega)

        out_w = grid_w.flatten()[..., None] @ omega[None]
        out_h = grid_h.flatten()[..., None] @ omega[None]

        return torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], 1)[None]


class TransformerLayer(nn.Module):
    """Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""

    def __init__(self, c, num_heads):
        """Initializes a self-attention mechanism using linear transformations and multi-head attention."""
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        """Apply self-attention and a linear FFN, each with a residual connection."""
        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
        return self.fc2(self.fc1(x)) + x


class TransformerBlock(nn.Module):
    """Vision Transformer https://arxiv.org/abs/2010.11929."""

    def __init__(self, c1, c2, num_heads, num_layers):
        """Initialize a Transformer module with position embedding and specified number of heads and layers."""
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)  # channel projection only when shapes differ
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        """Flatten the feature map to a sequence, add the learned position embedding, run the
        transformer layers and restore the spatial layout."""
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        p = x.flatten(2).permute(2, 0, 1)
        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)


class MLPBlock(nn.Module):
    """Implements a single block of a multi-layer perceptron."""

    def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
        """Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function."""
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass for the MLPBlock: linear -> activation -> linear."""
        return self.lin2(self.act(self.lin1(x)))


class MLP(nn.Module):
    """Implements a simple multi-layer perceptron (also called FFN)."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        """Initialize the MLP with specified input, hidden, output dimensions and number of layers."""
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        """Forward pass for the entire MLP; ReLU on all but the final layer."""
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class LayerNorm2d(nn.Module):
    """
    2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.

    Normalizes over the channel dimension of NCHW tensors.
    Original implementations in https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
    and https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.
    """

    def __init__(self, num_channels, eps=1e-6):
        """Initialize LayerNorm2d with the given parameters."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x):
        """Perform forward pass for 2D layer normalization over dim 1 (channels)."""
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        return self.weight[:, None, None] * x + self.bias[:, None, None]


class MSDeformAttn(nn.Module):
    """
    Multi-Scale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.

    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
    """

    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """Initialize MSDeformAttn with the given parameters."""
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
        _d_per_head = d_model // n_heads
        # Better to set _d_per_head to a power of 2 which is more efficient in a CUDA implementation
        assert _d_per_head * n_heads == d_model, "`d_model` must be divisible by `n_heads`"

        self.im2col_step = 64

        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points

        # Per query: 2 offsets and 1 weight for each (head, level, point) triple
        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)

        self._reset_parameters()

    def _reset_parameters(self):
        """Reset module parameters.

        Sampling-offset biases are initialized to a ring of directions (one per head), scaled per point,
        matching the Deformable-DETR reference initialization.
        """
        constant_(self.sampling_offsets.weight.data, 0.0)
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.n_heads, 1, 1, 2)
            .repeat(1, self.n_levels, self.n_points, 1)
        )
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        constant_(self.attention_weights.weight.data, 0.0)
        constant_(self.attention_weights.bias.data, 0.0)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.0)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.0)

    def forward(self, query, refer_bbox, value, value_shapes, value_mask=None):
        """
        Perform forward pass for multiscale deformable attention.

        https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

        Args:
            query (torch.Tensor): [bs, query_length, C]
            refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area
            value (torch.Tensor): [bs, value_length, C]
            value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
            value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements

        Returns:
            output (Tensor): [bs, Length_{query}, C]
        """
        bs, len_q = query.shape[:2]
        len_v = value.shape[1]
        assert sum(s[0] * s[1] for s in value_shapes) == len_v

        value = self.value_proj(value)
        if value_mask is not None:
            value = value.masked_fill(value_mask[..., None], float(0))
        value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
        # Softmax over all (level, point) pairs jointly, then split back per level
        attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
        # N, Len_q, n_heads, n_levels, n_points, 2
        num_points = refer_bbox.shape[-1]
        if num_points == 2:
            # Reference is a point: offsets are normalized by each level's (W, H)
            offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
            add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            sampling_locations = refer_bbox[:, :, None, :, None, :] + add
        elif num_points == 4:
            # Reference is a box: offsets are scaled by half the box size
            add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
            sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
        output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
        return self.output_proj(output)


class DeformableTransformerDecoderLayer(nn.Module):
    """
    Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
    """

    def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4):
        """Initialize the DeformableTransformerDecoderLayer with the given parameters."""
        super().__init__()

        # Self attention
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)

        # Cross attention
        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

        # FFN
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.act = act
        self.dropout3 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout4 = nn.Dropout(dropout)
        self.norm3 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        """Add positional embeddings to the input tensor, if provided."""
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, tgt):
        """Perform forward pass through the Feed-Forward Network part of the layer."""
        tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
        tgt = tgt + self.dropout4(tgt2)
        return self.norm3(tgt)

    def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
        """Perform the forward pass through the entire decoder layer:
        self-attention -> deformable cross-attention over image features -> FFN."""
        # Self attention (nn.MultiheadAttention here is sequence-first, hence the transposes)
        q = k = self.with_pos_embed(embed, query_pos)
        tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
            0
        ].transpose(0, 1)
        embed = embed + self.dropout1(tgt)
        embed = self.norm1(embed)

        # Cross attention
        tgt = self.cross_attn(
            self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
        )
        embed = embed + self.dropout2(tgt)
        embed = self.norm2(embed)

        # FFN
        return self.forward_ffn(embed)


class DeformableTransformerDecoder(nn.Module):
    """
    Implementation of Deformable Transformer Decoder based on PaddleDetection.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    """

    def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
        """Initialize the DeformableTransformerDecoder with the given parameters."""
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        # Negative eval_idx counts from the end (e.g. -1 -> last layer)
        self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx

    def forward(
        self,
        embed,  # decoder embeddings
        refer_bbox,  # anchor
        feats,  # image features
        shapes,  # feature shapes
        bbox_head,
        score_head,
        pos_mlp,
        attn_mask=None,
        padding_mask=None,
    ):
        """Perform the forward pass through the entire decoder, iteratively refining reference boxes.

        Training collects outputs from every layer; inference returns only the eval_idx layer's output.
        """
        output = embed
        dec_bboxes = []
        dec_cls = []
        last_refined_bbox = None
        refer_bbox = refer_bbox.sigmoid()
        for i, layer in enumerate(self.layers):
            output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))

            bbox = bbox_head[i](output)
            refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))

            if self.training:
                dec_cls.append(score_head[i](output))
                if i == 0:
                    dec_bboxes.append(refined_bbox)
                else:
                    # Deep-supervision target uses the previous layer's refinement as base
                    dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
            elif i == self.eval_idx:
                dec_cls.append(score_head[i](output))
                dec_bboxes.append(refined_bbox)
                break

            last_refined_bbox = refined_bbox
            # Detach between layers in training so each layer's box loss is local
            refer_bbox = refined_bbox.detach() if self.training else refined_bbox

        return torch.stack(dec_bboxes), torch.stack(dec_cls)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/transformer.py
Python
unknown
17,910
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Module utils."""

import copy
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import uniform_

__all__ = "multi_scale_deformable_attn_pytorch", "inverse_sigmoid"


def _get_clones(module, n):
    """Return an nn.ModuleList holding `n` independent deep copies of `module`."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(n))


def bias_init_with_prob(prior_prob=0.01):
    """Compute the conv/fc bias initialization that yields `prior_prob` after a sigmoid."""
    odds = (1 - prior_prob) / prior_prob
    return float(-np.log(odds))  # return bias_init


def linear_init(module):
    """Initialize a linear module's weight (and bias, when present) uniformly in [-bound, bound]."""
    bound = 1 / math.sqrt(module.weight.shape[0])
    uniform_(module.weight, -bound, bound)
    bias = getattr(module, "bias", None)
    if bias is not None:
        uniform_(bias, -bound, bound)


def inverse_sigmoid(x, eps=1e-5):
    """Compute the logit of `x`, clamping both ratio terms by `eps` for numerical stability."""
    x = x.clamp(min=0, max=1)
    numerator = x.clamp(min=eps)
    denominator = (1 - x).clamp(min=eps)
    return torch.log(numerator / denominator)


def multi_scale_deformable_attn_pytorch(
    value: torch.Tensor,
    value_spatial_shapes: torch.Tensor,
    sampling_locations: torch.Tensor,
    attention_weights: torch.Tensor,
) -> torch.Tensor:
    """
    Pure-PyTorch implementation of multi-scale deformable attention.

    https://github.com/IDEA-Research/detrex/blob/main/detrex/layers/multi_scale_deform_attn.py
    """
    bs, _, num_heads, embed_dims = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape
    # Split the flattened value tensor into one (bs, H*W, num_heads, embed_dims) tensor per level.
    level_values = value.split([h * w for h, w in value_spatial_shapes], dim=1)
    # Map sampling locations from [0, 1] into grid_sample's [-1, 1] coordinate range.
    grids = 2 * sampling_locations - 1

    sampled_per_level = []
    for level, (h, w) in enumerate(value_spatial_shapes):
        # (bs, H*W, num_heads, embed_dims) -> (bs*num_heads, embed_dims, H, W)
        value_l = level_values[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, h, w)
        # (bs, num_queries, num_heads, num_points, 2) -> (bs*num_heads, num_queries, num_points, 2)
        grid_l = grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        # Bilinear sampling -> (bs*num_heads, embed_dims, num_queries, num_points)
        sampled_per_level.append(
            F.grid_sample(value_l, grid_l, mode="bilinear", padding_mode="zeros", align_corners=False)
        )

    # (bs, num_queries, num_heads, num_levels, num_points) -> (bs*num_heads, 1, num_queries, num_levels*num_points)
    weights = attention_weights.transpose(1, 2).reshape(bs * num_heads, 1, num_queries, num_levels * num_points)
    # Weighted sum over every (level, point) sample, then restore (bs, num_queries, num_heads*embed_dims).
    weighted = (torch.stack(sampled_per_level, dim=-2).flatten(-2) * weights).sum(-1)
    output = weighted.view(bs, num_heads * embed_dims, num_queries)
    return output.transpose(1, 2).contiguous()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/modules/utils.py
Python
unknown
3,197
# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib
from copy import deepcopy
from pathlib import Path

import torch
import torch.nn as nn

from ultralytics.nn.modules import (
    AIFI,
    C1,
    C2,
    C3,
    C3TR,
    OBB,
    SPP,
    SPPF,
    Bottleneck,
    BottleneckCSP,
    C2f,
    C3Ghost,
    C3x,
    Classify,
    Concat,
    Conv,
    Conv2,
    ConvTranspose,
    Detect,
    DWConv,
    DWConvTranspose2d,
    Focus,
    GhostBottleneck,
    GhostConv,
    HGBlock,
    HGStem,
    Pose,
    RepC3,
    RepConv,
    ResNetLayer,
    RTDETRDecoder,
    Segment,
)
from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
from ultralytics.utils.checks import check_requirements, check_suffix, check_yaml
from ultralytics.utils.loss import v8ClassificationLoss, v8DetectionLoss, v8OBBLoss, v8PoseLoss, v8SegmentationLoss
from ultralytics.utils.plotting import feature_visualization
from ultralytics.utils.torch_utils import (
    fuse_conv_and_bn,
    fuse_deconv_and_bn,
    initialize_weights,
    intersect_dicts,
    make_divisible,
    model_info,
    scale_img,
    time_sync,
)

# thop is optional and only used for FLOPs estimates in _profile_one_layer.
try:
    import thop
except ImportError:
    thop = None


class BaseModel(nn.Module):
    """The BaseModel class serves as a base class for all the models in the Ultralytics YOLO family."""

    def forward(self, x, *args, **kwargs):
        """
        Forward pass of the model on a single scale. Wrapper for `_forward_once` method.

        Args:
            x (torch.Tensor | dict): The input image tensor or a dict including image tensor and gt labels.

        Returns:
            (torch.Tensor): The output of the network.
        """
        if isinstance(x, dict):  # for cases of training and validating while training.
            return self.loss(x, *args, **kwargs)
        return self.predict(x, *args, **kwargs)

    def predict(self, x, profile=False, visualize=False, augment=False, embed=None):
        """
        Perform a forward pass through the network.

        Args:
            x (torch.Tensor): The input tensor to the model.
            profile (bool): Print the computation time of each layer if True, defaults to False.
            visualize (bool): Save the feature maps of the model if True, defaults to False.
            augment (bool): Augment image during prediction, defaults to False.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
        """
        if augment:
            return self._predict_augment(x)
        return self._predict_once(x, profile, visualize, embed)

    def _predict_once(self, x, profile=False, visualize=False, embed=None):
        """
        Perform a forward pass through the network.

        Args:
            x (torch.Tensor): The input tensor to the model.
            profile (bool): Print the computation time of each layer if True, defaults to False.
            visualize (bool): Save the feature maps of the model if True, defaults to False.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
        """
        y, dt, embeddings = [], [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            # y caches only the outputs of layers listed in self.save, for later skip connections.
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
            if embed and m.i in embed:
                embeddings.append(nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))  # flatten
                if m.i == max(embed):
                    # Early-exit once the last requested embedding layer has been reached.
                    return torch.unbind(torch.cat(embeddings, 1), dim=0)
        return x

    def _predict_augment(self, x):
        """Perform augmentations on input image x and return augmented inference."""
        LOGGER.warning(
            f"WARNING ⚠️ {self.__class__.__name__} does not support augmented inference yet. "
            f"Reverting to single-scale inference instead."
        )
        return self._predict_once(x)

    def _profile_one_layer(self, m, x, dt):
        """
        Profile the computation time and FLOPs of a single layer of the model on a given input. Appends the results to
        the provided list.

        Args:
            m (nn.Module): The layer to be profiled.
            x (torch.Tensor): The input data to the layer.
            dt (list): A list to store the computation time of the layer.

        Returns:
            None
        """
        c = m == self.model[-1] and isinstance(x, list)  # is final layer list, copy input as inplace fix
        flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # FLOPs
        t = time_sync()
        # Average over 10 runs; dt accumulates time in ms.
        for _ in range(10):
            m(x.copy() if c else x)
        dt.append((time_sync() - t) * 100)
        if m == self.model[0]:
            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
        LOGGER.info(f"{dt[-1]:10.2f} {flops:10.2f} {m.np:10.0f} {m.type}")
        if c:
            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")

    def fuse(self, verbose=True):
        """
        Fuse the `Conv2d()` and `BatchNorm2d()` layers of the model into a single layer, in order to improve the
        computation efficiency.

        Returns:
            (nn.Module): The fused model is returned.
        """
        if not self.is_fused():
            for m in self.model.modules():
                if isinstance(m, (Conv, Conv2, DWConv)) and hasattr(m, "bn"):
                    if isinstance(m, Conv2):
                        m.fuse_convs()
                    m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                    delattr(m, "bn")  # remove batchnorm
                    m.forward = m.forward_fuse  # update forward
                if isinstance(m, ConvTranspose) and hasattr(m, "bn"):
                    m.conv_transpose = fuse_deconv_and_bn(m.conv_transpose, m.bn)
                    delattr(m, "bn")  # remove batchnorm
                    m.forward = m.forward_fuse  # update forward
                if isinstance(m, RepConv):
                    m.fuse_convs()
                    m.forward = m.forward_fuse  # update forward
            self.info(verbose=verbose)

        return self

    def is_fused(self, thresh=10):
        """
        Check if the model has less than a certain threshold of BatchNorm layers.

        Args:
            thresh (int, optional): The threshold number of BatchNorm layers. Default is 10.

        Returns:
            (bool): True if the number of BatchNorm layers in the model is less than the threshold, False otherwise.
        """
        bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k)  # normalization layers, i.e. BatchNorm2d()
        return sum(isinstance(v, bn) for v in self.modules()) < thresh  # True if < 'thresh' BatchNorm layers in model

    def info(self, detailed=False, verbose=True, imgsz=640):
        """
        Prints model information.

        Args:
            detailed (bool): if True, prints out detailed information about the model. Defaults to False
            verbose (bool): if True, prints out the model information. Defaults to False
            imgsz (int): the size of the image that the model will be trained on. Defaults to 640
        """
        return model_info(self, detailed=detailed, verbose=verbose, imgsz=imgsz)

    def _apply(self, fn):
        """
        Applies a function to all the tensors in the model that are not parameters or registered buffers.

        Args:
            fn (function): the function to apply to the model

        Returns:
            (BaseModel): An updated BaseModel object.
        """
        self = super()._apply(fn)
        m = self.model[-1]  # Detect()
        if isinstance(m, (Detect, Segment)):
            m.stride = fn(m.stride)
            m.anchors = fn(m.anchors)
            m.strides = fn(m.strides)
        return self

    def load(self, weights, verbose=True):
        """
        Load the weights into the model.

        Args:
            weights (dict | torch.nn.Module): The pre-trained weights to be loaded.
            verbose (bool, optional): Whether to log the transfer progress. Defaults to True.
        """
        model = weights["model"] if isinstance(weights, dict) else weights  # torchvision models are not dicts
        csd = model.float().state_dict()  # checkpoint state_dict as FP32
        csd = intersect_dicts(csd, self.state_dict())  # intersect
        self.load_state_dict(csd, strict=False)  # load
        if verbose:
            LOGGER.info(f"Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights")

    def loss(self, batch, preds=None):
        """
        Compute loss.

        Args:
            batch (dict): Batch to compute loss on
            preds (torch.Tensor | List[torch.Tensor]): Predictions.
""" if not hasattr(self, "criterion"): self.criterion = self.init_criterion() preds = self.forward(batch["img"]) if preds is None else preds return self.criterion(preds, batch) def init_criterion(self): """Initialize the loss criterion for the BaseModel.""" raise NotImplementedError("compute_loss() needs to be implemented by task heads") class DetectionModel(BaseModel): """YOLOv8 detection model.""" def __init__(self, cfg="yolov8n.yaml", ch=3, nc=None, verbose=True): # model, input channels, number of classes """Initialize the YOLOv8 detection model with the given config and parameters.""" super().__init__() self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict # Define model ch = self.yaml["ch"] = self.yaml.get("ch", ch) # input channels if nc and nc != self.yaml["nc"]: LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml["nc"] = nc # override YAML value self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist self.names = {i: f"{i}" for i in range(self.yaml["nc"])} # default names dict self.inplace = self.yaml.get("inplace", True) # Build strides m = self.model[-1] # Detect() if isinstance(m, (Detect, Segment, Pose, OBB)): s = 256 # 2x min stride m.inplace = self.inplace forward = lambda x: self.forward(x)[0] if isinstance(m, (Segment, Pose, OBB)) else self.forward(x) m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward self.stride = m.stride m.bias_init() # only run once else: self.stride = torch.Tensor([32]) # default stride for i.e. 
RTDETR # Init weights, biases initialize_weights(self) if verbose: self.info() LOGGER.info("") def _predict_augment(self, x): """Perform augmentations on input image x and return augmented inference and train outputs.""" img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) yi = super().predict(xi)[0] # forward yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, -1), None # augmented inference, train @staticmethod def _descale_pred(p, flips, scale, img_size, dim=1): """De-scale predictions following augmented inference (inverse operation).""" p[:, :4] /= scale # de-scale x, y, wh, cls = p.split((1, 1, 2, p.shape[dim] - 4), dim) if flips == 2: y = img_size[0] - y # de-flip ud elif flips == 3: x = img_size[1] - x # de-flip lr return torch.cat((x, y, wh, cls), dim) def _clip_augmented(self, y): """Clip YOLO augmented inference tails.""" nl = self.model[-1].nl # number of detection layers (P3-P5) g = sum(4**x for x in range(nl)) # grid points e = 1 # exclude layer count i = (y[0].shape[-1] // g) * sum(4**x for x in range(e)) # indices y[0] = y[0][..., :-i] # large i = (y[-1].shape[-1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices y[-1] = y[-1][..., i:] # small return y def init_criterion(self): """Initialize the loss criterion for the DetectionModel.""" return v8DetectionLoss(self) class OBBModel(DetectionModel): """YOLOv8 Oriented Bounding Box (OBB) model.""" def __init__(self, cfg="yolov8n-obb.yaml", ch=3, nc=None, verbose=True): """Initialize YOLOv8 OBB model with given config and parameters.""" super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) def init_criterion(self): """Initialize the loss criterion for the model.""" return v8OBBLoss(self) class SegmentationModel(DetectionModel): """YOLOv8 
segmentation model.""" def __init__(self, cfg="yolov8n-seg.yaml", ch=3, nc=None, verbose=True): """Initialize YOLOv8 segmentation model with given config and parameters.""" super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) def init_criterion(self): """Initialize the loss criterion for the SegmentationModel.""" return v8SegmentationLoss(self) class PoseModel(DetectionModel): """YOLOv8 pose model.""" def __init__(self, cfg="yolov8n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True): """Initialize YOLOv8 Pose model.""" if not isinstance(cfg, dict): cfg = yaml_model_load(cfg) # load model YAML if any(data_kpt_shape) and list(data_kpt_shape) != list(cfg["kpt_shape"]): LOGGER.info(f"Overriding model.yaml kpt_shape={cfg['kpt_shape']} with kpt_shape={data_kpt_shape}") cfg["kpt_shape"] = data_kpt_shape super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) def init_criterion(self): """Initialize the loss criterion for the PoseModel.""" return v8PoseLoss(self) class ClassificationModel(BaseModel): """YOLOv8 classification model.""" def __init__(self, cfg="yolov8n-cls.yaml", ch=3, nc=None, verbose=True): """Init ClassificationModel with YAML, channels, number of classes, verbose flag.""" super().__init__() self._from_yaml(cfg, ch, nc, verbose) def _from_yaml(self, cfg, ch, nc, verbose): """Set YOLOv8 model configurations and define the model architecture.""" self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict # Define model ch = self.yaml["ch"] = self.yaml.get("ch", ch) # input channels if nc and nc != self.yaml["nc"]: LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml["nc"] = nc # override YAML value elif not nc and not self.yaml.get("nc", None): raise ValueError("nc not specified. 
Must specify nc in model.yaml or function arguments.") self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist self.stride = torch.Tensor([1]) # no stride constraints self.names = {i: f"{i}" for i in range(self.yaml["nc"])} # default names dict self.info() @staticmethod def reshape_outputs(model, nc): """Update a TorchVision classification model to class count 'n' if required.""" name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1] # last module if isinstance(m, Classify): # YOLO Classify() head if m.linear.out_features != nc: m.linear = nn.Linear(m.linear.in_features, nc) elif isinstance(m, nn.Linear): # ResNet, EfficientNet if m.out_features != nc: setattr(model, name, nn.Linear(m.in_features, nc)) elif isinstance(m, nn.Sequential): types = [type(x) for x in m] if nn.Linear in types: i = types.index(nn.Linear) # nn.Linear index if m[i].out_features != nc: m[i] = nn.Linear(m[i].in_features, nc) elif nn.Conv2d in types: i = types.index(nn.Conv2d) # nn.Conv2d index if m[i].out_channels != nc: m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) def init_criterion(self): """Initialize the loss criterion for the ClassificationModel.""" return v8ClassificationLoss() class RTDETRDetectionModel(DetectionModel): """ RTDETR (Real-time DEtection and Tracking using Transformers) Detection Model class. This class is responsible for constructing the RTDETR architecture, defining loss functions, and facilitating both the training and inference processes. RTDETR is an object detection and tracking model that extends from the DetectionModel base class. Attributes: cfg (str): The configuration file path or preset string. Default is 'rtdetr-l.yaml'. ch (int): Number of input channels. Default is 3 (RGB). nc (int, optional): Number of classes for object detection. Default is None. verbose (bool): Specifies if summary statistics are shown during initialization. 
Default is True. Methods: init_criterion: Initializes the criterion used for loss calculation. loss: Computes and returns the loss during training. predict: Performs a forward pass through the network and returns the output. """ def __init__(self, cfg="rtdetr-l.yaml", ch=3, nc=None, verbose=True): """ Initialize the RTDETRDetectionModel. Args: cfg (str): Configuration file name or path. ch (int): Number of input channels. nc (int, optional): Number of classes. Defaults to None. verbose (bool, optional): Print additional information during initialization. Defaults to True. """ super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose) def init_criterion(self): """Initialize the loss criterion for the RTDETRDetectionModel.""" from ultralytics.models.utils.loss import RTDETRDetectionLoss return RTDETRDetectionLoss(nc=self.nc, use_vfl=True) def loss(self, batch, preds=None): """ Compute the loss for the given batch of data. Args: batch (dict): Dictionary containing image and label data. preds (torch.Tensor, optional): Precomputed model predictions. Defaults to None. Returns: (tuple): A tuple containing the total loss and main three losses in a tensor. """ if not hasattr(self, "criterion"): self.criterion = self.init_criterion() img = batch["img"] # NOTE: preprocess gt_bbox and gt_labels to list. 
        bs = len(img)
        batch_idx = batch["batch_idx"]
        # Per-image ground-truth counts, derived from batch_idx membership.
        gt_groups = [(batch_idx == i).sum().item() for i in range(bs)]
        targets = {
            "cls": batch["cls"].to(img.device, dtype=torch.long).view(-1),
            "bboxes": batch["bboxes"].to(device=img.device),
            "batch_idx": batch_idx.to(img.device, dtype=torch.long).view(-1),
            "gt_groups": gt_groups,
        }

        preds = self.predict(img, batch=targets) if preds is None else preds
        dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta = preds if self.training else preds[1]
        if dn_meta is None:
            dn_bboxes, dn_scores = None, None
        else:
            # When denoising meta is present, split denoising queries from the regular decoder queries.
            dn_bboxes, dec_bboxes = torch.split(dec_bboxes, dn_meta["dn_num_split"], dim=2)
            dn_scores, dec_scores = torch.split(dec_scores, dn_meta["dn_num_split"], dim=2)

        dec_bboxes = torch.cat([enc_bboxes.unsqueeze(0), dec_bboxes])  # (7, bs, 300, 4)
        dec_scores = torch.cat([enc_scores.unsqueeze(0), dec_scores])

        loss = self.criterion(
            (dec_bboxes, dec_scores), targets, dn_bboxes=dn_bboxes, dn_scores=dn_scores, dn_meta=dn_meta
        )
        # NOTE: There are like 12 losses in RTDETR, backward with all losses but only show the main three losses.
        return sum(loss.values()), torch.as_tensor(
            [loss[k].detach() for k in ["loss_giou", "loss_class", "loss_bbox"]], device=img.device
        )

    def predict(self, x, profile=False, visualize=False, batch=None, augment=False, embed=None):
        """
        Perform a forward pass through the model.

        Args:
            x (torch.Tensor): The input tensor.
            profile (bool, optional): If True, profile the computation time for each layer. Defaults to False.
            visualize (bool, optional): If True, save feature maps for visualization. Defaults to False.
            batch (dict, optional): Ground truth data for evaluation. Defaults to None.
            augment (bool, optional): If True, perform data augmentation during inference. Defaults to False.
            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): Model's output tensor.
        """
        y, dt, embeddings = [], [], []  # outputs
        for m in self.model[:-1]:  # except the head part
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
            if embed and m.i in embed:
                embeddings.append(nn.functional.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1))  # flatten
                if m.i == max(embed):
                    return torch.unbind(torch.cat(embeddings, 1), dim=0)
        head = self.model[-1]
        # The RTDETR head also receives the ground-truth batch (used for denoising during training).
        x = head([y[j] for j in head.f], batch)  # head inference
        return x


class Ensemble(nn.ModuleList):
    """Ensemble of models."""

    def __init__(self):
        """Initialize an ensemble of models."""
        super().__init__()

    def forward(self, x, augment=False, profile=False, visualize=False):
        """Function generates the YOLO network's final layer."""
        y = [module(x, augment, profile, visualize)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 2)  # nms ensemble, y shape(B, HW, C)
        return y, None  # inference, train output


# Functions ------------------------------------------------------------------------------------------------------------


@contextlib.contextmanager
def temporary_modules(modules=None):
    """
    Context manager for temporarily adding or modifying modules in Python's module cache (`sys.modules`).

    This function can be used to change the module paths during runtime. It's useful when refactoring code, where you've
    moved a module from one location to another, but you still want to support the old import paths for backwards
    compatibility.

    Args:
        modules (dict, optional): A dictionary mapping old module paths to new module paths.

    Example:
        ```python
        with temporary_modules({'old.module.path': 'new.module.path'}):
            import old.module.path  # this will now import new.module.path
        ```

    Note:
        The changes are only in effect inside the context manager and are undone once the context manager exits. Be
        aware that directly manipulating `sys.modules` can lead to unpredictable results, especially in larger
        applications or libraries. Use this function with caution.
    """
    if not modules:
        modules = {}

    import importlib
    import sys

    try:
        # Set modules in sys.modules under their old name
        for old, new in modules.items():
            sys.modules[old] = importlib.import_module(new)

        yield
    finally:
        # Remove the temporary module paths
        for old in modules:
            if old in sys.modules:
                del sys.modules[old]


def torch_safe_load(weight):
    """
    This function attempts to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised,
    it catches the error, logs a warning message, and attempts to install the missing module via the
    check_requirements() function. After installation, the function again attempts to load the model using torch.load().

    Args:
        weight (str): The file path of the PyTorch model.

    Returns:
        (dict): The loaded PyTorch model.
    """
    from ultralytics.utils.downloads import attempt_download_asset

    check_suffix(file=weight, suffix=".pt")
    file = attempt_download_asset(weight)  # search online if missing locally
    try:
        with temporary_modules(
            {
                "ultralytics.yolo.utils": "ultralytics.utils",
                "ultralytics.yolo.v8": "ultralytics.models.yolo",
                "ultralytics.yolo.data": "ultralytics.data",
            }
        ):  # for legacy 8.0 Classify and Pose models
            # NOTE(review): torch.load unpickles arbitrary objects; only load trusted checkpoints.
            ckpt = torch.load(file, map_location="cpu")

    except ModuleNotFoundError as e:  # e.name is missing module name
        if e.name == "models":
            raise TypeError(
                emojis(
                    f"ERROR ❌️ {weight} appears to be an Ultralytics YOLOv5 model originally trained "
                    f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
                    f"YOLOv8 at https://github.com/ultralytics/ultralytics."
f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " f"run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'" ) ) from e LOGGER.warning( f"WARNING ⚠️ {weight} appears to require '{e.name}', which is not in ultralytics requirements." f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future." f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " f"run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'" ) check_requirements(e.name) # install missing module ckpt = torch.load(file, map_location="cpu") if not isinstance(ckpt, dict): # File is likely a YOLO instance saved with i.e. torch.save(model, "saved_model.pt") LOGGER.warning( f"WARNING ⚠️ The file '{weight}' appears to be improperly saved or formatted. " f"For optimal results, use model.save('filename.pt') to correctly save YOLO models." ) ckpt = {"model": ckpt.model} return ckpt, file # load def attempt_load_weights(weights, device=None, inplace=True, fuse=False): """Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a.""" ensemble = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt, w = torch_safe_load(w) # load ckpt args = {**DEFAULT_CFG_DICT, **ckpt["train_args"]} if "train_args" in ckpt else None # combined args model = (ckpt.get("ema") or ckpt["model"]).to(device).float() # FP32 model # Model compatibility updates model.args = args # attach args to model model.pt_path = w # attach *.pt file path to model model.task = guess_model_task(model) if not hasattr(model, "stride"): model.stride = torch.tensor([32.0]) # Append ensemble.append(model.fuse().eval() if fuse and hasattr(model, "fuse") else model.eval()) # model in eval mode # Module updates for m in ensemble.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment, Pose, OBB): m.inplace 
= inplace elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"): m.recompute_scale_factor = None # torch 1.11.0 compatibility # Return model if len(ensemble) == 1: return ensemble[-1] # Return ensemble LOGGER.info(f"Ensemble created with {weights}\n") for k in "names", "nc", "yaml": setattr(ensemble, k, getattr(ensemble[0], k)) ensemble.stride = ensemble[torch.argmax(torch.tensor([m.stride.max() for m in ensemble])).int()].stride assert all(ensemble[0].nc == m.nc for m in ensemble), f"Models differ in class counts {[m.nc for m in ensemble]}" return ensemble def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False): """Loads a single model weights.""" ckpt, weight = torch_safe_load(weight) # load ckpt args = {**DEFAULT_CFG_DICT, **(ckpt.get("train_args", {}))} # combine model and default args, preferring model args model = (ckpt.get("ema") or ckpt["model"]).to(device).float() # FP32 model # Model compatibility updates model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model model.pt_path = weight # attach *.pt file path to model model.task = guess_model_task(model) if not hasattr(model, "stride"): model.stride = torch.tensor([32.0]) model = model.fuse().eval() if fuse and hasattr(model, "fuse") else model.eval() # model in eval mode # Module updates for m in model.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment, Pose, OBB): m.inplace = inplace elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"): m.recompute_scale_factor = None # torch 1.11.0 compatibility # Return model and ckpt return model, ckpt def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) """Parse a YOLO model.yaml dictionary into a PyTorch model.""" import ast # Args max_channels = float("inf") nc, act, scales = (d.get(x) for x in ("nc", "activation", "scales")) depth, width, kpt_shape = (d.get(x, 1.0) for x in ("depth_multiple", "width_multiple", 
"kpt_shape")) if scales: scale = d.get("scale") if not scale: scale = tuple(scales.keys())[0] LOGGER.warning(f"WARNING ⚠️ no model scale passed. Assuming scale='{scale}'.") depth, width, max_channels = scales[scale] if act: Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU() if verbose: LOGGER.info(f"{colorstr('activation:')} {act}") # print if verbose: LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}") ch = [ch] layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args m = getattr(torch.nn, m[3:]) if "nn." in m else globals()[m] # get module for j, a in enumerate(args): if isinstance(a, str): with contextlib.suppress(ValueError): args[j] = locals()[a] if a in locals() else ast.literal_eval(a) n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain if m in ( Classify, Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, RepC3, ): c1, c2 = ch[f], args[0] if c2 != nc: # if c2 not equal to number of classes (i.e. 
for Classify() output) c2 = make_divisible(min(c2, max_channels) * width, 8) args = [c1, c2, *args[1:]] if m in (BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x, RepC3): args.insert(2, n) # number of repeats n = 1 elif m is AIFI: args = [ch[f], *args] elif m in (HGStem, HGBlock): c1, cm, c2 = ch[f], args[0], args[1] args = [c1, cm, c2, *args[2:]] if m is HGBlock: args.insert(4, n) # number of repeats n = 1 elif m is ResNetLayer: c2 = args[1] if args[3] else args[1] * 4 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) elif m in (Detect, Segment, Pose, OBB): args.append([ch[x] for x in f]) if m is Segment: args[2] = make_divisible(min(args[2], max_channels) * width, 8) elif m is RTDETRDecoder: # special case, channels arg must be passed in index 1 args.insert(1, [ch[x] for x in f]) else: c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace("__main__.", "") # module type m.np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type = i, f, t # attach index, 'from' index, type if verbose: LOGGER.info(f"{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f} {t:<45}{str(args):<30}") # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) def yaml_model_load(path): """Load a YOLOv8 model from a YAML file.""" import re path = Path(path) if path.stem in (f"yolov{d}{x}6" for x in "nsmlx" for d in (5, 8)): new_stem = re.sub(r"(\d+)([nslmx])6(.+)?$", r"\1\2-p6\3", path.stem) LOGGER.warning(f"WARNING ⚠️ Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.") path = path.with_name(new_stem + path.suffix) unified_path = re.sub(r"(\d+)([nslmx])(.+)?$", r"\1\3", str(path)) # i.e. 
yolov8x.yaml -> yolov8.yaml yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path) d = yaml_load(yaml_file) # model dict d["scale"] = guess_model_scale(path) d["yaml_file"] = str(path) return d def guess_model_scale(model_path): """ Takes a path to a YOLO model's YAML file as input and extracts the size character of the model's scale. The function uses regular expression matching to find the pattern of the model scale in the YAML file name, which is denoted by n, s, m, l, or x. The function returns the size character of the model scale as a string. Args: model_path (str | Path): The path to the YOLO model's YAML file. Returns: (str): The size character of the model's scale, which can be n, s, m, l, or x. """ with contextlib.suppress(AttributeError): import re return re.search(r"yolov\d+([nslmx])", Path(model_path).stem).group(1) # n, s, m, l, or x return "" def guess_model_task(model): """ Guess the task of a PyTorch model from its architecture or configuration. Args: model (nn.Module | dict): PyTorch model or model configuration in YAML format. Returns: (str): Task of the model ('detect', 'segment', 'classify', 'pose'). Raises: SyntaxError: If the task of the model could not be determined. 
""" def cfg2task(cfg): """Guess from YAML dictionary.""" m = cfg["head"][-1][-2].lower() # output module name if m in ("classify", "classifier", "cls", "fc"): return "classify" if m == "detect": return "detect" if m == "segment": return "segment" if m == "pose": return "pose" if m == "obb": return "obb" # Guess from model cfg if isinstance(model, dict): with contextlib.suppress(Exception): return cfg2task(model) # Guess from PyTorch model if isinstance(model, nn.Module): # PyTorch model for x in "model.args", "model.model.args", "model.model.model.args": with contextlib.suppress(Exception): return eval(x)["task"] for x in "model.yaml", "model.model.yaml", "model.model.model.yaml": with contextlib.suppress(Exception): return cfg2task(eval(x)) for m in model.modules(): if isinstance(m, Detect): return "detect" elif isinstance(m, Segment): return "segment" elif isinstance(m, Classify): return "classify" elif isinstance(m, Pose): return "pose" elif isinstance(m, OBB): return "obb" # Guess from model filename if isinstance(model, (str, Path)): model = Path(model) if "-seg" in model.stem or "segment" in model.parts: return "segment" elif "-cls" in model.stem or "classify" in model.parts: return "classify" elif "-pose" in model.stem or "pose" in model.parts: return "pose" elif "-obb" in model.stem or "obb" in model.parts: return "obb" elif "detect" in model.parts: return "detect" # Unable to determine task from model LOGGER.warning( "WARNING ⚠️ Unable to automatically guess model task, assuming 'task=detect'. " "Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify','pose' or 'obb'." ) return "detect" # assume detect
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/nn/tasks.py
Python
unknown
38,765
# Ultralytics YOLO 🚀, AGPL-3.0 license
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/__init__.py
Python
unknown
42
# Ultralytics YOLO 🚀, AGPL-3.0 license import cv2 from ultralytics.utils.checks import check_imshow from ultralytics.utils.plotting import Annotator class AIGym: """A class to manage the gym steps of people in a real-time video stream based on their poses.""" def __init__(self): """Initializes the AIGym with default values for Visual and Image parameters.""" # Image and line thickness self.im0 = None self.tf = None # Keypoints and count information self.keypoints = None self.poseup_angle = None self.posedown_angle = None self.threshold = 0.001 # Store stage, count and angle information self.angle = None self.count = None self.stage = None self.pose_type = "pushup" self.kpts_to_check = None # Visual Information self.view_img = False self.annotator = None # Check if environment support imshow self.env_check = check_imshow(warn=True) def set_args( self, kpts_to_check, line_thickness=2, view_img=False, pose_up_angle=145.0, pose_down_angle=90.0, pose_type="pullup", ): """ Configures the AIGym line_thickness, save image and view image parameters. Args: kpts_to_check (list): 3 keypoints for counting line_thickness (int): Line thickness for bounding boxes. view_img (bool): display the im0 pose_up_angle (float): Angle to set pose position up pose_down_angle (float): Angle to set pose position down pose_type (str): "pushup", "pullup" or "abworkout" """ self.kpts_to_check = kpts_to_check self.tf = line_thickness self.view_img = view_img self.poseup_angle = pose_up_angle self.posedown_angle = pose_down_angle self.pose_type = pose_type def start_counting(self, im0, results, frame_count): """ Function used to count the gym steps. Args: im0 (ndarray): Current frame from the video stream. 
results (list): Pose estimation data frame_count (int): store current frame count """ self.im0 = im0 if frame_count == 1: self.count = [0] * len(results[0]) self.angle = [0] * len(results[0]) self.stage = ["-" for _ in results[0]] self.keypoints = results[0].keypoints.data self.annotator = Annotator(im0, line_width=2) num_keypoints = len(results[0]) # Resize self.angle, self.count, and self.stage if the number of keypoints has changed if len(self.angle) != num_keypoints: self.angle = [0] * num_keypoints self.count = [0] * num_keypoints self.stage = ["-" for _ in range(num_keypoints)] for ind, k in enumerate(reversed(self.keypoints)): if self.pose_type in ["pushup", "pullup"]: self.angle[ind] = self.annotator.estimate_pose_angle( k[int(self.kpts_to_check[0])].cpu(), k[int(self.kpts_to_check[1])].cpu(), k[int(self.kpts_to_check[2])].cpu(), ) self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10) if self.pose_type == "abworkout": self.angle[ind] = self.annotator.estimate_pose_angle( k[int(self.kpts_to_check[0])].cpu(), k[int(self.kpts_to_check[1])].cpu(), k[int(self.kpts_to_check[2])].cpu(), ) self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10) if self.angle[ind] > self.poseup_angle: self.stage[ind] = "down" if self.angle[ind] < self.posedown_angle and self.stage[ind] == "down": self.stage[ind] = "up" self.count[ind] += 1 self.annotator.plot_angle_and_count_and_stage( angle_text=self.angle[ind], count_text=self.count[ind], stage_text=self.stage[ind], center_kpt=k[int(self.kpts_to_check[1])], line_thickness=self.tf, ) if self.pose_type == "pushup": if self.angle[ind] > self.poseup_angle: self.stage[ind] = "up" if self.angle[ind] < self.posedown_angle and self.stage[ind] == "up": self.stage[ind] = "down" self.count[ind] += 1 self.annotator.plot_angle_and_count_and_stage( angle_text=self.angle[ind], count_text=self.count[ind], stage_text=self.stage[ind], 
center_kpt=k[int(self.kpts_to_check[1])], line_thickness=self.tf, ) if self.pose_type == "pullup": if self.angle[ind] > self.poseup_angle: self.stage[ind] = "down" if self.angle[ind] < self.posedown_angle and self.stage[ind] == "down": self.stage[ind] = "up" self.count[ind] += 1 self.annotator.plot_angle_and_count_and_stage( angle_text=self.angle[ind], count_text=self.count[ind], stage_text=self.stage[ind], center_kpt=k[int(self.kpts_to_check[1])], line_thickness=self.tf, ) self.annotator.kpts(k, shape=(640, 640), radius=1, kpt_line=True) if self.env_check and self.view_img: cv2.imshow("Ultralytics YOLOv8 AI GYM", self.im0) if cv2.waitKey(1) & 0xFF == ord("q"): return return self.im0 if __name__ == "__main__": AIGym()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/ai_gym.py
Python
unknown
6,029
# Ultralytics YOLO 🚀, AGPL-3.0 license import math import cv2 from ultralytics.utils.checks import check_imshow from ultralytics.utils.plotting import Annotator, colors class DistanceCalculation: """A class to calculate distance between two objects in real-time video stream based on their tracks.""" def __init__(self): """Initializes the distance calculation class with default values for Visual, Image, track and distance parameters. """ # Visual & im0 information self.im0 = None self.annotator = None self.view_img = False self.line_color = (255, 255, 0) self.centroid_color = (255, 0, 255) # Predict/track information self.clss = None self.names = None self.boxes = None self.line_thickness = 2 self.trk_ids = None # Distance calculation information self.centroids = [] self.pixel_per_meter = 10 # Mouse event self.left_mouse_count = 0 self.selected_boxes = {} # Check if environment support imshow self.env_check = check_imshow(warn=True) def set_args( self, names, pixels_per_meter=10, view_img=False, line_thickness=2, line_color=(255, 255, 0), centroid_color=(255, 0, 255), ): """ Configures the distance calculation and display parameters. Args: names (dict): object detection classes names pixels_per_meter (int): Number of pixels in meter view_img (bool): Flag indicating frame display line_thickness (int): Line thickness for bounding boxes. line_color (RGB): color of centroids line centroid_color (RGB): colors of bbox centroids """ self.names = names self.pixel_per_meter = pixels_per_meter self.view_img = view_img self.line_thickness = line_thickness self.line_color = line_color self.centroid_color = centroid_color def mouse_event_for_distance(self, event, x, y, flags, param): """ This function is designed to move region with mouse events in a real-time video stream. Args: event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.). x (int): The x-coordinate of the mouse pointer. y (int): The y-coordinate of the mouse pointer. 
flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.). param (dict): Additional parameters you may want to pass to the function. """ global selected_boxes global left_mouse_count if event == cv2.EVENT_LBUTTONDOWN: self.left_mouse_count += 1 if self.left_mouse_count <= 2: for box, track_id in zip(self.boxes, self.trk_ids): if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes: self.selected_boxes[track_id] = [] self.selected_boxes[track_id] = box if event == cv2.EVENT_RBUTTONDOWN: self.selected_boxes = {} self.left_mouse_count = 0 def extract_tracks(self, tracks): """ Extracts results from the provided data. Args: tracks (list): List of tracks obtained from the object tracking process. """ self.boxes = tracks[0].boxes.xyxy.cpu() self.clss = tracks[0].boxes.cls.cpu().tolist() self.trk_ids = tracks[0].boxes.id.int().cpu().tolist() def calculate_centroid(self, box): """ Calculate the centroid of bounding box. Args: box (list): Bounding box data """ return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2) def calculate_distance(self, centroid1, centroid2): """ Calculate distance between two centroids. Args: centroid1 (point): First bounding box data centroid2 (point): Second bounding box data """ pixel_distance = math.sqrt((centroid1[0] - centroid2[0]) ** 2 + (centroid1[1] - centroid2[1]) ** 2) return pixel_distance / self.pixel_per_meter, (pixel_distance / self.pixel_per_meter) * 1000 def start_process(self, im0, tracks): """ Calculate distance between two bounding boxes based on tracking data. Args: im0 (nd array): Image tracks (list): List of tracks obtained from the object tracking process. 
""" self.im0 = im0 if tracks[0].boxes.id is None: if self.view_img: self.display_frames() return self.extract_tracks(tracks) self.annotator = Annotator(self.im0, line_width=2) for box, cls, track_id in zip(self.boxes, self.clss, self.trk_ids): self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)]) if len(self.selected_boxes) == 2: for trk_id, _ in self.selected_boxes.items(): if trk_id == track_id: self.selected_boxes[track_id] = box if len(self.selected_boxes) == 2: for trk_id, box in self.selected_boxes.items(): centroid = self.calculate_centroid(self.selected_boxes[trk_id]) self.centroids.append(centroid) distance_m, distance_mm = self.calculate_distance(self.centroids[0], self.centroids[1]) self.annotator.plot_distance_and_line( distance_m, distance_mm, self.centroids, self.line_color, self.centroid_color ) self.centroids = [] if self.view_img and self.env_check: self.display_frames() return im0 def display_frames(self): """Display frame.""" cv2.namedWindow("Ultralytics Distance Estimation") cv2.setMouseCallback("Ultralytics Distance Estimation", self.mouse_event_for_distance) cv2.imshow("Ultralytics Distance Estimation", self.im0) if cv2.waitKey(1) & 0xFF == ord("q"): return if __name__ == "__main__": DistanceCalculation()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/distance_calculation.py
Python
unknown
6,334
# Ultralytics YOLO 🚀, AGPL-3.0 license from collections import defaultdict import cv2 import numpy as np from ultralytics.utils.checks import check_imshow, check_requirements from ultralytics.utils.plotting import Annotator check_requirements("shapely>=2.0.0") from shapely.geometry import LineString, Point, Polygon class Heatmap: """A class to draw heatmaps in real-time video stream based on their tracks.""" def __init__(self): """Initializes the heatmap class with default values for Visual, Image, track, count and heatmap parameters.""" # Visual information self.annotator = None self.view_img = False self.shape = "circle" # Image information self.imw = None self.imh = None self.im0 = None self.view_in_counts = True self.view_out_counts = True # Heatmap colormap and heatmap np array self.colormap = None self.heatmap = None self.heatmap_alpha = 0.5 # Predict/track information self.boxes = None self.track_ids = None self.clss = None self.track_history = defaultdict(list) # Region & Line Information self.count_reg_pts = None self.counting_region = None self.line_dist_thresh = 15 self.region_thickness = 5 self.region_color = (255, 0, 255) # Object Counting Information self.in_counts = 0 self.out_counts = 0 self.counting_list = [] self.count_txt_thickness = 0 self.count_txt_color = (0, 0, 0) self.count_color = (255, 255, 255) # Decay factor self.decay_factor = 0.99 # Check if environment support imshow self.env_check = check_imshow(warn=True) def set_args( self, imw, imh, colormap=cv2.COLORMAP_JET, heatmap_alpha=0.5, view_img=False, view_in_counts=True, view_out_counts=True, count_reg_pts=None, count_txt_thickness=2, count_txt_color=(0, 0, 0), count_color=(255, 255, 255), count_reg_color=(255, 0, 255), region_thickness=5, line_dist_thresh=15, decay_factor=0.99, shape="circle", ): """ Configures the heatmap colormap, width, height and display parameters. Args: colormap (cv2.COLORMAP): The colormap to be set. imw (int): The width of the frame. 
imh (int): The height of the frame. heatmap_alpha (float): alpha value for heatmap display view_img (bool): Flag indicating frame display view_in_counts (bool): Flag to control whether to display the incounts on video stream. view_out_counts (bool): Flag to control whether to display the outcounts on video stream. count_reg_pts (list): Object counting region points count_txt_thickness (int): Text thickness for object counting display count_txt_color (RGB color): count text color value count_color (RGB color): count text background color value count_reg_color (RGB color): Color of object counting region region_thickness (int): Object counting Region thickness line_dist_thresh (int): Euclidean Distance threshold for line counter decay_factor (float): value for removing heatmap area after object passed shape (str): Heatmap shape, rect or circle shape supported """ self.imw = imw self.imh = imh self.heatmap_alpha = heatmap_alpha self.view_img = view_img self.view_in_counts = view_in_counts self.view_out_counts = view_out_counts self.colormap = colormap # Region and line selection if count_reg_pts is not None: if len(count_reg_pts) == 2: print("Line Counter Initiated.") self.count_reg_pts = count_reg_pts self.counting_region = LineString(count_reg_pts) elif len(count_reg_pts) == 4: print("Region Counter Initiated.") self.count_reg_pts = count_reg_pts self.counting_region = Polygon(self.count_reg_pts) else: print("Region or line points Invalid, 2 or 4 points supported") print("Using Line Counter Now") self.counting_region = Polygon([(20, 400), (1260, 400)]) # dummy points # Heatmap new frame self.heatmap = np.zeros((int(self.imh), int(self.imw)), dtype=np.float32) self.count_txt_thickness = count_txt_thickness self.count_txt_color = count_txt_color self.count_color = count_color self.region_color = count_reg_color self.region_thickness = region_thickness self.decay_factor = decay_factor self.line_dist_thresh = line_dist_thresh self.shape = shape # shape of heatmap, if 
not selected if self.shape not in ["circle", "rect"]: print("Unknown shape value provided, 'circle' & 'rect' supported") print("Using Circular shape now") self.shape = "circle" def extract_results(self, tracks): """ Extracts results from the provided data. Args: tracks (list): List of tracks obtained from the object tracking process. """ self.boxes = tracks[0].boxes.xyxy.cpu() self.clss = tracks[0].boxes.cls.cpu().tolist() self.track_ids = tracks[0].boxes.id.int().cpu().tolist() def generate_heatmap(self, im0, tracks): """ Generate heatmap based on tracking data. Args: im0 (nd array): Image tracks (list): List of tracks obtained from the object tracking process. """ self.im0 = im0 if tracks[0].boxes.id is None: self.heatmap = np.zeros((int(self.imh), int(self.imw)), dtype=np.float32) if self.view_img and self.env_check: self.display_frames() return im0 self.heatmap *= self.decay_factor # decay factor self.extract_results(tracks) self.annotator = Annotator(self.im0, self.count_txt_thickness, None) if self.count_reg_pts is not None: # Draw counting region if self.view_in_counts or self.view_out_counts: self.annotator.draw_region( reg_pts=self.count_reg_pts, color=self.region_color, thickness=self.region_thickness ) for box, cls, track_id in zip(self.boxes, self.clss, self.track_ids): if self.shape == "circle": center = (int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)) radius = min(int(box[2]) - int(box[0]), int(box[3]) - int(box[1])) // 2 y, x = np.ogrid[0 : self.heatmap.shape[0], 0 : self.heatmap.shape[1]] mask = (x - center[0]) ** 2 + (y - center[1]) ** 2 <= radius**2 self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += ( 2 * mask[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] ) else: self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += 2 # Store tracking hist track_line = self.track_history[track_id] track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))) if len(track_line) > 30: 
track_line.pop(0) # Count objects if len(self.count_reg_pts) == 4: if self.counting_region.contains(Point(track_line[-1])) and track_id not in self.counting_list: self.counting_list.append(track_id) if box[0] < self.counting_region.centroid.x: self.out_counts += 1 else: self.in_counts += 1 elif len(self.count_reg_pts) == 2: distance = Point(track_line[-1]).distance(self.counting_region) if distance < self.line_dist_thresh and track_id not in self.counting_list: self.counting_list.append(track_id) if box[0] < self.counting_region.centroid.x: self.out_counts += 1 else: self.in_counts += 1 else: for box, cls in zip(self.boxes, self.clss): if self.shape == "circle": center = (int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)) radius = min(int(box[2]) - int(box[0]), int(box[3]) - int(box[1])) // 2 y, x = np.ogrid[0 : self.heatmap.shape[0], 0 : self.heatmap.shape[1]] mask = (x - center[0]) ** 2 + (y - center[1]) ** 2 <= radius**2 self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += ( 2 * mask[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] ) else: self.heatmap[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] += 2 # Normalize, apply colormap to heatmap and combine with original image heatmap_normalized = cv2.normalize(self.heatmap, None, 0, 255, cv2.NORM_MINMAX) heatmap_colored = cv2.applyColorMap(heatmap_normalized.astype(np.uint8), self.colormap) incount_label = f"In Count : {self.in_counts}" outcount_label = f"OutCount : {self.out_counts}" # Display counts based on user choice counts_label = None if not self.view_in_counts and not self.view_out_counts: counts_label = None elif not self.view_in_counts: counts_label = outcount_label elif not self.view_out_counts: counts_label = incount_label else: counts_label = f"{incount_label} {outcount_label}" if self.count_reg_pts is not None and counts_label is not None: self.annotator.count_labels( counts=counts_label, count_txt_size=self.count_txt_thickness, txt_color=self.count_txt_color, 
color=self.count_color, ) self.im0 = cv2.addWeighted(self.im0, 1 - self.heatmap_alpha, heatmap_colored, self.heatmap_alpha, 0) if self.env_check and self.view_img: self.display_frames() return self.im0 def display_frames(self): """Display frame.""" cv2.imshow("Ultralytics Heatmap", self.im0) if cv2.waitKey(1) & 0xFF == ord("q"): return if __name__ == "__main__": Heatmap()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/heatmap.py
Python
unknown
10,928
# Ultralytics YOLO 🚀, AGPL-3.0 license from collections import defaultdict import cv2 from ultralytics.utils.checks import check_imshow, check_requirements from ultralytics.utils.plotting import Annotator, colors check_requirements("shapely>=2.0.0") from shapely.geometry import LineString, Point, Polygon class ObjectCounter: """A class to manage the counting of objects in a real-time video stream based on their tracks.""" def __init__(self): """Initializes the Counter with default values for various tracking and counting parameters.""" # Mouse events self.is_drawing = False self.selected_point = None # Region & Line Information self.reg_pts = [(20, 400), (1260, 400)] self.line_dist_thresh = 15 self.counting_region = None self.region_color = (255, 0, 255) self.region_thickness = 5 # Image and annotation Information self.im0 = None self.tf = None self.view_img = False self.view_in_counts = True self.view_out_counts = True self.names = None # Classes names self.annotator = None # Annotator # Object counting Information self.in_counts = 0 self.out_counts = 0 self.counting_list = [] self.count_txt_thickness = 0 self.count_txt_color = (0, 0, 0) self.count_color = (255, 255, 255) # Tracks info self.track_history = defaultdict(list) self.track_thickness = 2 self.draw_tracks = False self.track_color = (0, 255, 0) # Check if environment support imshow self.env_check = check_imshow(warn=True) def set_args( self, classes_names, reg_pts, count_reg_color=(255, 0, 255), line_thickness=2, track_thickness=2, view_img=False, view_in_counts=True, view_out_counts=True, draw_tracks=False, count_txt_thickness=2, count_txt_color=(0, 0, 0), count_color=(255, 255, 255), track_color=(0, 255, 0), region_thickness=5, line_dist_thresh=15, ): """ Configures the Counter's image, bounding box line thickness, and counting region points. Args: line_thickness (int): Line thickness for bounding boxes. view_img (bool): Flag to control whether to display the video stream. 
view_in_counts (bool): Flag to control whether to display the incounts on video stream. view_out_counts (bool): Flag to control whether to display the outcounts on video stream. reg_pts (list): Initial list of points defining the counting region. classes_names (dict): Classes names track_thickness (int): Track thickness draw_tracks (Bool): draw tracks count_txt_thickness (int): Text thickness for object counting display count_txt_color (RGB color): count text color value count_color (RGB color): count text background color value count_reg_color (RGB color): Color of object counting region track_color (RGB color): color for tracks region_thickness (int): Object counting Region thickness line_dist_thresh (int): Euclidean Distance threshold for line counter """ self.tf = line_thickness self.view_img = view_img self.view_in_counts = view_in_counts self.view_out_counts = view_out_counts self.track_thickness = track_thickness self.draw_tracks = draw_tracks # Region and line selection if len(reg_pts) == 2: print("Line Counter Initiated.") self.reg_pts = reg_pts self.counting_region = LineString(self.reg_pts) elif len(reg_pts) == 4: print("Region Counter Initiated.") self.reg_pts = reg_pts self.counting_region = Polygon(self.reg_pts) else: print("Invalid Region points provided, region_points can be 2 or 4") print("Using Line Counter Now") self.counting_region = LineString(self.reg_pts) self.names = classes_names self.track_color = track_color self.count_txt_thickness = count_txt_thickness self.count_txt_color = count_txt_color self.count_color = count_color self.region_color = count_reg_color self.region_thickness = region_thickness self.line_dist_thresh = line_dist_thresh def mouse_event_for_region(self, event, x, y, flags, params): """ This function is designed to move region with mouse events in a real-time video stream. Args: event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.). x (int): The x-coordinate of the mouse pointer. 
y (int): The y-coordinate of the mouse pointer. flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY, cv2.EVENT_FLAG_SHIFTKEY, etc.). params (dict): Additional parameters you may want to pass to the function. """ if event == cv2.EVENT_LBUTTONDOWN: for i, point in enumerate(self.reg_pts): if ( isinstance(point, (tuple, list)) and len(point) >= 2 and (abs(x - point[0]) < 10 and abs(y - point[1]) < 10) ): self.selected_point = i self.is_drawing = True break elif event == cv2.EVENT_MOUSEMOVE: if self.is_drawing and self.selected_point is not None: self.reg_pts[self.selected_point] = (x, y) self.counting_region = Polygon(self.reg_pts) elif event == cv2.EVENT_LBUTTONUP: self.is_drawing = False self.selected_point = None def extract_and_process_tracks(self, tracks): """Extracts and processes tracks for object counting in a video stream.""" boxes = tracks[0].boxes.xyxy.cpu() clss = tracks[0].boxes.cls.cpu().tolist() track_ids = tracks[0].boxes.id.int().cpu().tolist() # Annotator Init and region drawing self.annotator = Annotator(self.im0, self.tf, self.names) self.annotator.draw_region(reg_pts=self.reg_pts, color=self.region_color, thickness=self.region_thickness) # Extract tracks for box, track_id, cls in zip(boxes, track_ids, clss): # Draw bounding box self.annotator.box_label(box, label=f"{track_id}:{self.names[cls]}", color=colors(int(cls), True)) # Draw Tracks track_line = self.track_history[track_id] track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))) if len(track_line) > 30: track_line.pop(0) # Draw track trails if self.draw_tracks: self.annotator.draw_centroid_and_tracks( track_line, color=self.track_color, track_thickness=self.track_thickness ) prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None # Count objects if len(self.reg_pts) == 4: if ( prev_position is not None and self.counting_region.contains(Point(track_line[-1])) and track_id not in self.counting_list 
): self.counting_list.append(track_id) if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0: self.in_counts += 1 else: self.out_counts += 1 elif len(self.reg_pts) == 2: if prev_position is not None: distance = Point(track_line[-1]).distance(self.counting_region) if distance < self.line_dist_thresh and track_id not in self.counting_list: self.counting_list.append(track_id) if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0: self.in_counts += 1 else: self.out_counts += 1 incount_label = f"In Count : {self.in_counts}" outcount_label = f"OutCount : {self.out_counts}" # Display counts based on user choice counts_label = None if not self.view_in_counts and not self.view_out_counts: counts_label = None elif not self.view_in_counts: counts_label = outcount_label elif not self.view_out_counts: counts_label = incount_label else: counts_label = f"{incount_label} {outcount_label}" if counts_label is not None: self.annotator.count_labels( counts=counts_label, count_txt_size=self.count_txt_thickness, txt_color=self.count_txt_color, color=self.count_color, ) def display_frames(self): """Display frame.""" if self.env_check: cv2.namedWindow("Ultralytics YOLOv8 Object Counter") if len(self.reg_pts) == 4: # only add mouse event If user drawn region cv2.setMouseCallback( "Ultralytics YOLOv8 Object Counter", self.mouse_event_for_region, {"region_points": self.reg_pts} ) cv2.imshow("Ultralytics YOLOv8 Object Counter", self.im0) # Break Window if cv2.waitKey(1) & 0xFF == ord("q"): return def start_counting(self, im0, tracks): """ Main function to start the object counting process. Args: im0 (ndarray): Current frame from the video stream. tracks (list): List of tracks obtained from the object tracking process. 
""" self.im0 = im0 # store image if tracks[0].boxes.id is None: if self.view_img: self.display_frames() return im0 self.extract_and_process_tracks(tracks) if self.view_img: self.display_frames() return self.im0 if __name__ == "__main__": ObjectCounter()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/object_counter.py
Python
unknown
10,474
# Ultralytics YOLO 🚀, AGPL-3.0 license

from collections import defaultdict
from time import time

import cv2
import numpy as np

from ultralytics.utils.checks import check_imshow
from ultralytics.utils.plotting import Annotator, colors


class SpeedEstimator:
    """Estimates the speed of tracked objects in a real-time video stream from their track histories.

    The estimator keeps a per-track history of bounding-box centers, measures the vertical pixel
    displacement of each object between two wall-clock timestamps while it crosses a user-defined
    region, and annotates the frame with the resulting value.

    NOTE(review): the computed speed is pixel displacement per second (see ``calculate_speed``),
    yet ``plot_box_and_track`` labels it "km/ph" — there is no pixel-to-metric calibration in this
    class. Confirm intended units with the caller before relying on the displayed numbers.
    """

    def __init__(self):
        """Initializes the speed-estimator class with default values for visual, image, track and speed parameters."""
        # Visual & im0 information
        self.im0 = None  # current frame (ndarray), set per call in estimate_speed()
        self.annotator = None  # ultralytics Annotator, rebuilt each frame
        self.view_img = False  # whether to show frames via cv2.imshow

        # Region information: two points defining the horizontal speed-measurement line/band
        self.reg_pts = [(20, 400), (1260, 400)]
        self.region_thickness = 3

        # Predict/track information extracted from the tracker results each frame
        self.clss = None
        self.names = None  # class-id -> class-name mapping, supplied via set_args()
        self.boxes = None
        self.trk_ids = None
        self.trk_pts = None  # polyline points of the current track, int32, shape (-1, 1, 2)
        self.line_thickness = 2
        self.trk_history = defaultdict(list)  # track_id -> list of bbox centers (capped at 30)

        # Speed estimator information
        self.current_time = 0
        self.dist_data = {}  # track_id -> estimated speed (pixels/second; see class NOTE)
        self.trk_idslist = []  # track ids whose speed has already been computed once
        self.spdl_dist_thresh = 10  # vertical tolerance (px) around each region line
        self.trk_previous_times = {}  # track_id -> wall-clock time of the last sample
        self.trk_previous_points = {}  # track_id -> bbox center at the last sample

        # Check if environment supports imshow (headless environments do not)
        self.env_check = check_imshow(warn=True)

    def set_args(
        self,
        reg_pts,
        names,
        view_img=False,
        line_thickness=2,
        region_thickness=5,
        spdl_dist_thresh=10,
    ):
        """
        Configures the speed estimation and display parameters.

        Args:
            reg_pts (list): Initial list of points defining the speed calculation region.
                Falls back to the default two-point line when None.
            names (dict): Object detection class names.
            view_img (bool): Flag indicating frame display.
            line_thickness (int): Line thickness for bounding boxes.
            region_thickness (int): Speed estimation region thickness.
            spdl_dist_thresh (int): Euclidean distance threshold for speed line.
        """
        if reg_pts is None:
            print("Region points not provided, using default values")
        else:
            self.reg_pts = reg_pts
        self.names = names
        self.view_img = view_img
        self.line_thickness = line_thickness
        self.region_thickness = region_thickness
        self.spdl_dist_thresh = spdl_dist_thresh

    def extract_tracks(self, tracks):
        """
        Extracts boxes, classes and track ids from the tracker results.

        Args:
            tracks (list): List of tracks obtained from the object tracking process.
                Only ``tracks[0]`` is read; callers guarantee ``boxes.id`` is not None.
        """
        self.boxes = tracks[0].boxes.xyxy.cpu()
        self.clss = tracks[0].boxes.cls.cpu().tolist()
        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()

    def store_track_info(self, track_id, box):
        """
        Appends the bbox center to the track history and refreshes the drawable polyline.

        Args:
            track_id (int): Object track id.
            box (list): Object bounding box data in xyxy order.

        Returns:
            (list): The (mutated) center-point history for this track, at most 30 entries.
        """
        track = self.trk_history[track_id]
        bbox_center = (float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))
        track.append(bbox_center)

        # Keep a bounded sliding window of the most recent 30 centers.
        if len(track) > 30:
            track.pop(0)

        self.trk_pts = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
        return track

    def plot_box_and_track(self, track_id, box, cls, track):
        """
        Draws the bounding box, speed label (when known) and track trail onto the frame.

        Args:
            track_id (int): Object track id.
            box (list): Object bounding box data.
            cls (str): Object class name.
            track (list): Tracking history for track-path drawing.
        """
        # Until a speed is available, show the class name and a fixed magenta box instead.
        speed_label = f"{int(self.dist_data[track_id])}km/ph" if track_id in self.dist_data else self.names[int(cls)]
        bbox_color = colors(int(track_id)) if track_id in self.dist_data else (255, 0, 255)

        self.annotator.box_label(box, speed_label, bbox_color)

        cv2.polylines(self.im0, [self.trk_pts], isClosed=False, color=(0, 255, 0), thickness=1)
        cv2.circle(self.im0, (int(track[-1][0]), int(track[-1][1])), 5, bbox_color, -1)

    def calculate_speed(self, trk_id, track):
        """
        Computes the object's speed once, while its center lies within the region band.

        Speed = |Δy| in pixels / elapsed wall-clock seconds since the previous sample
        for this track (see class NOTE about the "km/ph" label).

        Args:
            trk_id (int): Object track id.
            track (list): Tracking history; only the latest center ``track[-1]`` is used.
        """
        # Ignore objects horizontally outside the region's x-extent.
        if not self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
            return
        # "known" direction == center is within spdl_dist_thresh of either region line's y.
        if self.reg_pts[1][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[1][1] + self.spdl_dist_thresh:
            direction = "known"

        elif self.reg_pts[0][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[0][1] + self.spdl_dist_thresh:
            direction = "known"

        else:
            direction = "unknown"

        # Compute speed exactly once per track (trk_idslist gate) and only after a
        # previous timestamp has been recorded (trk_previous_times != 0).
        if self.trk_previous_times[trk_id] != 0 and direction != "unknown" and trk_id not in self.trk_idslist:
            self.trk_idslist.append(trk_id)

            time_difference = time() - self.trk_previous_times[trk_id]
            if time_difference > 0:
                dist_difference = np.abs(track[-1][1] - self.trk_previous_points[trk_id][1])
                speed = dist_difference / time_difference
                self.dist_data[trk_id] = speed

        self.trk_previous_times[trk_id] = time()
        self.trk_previous_points[trk_id] = track[-1]

    def estimate_speed(self, im0, tracks):
        """
        Runs one frame of speed estimation: extract tracks, annotate, and update speeds.

        Args:
            im0 (ndarray): Image.
            tracks (list): List of tracks obtained from the object tracking process.

        Returns:
            (ndarray): The annotated input frame.
        """
        self.im0 = im0
        # No tracked ids this frame: optionally display and return unmodified.
        if tracks[0].boxes.id is None:
            if self.view_img and self.env_check:
                self.display_frames()
            return im0
        self.extract_tracks(tracks)

        # NOTE(review): line_width is hard-coded to 2 here; self.line_thickness from
        # set_args() is never forwarded — confirm whether that parameter is intended.
        self.annotator = Annotator(self.im0, line_width=2)
        self.annotator.draw_region(reg_pts=self.reg_pts, color=(255, 0, 0), thickness=self.region_thickness)

        for box, trk_id, cls in zip(self.boxes, self.trk_ids, self.clss):
            track = self.store_track_info(trk_id, box)

            if trk_id not in self.trk_previous_times:
                self.trk_previous_times[trk_id] = 0

            self.plot_box_and_track(trk_id, box, cls, track)
            self.calculate_speed(trk_id, track)

        if self.view_img and self.env_check:
            self.display_frames()

        return im0

    def display_frames(self):
        """Display the current annotated frame; pressing 'q' ends the display early."""
        cv2.imshow("Ultralytics Speed Estimation", self.im0)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            return


if __name__ == "__main__":
    SpeedEstimator()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/solutions/speed_estimation.py
Python
unknown
6,618
# Ultralytics YOLO 🚀, AGPL-3.0 license from .bot_sort import BOTSORT from .byte_tracker import BYTETracker from .track import register_tracker __all__ = "register_tracker", "BOTSORT", "BYTETracker" # allow simpler import
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/__init__.py
Python
unknown
227
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Foundational structures for object tracking in YOLO: track lifecycle states and the abstract track base class."""

from collections import OrderedDict

import numpy as np


class TrackState:
    """
    Possible lifecycle states of a tracked object.

    Attributes:
        New (int): The object has just been detected.
        Tracked (int): The object is being followed across consecutive frames.
        Lost (int): The object is no longer being tracked.
        Removed (int): The object has been dropped from tracking entirely.
    """

    New = 0
    Tracked = 1
    Lost = 2
    Removed = 3


class BaseTrack:
    """
    Abstract foundation for a single object track.

    Carries the bookkeeping shared by all concrete trackers: a globally unique id handed out by a
    class-level counter, the current lifecycle state, per-frame history and appearance features,
    and the frame counters needed to age tracks out. The `activate`, `predict` and `update`
    methods are abstract and must be provided by subclasses.

    Attributes:
        _count (int): Class-level counter used to mint unique track ids.
        track_id (int): Unique identifier for the track (0 until activated).
        is_activated (bool): Whether the track is currently active.
        state (TrackState): Current lifecycle state of the track.
        history (OrderedDict): Ordered record of the track's past states.
        features (list): Appearance feature vectors collected for this object.
        curr_feature (any): The most recent feature of the tracked object.
        score (float): Confidence score of the tracking.
        start_frame (int): Frame number at which tracking started.
        frame_id (int): Most recent frame id processed for this track.
        time_since_update (int): Number of frames since the last update.
        location (tuple): Object location for multi-camera tracking (infinite until known).
    """

    _count = 0

    def __init__(self):
        """Create a fresh, not-yet-activated track with default bookkeeping values."""
        self.track_id = 0
        self.is_activated = False
        self.state = TrackState.New
        self.history = OrderedDict()
        self.features = []
        self.curr_feature = None
        self.score = 0
        self.start_frame = 0
        self.frame_id = 0
        self.time_since_update = 0
        # Placeholder coordinates for multi-camera setups; infinite until a real location is known.
        self.location = (np.inf, np.inf)

    @property
    def end_frame(self):
        """Return the id of the last frame in which this object was tracked."""
        return self.frame_id

    @staticmethod
    def next_id():
        """Advance the shared id counter and return the newly minted track id."""
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        """Activate the track; concrete trackers must override this."""
        raise NotImplementedError

    def predict(self):
        """Predict the track's next state; concrete trackers must override this."""
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """Fold new observations into the track; concrete trackers must override this."""
        raise NotImplementedError

    def mark_lost(self):
        """Flag this track as lost."""
        self.state = TrackState.Lost

    def mark_removed(self):
        """Flag this track as removed."""
        self.state = TrackState.Removed

    @staticmethod
    def reset_id():
        """Rewind the shared track id counter back to zero."""
        BaseTrack._count = 0
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/basetrack.py
Python
unknown
3,675
# Ultralytics YOLO 🚀, AGPL-3.0 license

from collections import deque

import numpy as np

from .basetrack import TrackState
from .byte_tracker import BYTETracker, STrack
from .utils import matching
from .utils.gmc import GMC
from .utils.kalman_filter import KalmanFilterXYWH


class BOTrack(STrack):
    """
    An extended version of the STrack class for YOLOv8, adding object tracking features.

    Attributes:
        shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack.
        smooth_feat (np.ndarray): Smoothed feature vector.
        curr_feat (np.ndarray): Current feature vector.
        features (deque): A deque to store feature vectors with a maximum length defined by `feat_history`.
        alpha (float): Smoothing factor for the exponential moving average of features.
        mean (np.ndarray): The mean state of the Kalman filter.
        covariance (np.ndarray): The covariance matrix of the Kalman filter.

    Methods:
        update_features(feat): Update features vector and smooth it using exponential moving average.
        predict(): Predicts the mean and covariance using Kalman filter.
        re_activate(new_track, frame_id, new_id): Reactivates a track with updated features and optionally new ID.
        update(new_track, frame_id): Update the YOLOv8 instance with new track and frame ID.
        tlwh: Property that gets the current position in tlwh format `(top left x, top left y, width, height)`.
        multi_predict(stracks): Predicts the mean and covariance of multiple object tracks using shared Kalman filter.
        convert_coords(tlwh): Converts tlwh bounding box coordinates to xywh format.
        tlwh_to_xywh(tlwh): Convert bounding box to xywh format `(center x, center y, width, height)`.

    Usage:
        bo_track = BOTrack(tlwh, score, cls, feat)
        bo_track.predict()
        bo_track.update(new_track, frame_id)
    """

    shared_kalman = KalmanFilterXYWH()

    def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
        """Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features."""
        super().__init__(tlwh, score, cls)

        self.smooth_feat = None
        self.curr_feat = None
        if feat is not None:
            self.update_features(feat)
        # Bounded history of raw (normalized) ReID feature vectors.
        self.features = deque([], maxlen=feat_history)
        self.alpha = 0.9  # EMA weight given to the previous smoothed feature

    def update_features(self, feat):
        """Update features vector and smooth it using exponential moving average."""
        feat /= np.linalg.norm(feat)  # L2-normalize in place before storing
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            # smooth = alpha * old + (1 - alpha) * new, then re-normalized below.
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        """Predicts the mean and covariance using Kalman filter."""
        mean_state = self.mean.copy()
        # Zero the velocity components (state indices 6 and 7) for non-tracked states
        # so a lost track does not keep drifting at its last estimated velocity.
        if self.state != TrackState.Tracked:
            mean_state[6] = 0
            mean_state[7] = 0

        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    def re_activate(self, new_track, frame_id, new_id=False):
        """Reactivates a track with updated features and optionally assigns a new ID."""
        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)
        super().re_activate(new_track, frame_id, new_id)

    def update(self, new_track, frame_id):
        """Update the YOLOv8 instance with new track and frame ID."""
        if new_track.curr_feat is not None:
            self.update_features(new_track.curr_feat)
        super().update(new_track, frame_id)

    @property
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y, width, height)`."""
        if self.mean is None:
            # Not yet filtered: fall back to the raw initial box.
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        # KF state stores (center x, center y, w, h); shift center to top-left corner.
        ret[:2] -= ret[2:] / 2
        return ret

    @staticmethod
    def multi_predict(stracks):
        """Predicts the mean and covariance of multiple object tracks using shared Kalman filter."""
        if len(stracks) <= 0:
            return
        multi_mean = np.asarray([st.mean.copy() for st in stracks])
        multi_covariance = np.asarray([st.covariance for st in stracks])
        for i, st in enumerate(stracks):
            # Same velocity-zeroing as in predict(), applied per track before the batch step.
            if st.state != TrackState.Tracked:
                multi_mean[i][6] = 0
                multi_mean[i][7] = 0
        multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
        for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
            stracks[i].mean = mean
            stracks[i].covariance = cov

    def convert_coords(self, tlwh):
        """Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format."""
        return self.tlwh_to_xywh(tlwh)

    @staticmethod
    def tlwh_to_xywh(tlwh):
        """Convert bounding box to format `(center x, center y, width, height)`."""
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        return ret


class BOTSORT(BYTETracker):
    """
    An extended version of the BYTETracker class for YOLOv8, designed for object tracking with ReID and GMC algorithm.

    Attributes:
        proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections.
        appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections.
        encoder (object): Object to handle ReID embeddings, set to None if ReID is not enabled.
        gmc (GMC): An instance of the GMC algorithm for data association.
        args (object): Parsed command-line arguments containing tracking parameters.

    Methods:
        get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking.
        init_track(dets, scores, cls, img): Initialize track with detections, scores, and classes.
        get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID.
        multi_predict(tracks): Predict and track multiple objects with YOLOv8 model.

    Usage:
        bot_sort = BOTSORT(args, frame_rate)
        bot_sort.init_track(dets, scores, cls, img)
        bot_sort.multi_predict(tracks)

    Note:
        The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args.
    """

    def __init__(self, args, frame_rate=30):
        """Initialize YOLOv8 object with ReID module and GMC algorithm."""
        super().__init__(args, frame_rate)
        # ReID module
        self.proximity_thresh = args.proximity_thresh
        self.appearance_thresh = args.appearance_thresh

        if args.with_reid:
            # Haven't supported BoT-SORT(reid) yet
            self.encoder = None
        self.gmc = GMC(method=args.gmc_method)

    def get_kalmanfilter(self):
        """Returns an instance of KalmanFilterXYWH for object tracking."""
        return KalmanFilterXYWH()

    def init_track(self, dets, scores, cls, img=None):
        """Initialize track with detections, scores, and classes."""
        if len(dets) == 0:
            return []
        if self.args.with_reid and self.encoder is not None:
            features_keep = self.encoder.inference(img, dets)
            return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]  # detections
        else:
            return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)]  # detections

    def get_dists(self, tracks, detections):
        """Get distances between tracks and detections using IoU and (optionally) ReID embeddings."""
        dists = matching.iou_distance(tracks, detections)
        # Pairs farther apart than the proximity threshold are never matched by appearance either.
        dists_mask = dists > self.proximity_thresh

        # TODO: mot20
        # if not self.args.mot20:
        dists = matching.fuse_score(dists, detections)

        if self.args.with_reid and self.encoder is not None:
            emb_dists = matching.embedding_distance(tracks, detections) / 2.0
            # Saturate to 1.0 any pair that fails either the appearance or proximity test,
            # then keep the better (smaller) of IoU- and embedding-based distances.
            emb_dists[emb_dists > self.appearance_thresh] = 1.0
            emb_dists[dists_mask] = 1.0
            dists = np.minimum(dists, emb_dists)
        return dists

    def multi_predict(self, tracks):
        """Predict and track multiple objects with YOLOv8 model."""
        BOTrack.multi_predict(tracks)

    def reset(self):
        """Reset tracker."""
        super().reset()
        self.gmc.reset_params()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/bot_sort.py
Python
unknown
8,601
# Ultralytics YOLO 🚀, AGPL-3.0 license

import numpy as np

from .basetrack import BaseTrack, TrackState
from .utils import matching
from .utils.kalman_filter import KalmanFilterXYAH
from ..utils.ops import xywh2ltwh
from ..utils import LOGGER


class STrack(BaseTrack):
    """
    Single object tracking representation that uses Kalman filtering for state estimation.

    This class is responsible for storing all the information regarding individual tracklets and performs state updates
    and predictions based on Kalman filter.

    Attributes:
        shared_kalman (KalmanFilterXYAH): Shared Kalman filter that is used across all STrack instances for prediction.
        _tlwh (np.ndarray): Private attribute to store top-left corner coordinates and width and height of bounding box.
        kalman_filter (KalmanFilterXYAH): Instance of Kalman filter used for this particular object track.
        mean (np.ndarray): Mean state estimate vector.
        covariance (np.ndarray): Covariance of state estimate.
        is_activated (bool): Boolean flag indicating if the track has been activated.
        score (float): Confidence score of the track.
        tracklet_len (int): Length of the tracklet.
        cls (any): Class label for the object.
        idx (int): Index or identifier for the object.
        frame_id (int): Current frame ID.
        start_frame (int): Frame where the object was first detected.

    Methods:
        predict(): Predict the next state of the object using Kalman filter.
        multi_predict(stracks): Predict the next states for multiple tracks.
        multi_gmc(stracks, H): Update multiple track states using a homography matrix.
        activate(kalman_filter, frame_id): Activate a new tracklet.
        re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.
        update(new_track, frame_id): Update the state of a matched track.
        convert_coords(tlwh): Convert bounding box to x-y-aspect-height format.
        tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.
    """

    shared_kalman = KalmanFilterXYAH()

    def __init__(self, xywh, score, cls):
        """Initialize new STrack instance."""
        super().__init__()
        # xywh+idx or xywha+idx
        assert len(xywh) in [5, 6], f"expected 5 or 6 values but got {len(xywh)}"
        self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0
        self.cls = cls
        self.idx = xywh[-1]  # detection index, carried so results can be re-ordered after tracking
        self.angle = xywh[4] if len(xywh) == 6 else None  # rotation angle for OBB inputs only

    def predict(self):
        """Predicts mean and covariance using Kalman filter."""
        mean_state = self.mean.copy()
        # Zero state index 7 (a velocity component) when not actively tracked so a lost
        # track does not keep drifting at its last estimated velocity.
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        """Perform multi-object predictive tracking using Kalman filter for given stracks."""
        if len(stracks) <= 0:
            return
        multi_mean = np.asarray([st.mean.copy() for st in stracks])
        multi_covariance = np.asarray([st.covariance for st in stracks])
        for i, st in enumerate(stracks):
            # Same velocity-zeroing as predict(), applied per track before the batch step.
            if st.state != TrackState.Tracked:
                multi_mean[i][7] = 0
        multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
        for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
            stracks[i].mean = mean
            stracks[i].covariance = cov

    @staticmethod
    def multi_gmc(stracks, H=np.eye(2, 3)):
        """Update state tracks positions and covariances using a homography matrix."""
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])

            R = H[:2, :2]
            # Lift the 2x2 rotation to the full 8-dim KF state via a block-diagonal kron product.
            R8x8 = np.kron(np.eye(4, dtype=float), R)
            t = H[:2, 2]

            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                mean = R8x8.dot(mean)
                mean[:2] += t  # translation applies only to the position components
                cov = R8x8.dot(cov).dot(R8x8.transpose())

                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet."""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # Only tracks born on the very first frame are immediately considered activated;
        # later births must survive a confirmation step first.
        if frame_id == 1:
            self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        """Reactivates a previously lost track with a new detection."""
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.convert_coords(new_track.tlwh)
        )
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()
        self.score = new_track.score
        self.cls = new_track.cls
        self.angle = new_track.angle
        self.idx = new_track.idx

    def update(self, new_track, frame_id):
        """
        Update the state of a matched track.

        Args:
            new_track (STrack): The new track containing updated information.
            frame_id (int): The ID of the current frame.
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.convert_coords(new_tlwh)
        )
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        self.cls = new_track.cls
        self.angle = new_track.angle
        self.idx = new_track.idx

    def convert_coords(self, tlwh):
        """Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
        return self.tlwh_to_xyah(tlwh)

    @property
    def tlwh(self):
        """Get current position in bounding box format (top left x, top left y, width, height)."""
        if self.mean is None:
            # Not yet filtered: fall back to the raw initial box.
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        # KF state stores (cx, cy, aspect, h): recover w = aspect * h, then shift to top-left.
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    def xyxy(self):
        """Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right)."""
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format (center x, center y, aspect ratio, height), where the aspect ratio is width /
        height.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    @property
    def xywh(self):
        """Get current position in bounding box format (center x, center y, width, height)."""
        ret = np.asarray(self.tlwh).copy()
        ret[:2] += ret[2:] / 2
        return ret

    @property
    def xywha(self):
        """Get current position in bounding box format (center x, center y, width, height, angle)."""
        if self.angle is None:
            LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
            return self.xywh
        return np.concatenate([self.xywh, self.angle[None]])

    @property
    def result(self):
        """Get current tracking results."""
        coords = self.xyxy if self.angle is None else self.xywha
        return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]

    def __repr__(self):
        """Return a string representation of the BYTETracker object with start and end frames and track ID."""
        return f"OT_{self.track_id}_({self.start_frame}-{self.end_frame})"


class BYTETracker:
    """
    BYTETracker: A tracking algorithm built on top of YOLOv8 for object detection and tracking.

    The class is responsible for initializing, updating, and managing the tracks for detected objects in a video
    sequence. It maintains the state of tracked, lost, and removed tracks over frames, utilizes Kalman filtering for
    predicting the new object locations, and performs data association.

    Attributes:
        tracked_stracks (list[STrack]): List of successfully activated tracks.
        lost_stracks (list[STrack]): List of lost tracks.
        removed_stracks (list[STrack]): List of removed tracks.
        frame_id (int): The current frame ID.
        args (namespace): Command-line arguments.
        max_time_lost (int): The maximum frames for a track to be considered as 'lost'.
        kalman_filter (object): Kalman Filter object.

    Methods:
        update(results, img=None): Updates object tracker with new detections.
        get_kalmanfilter(): Returns a Kalman filter object for tracking bounding boxes.
        init_track(dets, scores, cls, img=None): Initialize object tracking with detections.
        get_dists(tracks, detections): Calculates the distance between tracks and detections.
        multi_predict(tracks): Predicts the location of tracks.
        reset_id(): Resets the ID counter of STrack.
        joint_stracks(tlista, tlistb): Combines two lists of stracks.
        sub_stracks(tlista, tlistb): Filters out the stracks present in the second list from the first list.
        remove_duplicate_stracks(stracksa, stracksb): Removes duplicate stracks based on IOU.
    """

    def __init__(self, args, frame_rate=30):
        """Initialize a YOLOv8 object to track objects with given arguments and frame rate."""
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.args = args
        # Scale the track buffer by frame rate so "lost" lifetime is constant in wall time.
        self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)
        self.kalman_filter = self.get_kalmanfilter()
        self.reset_id()

    def update(self, results, img=None):
        """Updates object tracker with new detections and returns tracked object bounding boxes."""
        self.frame_id += 1
        activated_stracks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        scores = results.conf
        bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
        # Add index
        bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
        cls = results.cls

        # ByteTrack splits detections into high-confidence (first association) and
        # low-confidence (second association) sets.
        remain_inds = scores > self.args.track_high_thresh
        inds_low = scores > self.args.track_low_thresh
        inds_high = scores < self.args.track_high_thresh

        inds_second = np.logical_and(inds_low, inds_high)
        dets_second = bboxes[inds_second]
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        scores_second = scores[inds_second]
        cls_keep = cls[remain_inds]
        cls_second = cls[inds_second]

        detections = self.init_track(dets, scores_keep, cls_keep, img)
        # Add newly detected tracklets to tracked_stracks
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        # Step 2: First association, with high score detection boxes
        strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        self.multi_predict(strack_pool)
        # Camera-motion compensation (only when a GMC module exists, i.e. BOTSORT subclass).
        if hasattr(self, "gmc") and img is not None:
            warp = self.gmc.apply(img, dets)
            STrack.multi_gmc(strack_pool, warp)
            STrack.multi_gmc(unconfirmed, warp)

        dists = self.get_dists(strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        # Step 3: Second association, with low score detection boxes association the untrack to the low score detections
        detections_second = self.init_track(dets_second, scores_second, cls_second, img)
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        # TODO
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_stracks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        # Any tracked track still unmatched after both associations becomes lost.
        for it in u_track:
            track = r_tracked_stracks[it]
            if track.state != TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        # Deal with unconfirmed tracks, usually tracks with only one beginning frame
        detections = [detections[i] for i in u_detection]
        dists = self.get_dists(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_stracks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        # Step 4: Init new stracks
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.args.new_track_thresh:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_stracks.append(track)
        # Step 5: Update state
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)
        self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)
        self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        self.removed_stracks.extend(removed_stracks)
        if len(self.removed_stracks) > 1000:
            self.removed_stracks = self.removed_stracks[-999:]  # clip remove stracks to 1000 maximum

        return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)

    def get_kalmanfilter(self):
        """Returns a Kalman filter object for tracking bounding boxes."""
        return KalmanFilterXYAH()

    def init_track(self, dets, scores, cls, img=None):
        """Initialize object tracking with detections and scores using STrack algorithm."""
        return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else []  # detections

    def get_dists(self, tracks, detections):
        """Calculates the distance between tracks and detections using IOU and fuses scores."""
        dists = matching.iou_distance(tracks, detections)
        # TODO: mot20
        # if not self.args.mot20:
        dists = matching.fuse_score(dists, detections)
        return dists

    def multi_predict(self, tracks):
        """Returns the predicted tracks using the YOLOv8 network."""
        STrack.multi_predict(tracks)

    @staticmethod
    def reset_id():
        """Resets the ID counter of STrack."""
        STrack.reset_id()

    def reset(self):
        """Reset tracker."""
        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]
        self.frame_id = 0
        self.kalman_filter = self.get_kalmanfilter()
        self.reset_id()

    @staticmethod
    def joint_stracks(tlista, tlistb):
        """Combine two lists of stracks into a single one."""
        exists = {}
        res = []
        for t in tlista:
            exists[t.track_id] = 1
            res.append(t)
        for t in tlistb:
            tid = t.track_id
            if not exists.get(tid, 0):
                exists[tid] = 1
                res.append(t)
        return res

    @staticmethod
    def sub_stracks(tlista, tlistb):
        """DEPRECATED CODE in https://github.com/ultralytics/ultralytics/pull/1890/
        stracks = {t.track_id: t for t in tlista}
        for t in tlistb:
            tid = t.track_id
            if stracks.get(tid, 0):
                del stracks[tid]
        return list(stracks.values())
        """
        track_ids_b = {t.track_id for t in tlistb}
        return [t for t in tlista if t.track_id not in track_ids_b]

    @staticmethod
    def remove_duplicate_stracks(stracksa, stracksb):
        """Remove duplicate stracks with non-maximum IOU distance."""
        pdist = matching.iou_distance(stracksa, stracksb)
        pairs = np.where(pdist < 0.15)
        dupa, dupb = [], []
        for p, q in zip(*pairs):
            # Keep whichever of the two overlapping tracks has the longer lifetime.
            timep = stracksa[p].frame_id - stracksa[p].start_frame
            timeq = stracksb[q].frame_id - stracksb[q].start_frame
            if timep > timeq:
                dupb.append(q)
            else:
                dupa.append(p)
        resa = [t for i, t in enumerate(stracksa) if i not in dupa]
        resb = [t for i, t in enumerate(stracksb) if i not in dupb]
        return resa, resb
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/byte_tracker.py
Python
unknown
18,871
# Ultralytics YOLO 🚀, AGPL-3.0 license

from functools import partial
from pathlib import Path

import torch

from ultralytics.utils import IterableSimpleNamespace, yaml_load
from ultralytics.utils.checks import check_yaml

from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker

# A mapping of tracker types to corresponding tracker classes
TRACKER_MAP = {"bytetrack": BYTETracker, "botsort": BOTSORT}


def on_predict_start(predictor: object, persist: bool = False) -> None:
    """
    Initialize trackers for object tracking during prediction.

    Args:
        predictor (object): The predictor object to initialize trackers for.
        persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.

    Raises:
        AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
    """
    # Reuse the existing trackers when persisting across predict calls
    if hasattr(predictor, "trackers") and persist:
        return

    tracker = check_yaml(predictor.args.tracker)
    cfg = IterableSimpleNamespace(**yaml_load(tracker))

    if cfg.tracker_type not in ["bytetrack", "botsort"]:
        raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'")

    # One independent tracker per batch source (stream), each with its own state
    trackers = []
    for _ in range(predictor.dataset.bs):
        tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
        trackers.append(tracker)
    predictor.trackers = trackers


def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None:
    """
    Postprocess detected boxes and update with object tracking.

    Args:
        predictor (object): The predictor object containing the predictions.
        persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
    """
    bs = predictor.dataset.bs
    path, im0s = predictor.batch[:2]

    is_obb = predictor.args.task == "obb"
    for i in range(bs):
        # Reset per-stream tracker state when the source video changes (unless persisting)
        if not persist and predictor.vid_path[i] != str(predictor.save_dir / Path(path[i]).name):  # new video
            predictor.trackers[i].reset()

        det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
        if len(det) == 0:
            continue
        tracks = predictor.trackers[i].update(det, im0s[i])
        if len(tracks) == 0:
            continue
        # Last column of each track row is the index of the originating detection;
        # reorder results to keep only the detections that were matched to a track
        idx = tracks[:, -1].astype(int)
        predictor.results[i] = predictor.results[i][idx]

        update_args = dict()
        update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1])
        predictor.results[i].update(**update_args)


def register_tracker(model: object, persist: bool) -> None:
    """
    Register tracking callbacks to the model for object tracking during prediction.

    Args:
        model (object): The model object to register tracking callbacks for.
        persist (bool): Whether to persist the trackers if they already exist.
    """
    model.add_callback("on_predict_start", partial(on_predict_start, persist=persist))
    model.add_callback("on_predict_postprocess_end", partial(on_predict_postprocess_end, persist=persist))
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/track.py
Python
unknown
3,091
# Ultralytics YOLO 🚀, AGPL-3.0 license
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/utils/__init__.py
Python
unknown
42
# Ultralytics YOLO 🚀, AGPL-3.0 license

import copy

import cv2
import numpy as np

from ultralytics.utils import LOGGER


class GMC:
    """
    Generalized Motion Compensation (GMC) class for tracking and object detection in video frames.

    Estimates the global (camera) motion between consecutive frames as a 2x3 affine warp matrix,
    using one of several algorithms: ORB or SIFT feature matching, ECC image alignment, or sparse
    optical flow. Frames may be downscaled for computational efficiency.

    Attributes:
        method (str | None): Tracking method. One of 'orb', 'sift', 'ecc', 'sparseOptFlow', or None.
        downscale (int): Factor by which to downscale the frames for processing (>= 1).
        prevFrame (np.ndarray | None): Previous grayscale (possibly downscaled) frame.
        prevKeyPoints (list | None): Keypoints detected in the previous frame.
        prevDescriptors (np.ndarray | None): Descriptors computed for the previous frame.
        initializedFirstFrame (bool): Whether the first frame has been processed.
    """

    def __init__(self, method: str = "sparseOptFlow", downscale: int = 2) -> None:
        """
        Initialize a video tracker with specified parameters.

        Args:
            method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc',
                'sparseOptFlow', 'none'.
            downscale (int): Downscale factor for processing frames; clamped to a minimum of 1.

        Raises:
            ValueError: If `method` is not one of the supported options.
        """
        super().__init__()

        self.method = method
        self.downscale = max(1, int(downscale))

        if self.method == "orb":
            self.detector = cv2.FastFeatureDetector_create(20)
            self.extractor = cv2.ORB_create()
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        elif self.method == "sift":
            self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.matcher = cv2.BFMatcher(cv2.NORM_L2)
        elif self.method == "ecc":
            number_of_iterations = 5000
            termination_eps = 1e-6
            self.warp_mode = cv2.MOTION_EUCLIDEAN
            self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
        elif self.method == "sparseOptFlow":
            self.feature_params = dict(
                maxCorners=1000, qualityLevel=0.01, minDistance=1, blockSize=3, useHarrisDetector=False, k=0.04
            )
        elif self.method in {"none", "None", None}:
            self.method = None
        else:
            raise ValueError(f"Error: Unknown GMC method:{method}")

        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None
        self.initializedFirstFrame = False

    def apply(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Estimate camera motion for a raw frame using the configured method.

        Args:
            raw_frame (np.ndarray): BGR frame of shape (H, W, 3).
            detections (list): Optional detections; for feature-based methods their boxes are
                masked out of the keypoint search region.

        Returns:
            (np.ndarray): 2x3 affine warp matrix (identity when method is None or on the first frame).
        """
        if self.method in {"orb", "sift"}:
            return self.applyFeatures(raw_frame, detections)
        elif self.method == "ecc":
            return self.applyEcc(raw_frame)
        elif self.method == "sparseOptFlow":
            return self.applySparseOptFlow(raw_frame)
        else:
            return np.eye(2, 3)

    def applyEcc(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply the ECC (Enhanced Correlation Coefficient) alignment algorithm to a raw frame.

        Args:
            raw_frame (np.ndarray): BGR frame of shape (H, W, 3).

        Returns:
            (np.ndarray): 2x3 Euclidean warp matrix (identity on the first frame or on failure).
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3, dtype=np.float32)

        # Downscale for speed; blur first to reduce aliasing
        if self.downscale > 1.0:
            frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.initializedFirstFrame = True
            return H

        # Run ECC; the estimated warp is written into H. On failure, H stays identity.
        # NOTE(review): prevFrame is never refreshed after the first frame, so ECC always aligns
        # against frame 0 — this matches the original behavior; confirm it is intended.
        try:
            (_, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
        except Exception as e:
            LOGGER.warning(f"WARNING: find transform failed. Set warp as identity {e}")

        return H

    def applyFeatures(self, raw_frame: np.ndarray, detections: list = None) -> np.ndarray:
        """
        Apply feature-based alignment (ORB or SIFT) to a raw frame.

        Args:
            raw_frame (np.ndarray): BGR frame of shape (H, W, 3).
            detections (list): Optional detections; the first 4 values of each are treated as an
                xyxy box and excluded from keypoint detection.

        Returns:
            (np.ndarray): 2x3 partial-affine warp matrix (identity on the first frame or on failure).
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Detect keypoints, ignoring a 2% border and any detection boxes (moving objects)
        mask = np.zeros_like(frame)
        mask[int(0.02 * height) : int(0.98 * height), int(0.02 * width) : int(0.98 * width)] = 255
        if detections is not None:
            for det in detections:
                tlbr = (det[:4] / self.downscale).astype(np.int_)
                mask[tlbr[1] : tlbr[3], tlbr[0] : tlbr[2]] = 0

        keypoints = self.detector.detect(frame, mask)

        # Compute the descriptors
        keypoints, descriptors = self.extractor.compute(frame, keypoints)

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)
            self.initializedFirstFrame = True
            return H

        # Match descriptors (2-NN so Lowe's ratio test can be applied below)
        knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)

        # Filter matches based on smallest spatial distance
        matches = []
        spatialDistances = []
        maxSpatialDistance = 0.25 * np.array([width, height])

        # Handle empty matches case
        if len(knnMatches) == 0:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)
            return H

        for m, n in knnMatches:
            if m.distance < 0.9 * n.distance:  # Lowe's ratio test
                prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
                currKeyPointLocation = keypoints[m.trainIdx].pt
                spatialDistance = (
                    prevKeyPointLocation[0] - currKeyPointLocation[0],
                    prevKeyPointLocation[1] - currKeyPointLocation[1],
                )
                # Reject matches that moved more than 25% of the frame size
                if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and (
                    np.abs(spatialDistance[1]) < maxSpatialDistance[1]
                ):
                    spatialDistances.append(spatialDistance)
                    matches.append(m)

        # Keep matches whose displacement lies within 2.5 std of the mean displacement
        meanSpatialDistances = np.mean(spatialDistances, 0)
        stdSpatialDistances = np.std(spatialDistances, 0)
        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances

        prevPoints = []
        currPoints = []
        for i in range(len(matches)):
            if inliers[i, 0] and inliers[i, 1]:
                prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                currPoints.append(keypoints[matches[i].trainIdx].pt)

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix
        if prevPoints.shape[0] > 4:
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Rescale translation back to the original (non-downscaled) resolution
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning("WARNING: not enough matching points")

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)
        self.prevDescriptors = copy.copy(descriptors)

        return H

    def applySparseOptFlow(self, raw_frame: np.ndarray) -> np.ndarray:
        """
        Apply the Sparse Optical Flow (Lucas-Kanade) method to a raw frame.

        Args:
            raw_frame (np.ndarray): BGR frame of shape (H, W, 3).

        Returns:
            (np.ndarray): 2x3 partial-affine warp matrix (identity on the first frame or on failure).
        """
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))

        # Find the keypoints
        keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.initializedFirstFrame = True
            return H

        # Find correspondences via pyramidal Lucas-Kanade optical flow
        matchedKeypoints, status, _ = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)

        # Leave good correspondences only
        prevPoints = []
        currPoints = []
        for i in range(len(status)):
            if status[i]:
                prevPoints.append(self.prevKeyPoints[i])
                currPoints.append(matchedKeypoints[i])

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix
        # Fix: the original compared prevPoints.shape[0] with itself (always True);
        # the intent is an equal number of corresponding points in both frames.
        if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == currPoints.shape[0]):
            H, _ = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Rescale translation back to the original (non-downscaled) resolution
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning("WARNING: not enough matching points")

        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)

        return H

    def reset_params(self) -> None:
        """Reset internal state so the next frame is treated as the first frame."""
        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None
        self.initializedFirstFrame = False
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/utils/gmc.py
Python
unknown
13,638
# Ultralytics YOLO 🚀, AGPL-3.0 license

import numpy as np
import scipy.linalg


class KalmanFilterXYAH:
    """
    For bytetrack. A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y), aspect
    ratio a, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding
    box location (x, y, a, h) is taken as direct observation of the state space (linear observation model).
    """

    def __init__(self):
        """Initialize Kalman filter model matrices with motion and observation uncertainty weights."""
        ndim, dt = 4, 1.0

        # Create Kalman filter model matrices: constant-velocity transition (F) and
        # observation matrix (H) that selects the position components (x, y, a, h)
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        self._update_mat = np.eye(ndim, 2 * ndim)

        # Motion and observation uncertainty are chosen relative to the current state estimate. These weights control
        # the amount of uncertainty in the model.
        self._std_weight_position = 1.0 / 20
        self._std_weight_velocity = 1.0 / 160

    def initiate(self, measurement: np.ndarray) -> tuple:
        """
        Create track from unassociated measurement.

        Args:
            measurement (ndarray): Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a,
                and height h.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
                of the new track. Unobserved velocities are initialized to 0 mean.
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        # All position/velocity uncertainties scale with the box height measurement[3];
        # the aspect-ratio terms (1e-2, 1e-5) are small fixed constants
        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3],
        ]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Run Kalman filter prediction step.

        Args:
            mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step.
            covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
                velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3],
        ]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        # x' = F x ; P' = F P F^T + Q
        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Project state distribution to measurement space.

        Args:
            mean (ndarray): The state's mean vector (8 dimensional array).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).

        Returns:
            (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.
        """
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3],
        ]
        innovation_cov = np.diag(np.square(std))

        # z = H x ; S = H P H^T + R
        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def multi_predict(self, mean: np.ndarray, covariance: np.ndarray) -> tuple:
        """
        Run Kalman filter prediction step (Vectorized version).

        Args:
            mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
            covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
                velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 3],
            1e-2 * np.ones_like(mean[:, 3]),
            self._std_weight_position * mean[:, 3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 3],
            1e-5 * np.ones_like(mean[:, 3]),
            self._std_weight_velocity * mean[:, 3],
        ]
        # sqr has shape (N, 8): one diagonal of process noise per track
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)

        # Batched x' = F x and P' = F P F^T + Q across all N tracks
        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

    def update(self, mean: np.ndarray, covariance: np.ndarray, measurement: np.ndarray) -> tuple:
        """
        Run Kalman filter correction step.

        Args:
            mean (ndarray): The predicted state's mean vector (8 dimensional).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).
            measurement (ndarray): The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center
                position, a the aspect ratio, and h the height of the bounding box.

        Returns:
            (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.
        """
        projected_mean, projected_cov = self.project(mean, covariance)

        # Solve for the Kalman gain K via Cholesky factorization of S (cheaper and more
        # stable than inverting S directly): K = P H^T S^-1
        chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False
        ).T
        innovation = measurement - projected_mean

        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(
        self,
        mean: np.ndarray,
        covariance: np.ndarray,
        measurements: np.ndarray,
        only_position: bool = False,
        metric: str = "maha",
    ) -> np.ndarray:
        """
        Compute gating distance between state distribution and measurements. A suitable distance threshold can be
        obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom,
        otherwise 2.

        Args:
            mean (ndarray): Mean vector over the state distribution (8 dimensional).
            covariance (ndarray): Covariance of the state distribution (8x8 dimensional).
            measurements (ndarray): An Nx4 matrix of N measurements, each in format (x, y, a, h) where (x, y) is the
                bounding box center position, a the aspect ratio, and h the height.
            only_position (bool, optional): If True, distance computation is done with respect to the bounding box
                center position only. Defaults to False.
            metric (str, optional): The metric to use for calculating the distance. Options are 'gaussian' for the
                squared Euclidean distance and 'maha' for the squared Mahalanobis distance. Defaults to 'maha'.

        Returns:
            (np.ndarray): Returns an array of length N, where the i-th element contains the squared distance between
                (mean, covariance) and `measurements[i]`.

        Raises:
            ValueError: If `metric` is neither 'gaussian' nor 'maha'.
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]

        d = measurements - mean
        if metric == "gaussian":
            return np.sum(d * d, axis=1)
        elif metric == "maha":
            # Mahalanobis distance via triangular solve: d^T S^-1 d = ||L^-1 d||^2
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
            return np.sum(z * z, axis=0)  # square maha
        else:
            raise ValueError("Invalid distance metric")


class KalmanFilterXYWH(KalmanFilterXYAH):
    """
    For BoT-SORT. A simple Kalman filter for tracking bounding boxes in image space.

    The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y), width
    w, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box
    location (x, y, w, h) is taken as direct observation of the state space (linear observation model).
    """

    def initiate(self, measurement: np.ndarray) -> tuple:
        """
        Create track from unassociated measurement.

        Args:
            measurement (ndarray): Bounding box coordinates (x, y, w, h) with center position (x, y), width, and
                height.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional)
                of the new track. Unobserved velocities are initialized to 0 mean.
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        # Unlike XYAH, x/w terms scale with width (measurement[2]) and y/h terms with height (measurement[3])
        std = [
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[2],
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[2],
            10 * self._std_weight_velocity * measurement[3],
        ]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance) -> tuple:
        """
        Run Kalman filter prediction step.

        Args:
            mean (ndarray): The 8 dimensional mean vector of the object state at the previous time step.
            covariance (ndarray): The 8x8 dimensional covariance matrix of the object state at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
                velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[2],
            self._std_weight_velocity * mean[3],
        ]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(mean, self._motion_mat.T)
        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance

    def project(self, mean, covariance) -> tuple:
        """
        Project state distribution to measurement space.

        Args:
            mean (ndarray): The state's mean vector (8 dimensional array).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).

        Returns:
            (tuple[ndarray, ndarray]): Returns the projected mean and covariance matrix of the given state estimate.
        """
        std = [
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[2],
            self._std_weight_position * mean[3],
        ]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def multi_predict(self, mean, covariance) -> tuple:
        """
        Run Kalman filter prediction step (Vectorized version).

        Args:
            mean (ndarray): The Nx8 dimensional mean matrix of the object states at the previous time step.
            covariance (ndarray): The Nx8x8 covariance matrix of the object states at the previous time step.

        Returns:
            (tuple[ndarray, ndarray]): Returns the mean vector and covariance matrix of the predicted state. Unobserved
                velocities are initialized to 0 mean.
        """
        std_pos = [
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 2],
            self._std_weight_position * mean[:, 3],
        ]
        std_vel = [
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 2],
            self._std_weight_velocity * mean[:, 3],
        ]
        sqr = np.square(np.r_[std_pos, std_vel]).T

        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)

        mean = np.dot(mean, self._motion_mat.T)
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov

        return mean, covariance

    def update(self, mean, covariance, measurement) -> tuple:
        """
        Run Kalman filter correction step.

        Args:
            mean (ndarray): The predicted state's mean vector (8 dimensional).
            covariance (ndarray): The state's covariance matrix (8x8 dimensional).
            measurement (ndarray): The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center
                position, w the width, and h the height of the bounding box.

        Returns:
            (tuple[ndarray, ndarray]): Returns the measurement-corrected state distribution.
        """
        # Correction math is identical to XYAH; only the noise scaling (project/predict) differs
        return super().update(mean, covariance, measurement)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/utils/kalman_filter.py
Python
unknown
15,168
# Ultralytics YOLO 🚀, AGPL-3.0 license

import numpy as np
import scipy
from scipy.spatial.distance import cdist

from ultralytics.utils.metrics import bbox_ioa, batch_probiou

try:
    import lap  # for linear_assignment

    assert lap.__version__  # verify package is not directory
except (ImportError, AssertionError, AttributeError):
    from ultralytics.utils.checks import check_requirements

    check_requirements("lapx>=0.5.2")  # update to lap package from https://github.com/rathaROG/lapx
    import lap


def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple:
    """
    Perform linear assignment using scipy or lap.lapjv.

    Args:
        cost_matrix (np.ndarray): The matrix containing cost values for assignments.
        thresh (float): Threshold for considering an assignment valid.
        use_lap (bool, optional): Whether to use lap.lapjv. Defaults to True.

    Returns:
        Tuple with:
            - matched indices
            - unmatched indices from 'a'
            - unmatched indices from 'b'
    """
    if cost_matrix.size == 0:
        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))

    if use_lap:
        # Use lap.lapjv
        # https://github.com/gatagat/lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
        matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
        unmatched_a = np.where(x < 0)[0]
        unmatched_b = np.where(y < 0)[0]
    else:
        # Use scipy.optimize.linear_sum_assignment
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
        # NOTE(review): only `import scipy` appears at the top of this file; `scipy.optimize` is
        # reached via SciPy's lazy submodule loading — confirm the minimum supported SciPy version.
        x, y = scipy.optimize.linear_sum_assignment(cost_matrix)  # row x, col y
        matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh])
        if len(matches) == 0:
            unmatched_a = list(np.arange(cost_matrix.shape[0]))
            unmatched_b = list(np.arange(cost_matrix.shape[1]))
        else:
            unmatched_a = list(set(np.arange(cost_matrix.shape[0])) - set(matches[:, 0]))
            unmatched_b = list(set(np.arange(cost_matrix.shape[1])) - set(matches[:, 1]))

    return matches, unmatched_a, unmatched_b


def iou_distance(atracks: list, btracks: list) -> np.ndarray:
    """
    Compute cost based on Intersection over Union (IoU) between tracks.

    Args:
        atracks (list[STrack] | list[np.ndarray]): List of tracks 'a' or bounding boxes.
        btracks (list[STrack] | list[np.ndarray]): List of tracks 'b' or bounding boxes.

    Returns:
        (np.ndarray): Cost matrix computed based on IoU.
    """
    # Inputs may be raw box arrays or track objects; extract boxes in the latter case
    if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
        atlbrs = atracks
        btlbrs = btracks
    else:
        # 5-element xywha (rotated box) when the track has an angle, else 4-element xyxy
        atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks]
        btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks]

    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
    if len(atlbrs) and len(btlbrs):
        # Rotated boxes (5 values) use probabilistic IoU; axis-aligned boxes use standard IoU
        if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5:
            ious = batch_probiou(
                np.ascontiguousarray(atlbrs, dtype=np.float32),
                np.ascontiguousarray(btlbrs, dtype=np.float32),
            ).numpy()
        else:
            ious = bbox_ioa(
                np.ascontiguousarray(atlbrs, dtype=np.float32),
                np.ascontiguousarray(btlbrs, dtype=np.float32),
                iou=True,
            )
    return 1 - ious  # cost matrix


def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -> np.ndarray:
    """
    Compute distance between tracks and detections based on embeddings.

    Args:
        tracks (list[STrack]): List of tracks.
        detections (list[BaseTrack]): List of detections.
        metric (str, optional): Metric for distance computation. Defaults to 'cosine'.

    Returns:
        (np.ndarray): Cost matrix computed based on embeddings.
    """
    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
    # for i, track in enumerate(tracks):
    # cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
    # Clamp at 0 since cosine distance can be slightly negative for near-identical features
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Normalized features
    return cost_matrix


def fuse_score(cost_matrix: np.ndarray, detections: list) -> np.ndarray:
    """
    Fuses cost matrix with detection scores to produce a single similarity matrix.

    Args:
        cost_matrix (np.ndarray): The matrix containing cost values for assignments.
        detections (list[BaseTrack]): List of detections with scores.

    Returns:
        (np.ndarray): Fused similarity matrix.
    """
    if cost_matrix.size == 0:
        return cost_matrix
    iou_sim = 1 - cost_matrix
    det_scores = np.array([det.score for det in detections])
    # Broadcast each detection's confidence across all track rows, then weight IoU similarity by it
    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
    fuse_sim = iou_sim * det_scores
    return 1 - fuse_sim  # fuse_cost
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/trackers/utils/matching.py
Python
unknown
5,404
# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib
import inspect
import logging.config
import os
import platform
import re
import subprocess
import sys
import threading
import time
import urllib
import uuid
from pathlib import Path
from types import SimpleNamespace
from typing import Union

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import yaml
from tqdm import tqdm as tqdm_original

from ultralytics import __version__

# PyTorch Multi-GPU DDP Constants
RANK = int(os.getenv("RANK", -1))
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # https://pytorch.org/docs/stable/elastic/run.html

# Other Constants
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLO
ASSETS = ROOT / "assets"  # default images
DEFAULT_CFG_PATH = ROOT / "cfg/default.yaml"
NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads
AUTOINSTALL = str(os.getenv("YOLO_AUTOINSTALL", True)).lower() == "true"  # global auto-install mode
VERBOSE = str(os.getenv("YOLO_VERBOSE", True)).lower() == "true"  # global verbose mode
TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" if VERBOSE else None  # tqdm bar format
LOGGING_NAME = "ultralytics"
MACOS, LINUX, WINDOWS = (platform.system() == x for x in ["Darwin", "Linux", "Windows"])  # environment booleans
ARM64 = platform.machine() in ("arm64", "aarch64")  # ARM64 booleans
HELP_MSG = """
    Usage examples for running YOLOv8:

    1. Install the ultralytics package:

        pip install ultralytics

    2. Use the Python SDK:

        from ultralytics import YOLO

        # Load a model
        model = YOLO('yolov8n.yaml')  # build a new model from scratch
        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

        # Use the model
        results = model.train(data="coco128.yaml", epochs=3)  # train the model
        results = model.val()  # evaluate model performance on the validation set
        results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
        success = model.export(format='onnx')  # export the model to ONNX format

    3. Use the command line interface (CLI):

        YOLOv8 'yolo' CLI commands use the following syntax:

            yolo TASK MODE ARGS

            Where   TASK (optional) is one of [detect, segment, classify]
                    MODE (required) is one of [train, val, predict, export]
                    ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
                        See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'

        - Train a detection model for 10 epochs with an initial learning_rate of 0.01
            yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01

        - Predict a YouTube video using a pretrained segmentation model at image size 320:
            yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320

        - Val a pretrained detection model at batch-size 1 and image size 640:
            yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640

        - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
            yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128

        - Run special commands:
            yolo help
            yolo checks
            yolo version
            yolo settings
            yolo copy-cfg
            yolo cfg

    Docs: https://docs.ultralytics.com
    Community: https://community.ultralytics.com
    GitHub: https://github.com/ultralytics/ultralytics
    """

# Settings
torch.set_printoptions(linewidth=320, precision=4, profile="default")
np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})  # format short g, %precision=5
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)  # NumExpr max threads
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # for deterministic training
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # suppress verbose TF compiler warnings in Colab


class TQDM(tqdm_original):
    """
    Custom Ultralytics tqdm class with different default arguments.

    Args:
        *args (list): Positional arguments passed to original tqdm.
        **kwargs (dict): Keyword arguments, with custom defaults applied.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize custom Ultralytics tqdm class with different default arguments.

        Note these can still be overridden when calling TQDM.
        """
        kwargs["disable"] = not VERBOSE or kwargs.get("disable", False)  # logical 'and' with default value if passed
        kwargs.setdefault("bar_format", TQDM_BAR_FORMAT)  # override default value if passed
        super().__init__(*args, **kwargs)


class SimpleClass:
    """Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute
    access methods for easier debugging and usage.
    """

    def __str__(self):
        """Return a human-readable string representation of the object."""
        attr = []
        for a in dir(self):
            v = getattr(self, a)
            if not callable(v) and not a.startswith("_"):
                if isinstance(v, SimpleClass):
                    # Display only the module and class name for subclasses
                    s = f"{a}: {v.__module__}.{v.__class__.__name__} object"
                else:
                    s = f"{a}: {repr(v)}"
                attr.append(s)
        return f"{self.__module__}.{self.__class__.__name__} object with attributes:\n\n" + "\n".join(attr)

    def __repr__(self):
        """Return a machine-readable string representation of the object."""
        return self.__str__()

    def __getattr__(self, attr):
        """Custom attribute access error message with helpful information."""
        name = self.__class__.__name__
        raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")


class IterableSimpleNamespace(SimpleNamespace):
    """Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and
    enables usage with dict() and for loops.
    """

    def __iter__(self):
        """Return an iterator of key-value pairs from the namespace's attributes."""
        return iter(vars(self).items())

    def __str__(self):
        """Return a human-readable string representation of the object."""
        return "\n".join(f"{k}={v}" for k, v in vars(self).items())

    def __getattr__(self, attr):
        """Custom attribute access error message with helpful information."""
        name = self.__class__.__name__
        raise AttributeError(
            f"""
            '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics
            'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace
            {DEFAULT_CFG_PATH} with the latest version from
            https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml
            """
        )

    def get(self, key, default=None):
        """Return the value of the specified key if it exists; otherwise, return the default value."""
        return getattr(self, key, default)


def plt_settings(rcparams=None, backend="Agg"):
    """
    Decorator to temporarily set rc parameters and the backend for a plotting function.

    Example:
        decorator: @plt_settings({"font.size": 12})
        context manager: with plt_settings({"font.size": 12}):

    Args:
        rcparams (dict): Dictionary of rc parameters to set.
        backend (str, optional): Name of the backend to use. Defaults to 'Agg'.

    Returns:
        (Callable): Decorated function with temporarily set rc parameters and backend. This decorator can be
            applied to any function that needs to have specific matplotlib rc parameters and backend for its execution.
    """
    if rcparams is None:
        rcparams = {"font.size": 11}

    def decorator(func):
        """Decorator to apply temporary rc parameters and backend to a function."""

        def wrapper(*args, **kwargs):
            """Sets rc parameters and backend, calls the original function, and restores the settings."""
            original_backend = plt.get_backend()
            if backend != original_backend:
                plt.close("all")  # auto-close()ing of figures upon backend switching is deprecated since 3.8
                plt.switch_backend(backend)

            with plt.rc_context(rcparams):
                result = func(*args, **kwargs)

            if backend != original_backend:
                plt.close("all")
                plt.switch_backend(original_backend)
            return result

        return wrapper

    return decorator


def set_logging(name=LOGGING_NAME, verbose=True):
    """Sets up logging for the given name with UTF-8 encoding support."""
    level = logging.INFO if verbose and RANK in {-1, 0} else logging.ERROR  # rank in world for Multi-GPU trainings

    # Configure the console (stdout) encoding to UTF-8
    formatter = logging.Formatter("%(message)s")  # Default formatter
    if WINDOWS and sys.stdout.encoding != "utf-8":
        try:
            if hasattr(sys.stdout, "reconfigure"):
                sys.stdout.reconfigure(encoding="utf-8")
            elif hasattr(sys.stdout, "buffer"):
                import io

                sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
            else:
                sys.stdout.encoding = "utf-8"
        except Exception as e:
            print(f"Creating custom formatter for non UTF-8 environments due to {e}")

            class CustomFormatter(logging.Formatter):
                def format(self, record):
                    """Sets up logging with UTF-8 encoding and configurable verbosity."""
                    return emojis(super().format(record))

            formatter = CustomFormatter("%(message)s")  # Use CustomFormatter to eliminate UTF-8 output as last recourse

    # Create and configure the StreamHandler
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(level)

    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(stream_handler)
    logger.propagate = False
    return logger


# Set logger
LOGGER = set_logging(LOGGING_NAME, verbose=VERBOSE)  # define globally (used in train.py, val.py, predict.py, etc.)
for logger in "sentry_sdk", "urllib3.connectionpool":
    logging.getLogger(logger).setLevel(logging.CRITICAL + 1)


def emojis(string=""):
    """Return platform-dependent emoji-safe version of string."""
    return string.encode().decode("ascii", "ignore") if WINDOWS else string


class ThreadingLocked:
    """
    A decorator class for ensuring thread-safe execution of a function or method. This class can be used as a decorator
    to make sure that if the decorated function is called from multiple threads, only one thread at a time will be able
    to execute the function.

    Attributes:
        lock (threading.Lock): A lock object used to manage access to the decorated function.

    Example:
        ```python
        from ultralytics.utils import ThreadingLocked

        @ThreadingLocked()
        def my_function():
            # Your code here
            pass
        ```
    """

    def __init__(self):
        """Initializes the decorator class for thread-safe execution of a function or method."""
        self.lock = threading.Lock()

    def __call__(self, f):
        """Run thread-safe execution of function or method."""
        from functools import wraps

        @wraps(f)
        def decorated(*args, **kwargs):
            """Applies thread-safety to the decorated function or method."""
            with self.lock:
                return f(*args, **kwargs)

        return decorated


def yaml_save(file="data.yaml", data=None, header=""):
    """
    Save YAML data to a file.

    Args:
        file (str, optional): File name. Default is 'data.yaml'.
        data (dict): Data to save in YAML format.
        header (str, optional): YAML header to add.

    Returns:
        (None): Data is saved to the specified file.
    """
    if data is None:
        data = {}
    file = Path(file)
    if not file.parent.exists():
        # Create parent directories if they don't exist
        file.parent.mkdir(parents=True, exist_ok=True)

    # Convert Path objects to strings
    valid_types = int, float, str, bool, list, tuple, dict, type(None)
    for k, v in data.items():
        if not isinstance(v, valid_types):
            data[k] = str(v)

    # Dump data to file in YAML format
    with open(file, "w", errors="ignore", encoding="utf-8") as f:
        if header:
            f.write(header)
        yaml.safe_dump(data, f, sort_keys=False, allow_unicode=True)


def yaml_load(file="data.yaml", append_filename=False):
    """
    Load YAML data from a file.

    Args:
        file (str, optional): File name. Default is 'data.yaml'.
        append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False.

    Returns:
        (dict): YAML data and file name.
    """
    assert Path(file).suffix in (".yaml", ".yml"), f"Attempting to load non-YAML file {file} with yaml_load()"
    with open(file, errors="ignore", encoding="utf-8") as f:
        s = f.read()  # string

        # Remove special characters
        if not s.isprintable():
            s = re.sub(r"[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+", "", s)

        # Add YAML filename to dict and return
        data = yaml.safe_load(s) or {}  # always return a dict (yaml.safe_load() may return None for empty files)
        if append_filename:
            data["yaml_file"] = str(file)
        return data


def yaml_print(yaml_file: Union[str, Path, dict]) -> None:
    """
    Pretty prints a YAML file or a YAML-formatted dictionary.

    Args:
        yaml_file: The file path of the YAML file or a YAML-formatted dictionary.

    Returns:
        (None)
    """
    yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file
    dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True)
    LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}")


# Default configuration
DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH)
for k, v in DEFAULT_CFG_DICT.items():
    if isinstance(v, str) and v.lower() == "none":
        DEFAULT_CFG_DICT[k] = None
DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys()
DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT)


def is_ubuntu() -> bool:
    """
    Check if the OS is Ubuntu.

    Returns:
        (bool): True if OS is Ubuntu, False otherwise.
    """
    with contextlib.suppress(FileNotFoundError):
        with open("/etc/os-release") as f:
            return "ID=ubuntu" in f.read()
    return False


def is_colab():
    """
    Check if the current script is running inside a Google Colab notebook.

    Returns:
        (bool): True if running inside a Colab notebook, False otherwise.
    """
    return "COLAB_RELEASE_TAG" in os.environ or "COLAB_BACKEND_VERSION" in os.environ


def is_kaggle():
    """
    Check if the current script is running inside a Kaggle kernel.

    Returns:
        (bool): True if running inside a Kaggle kernel, False otherwise.
    """
    return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com"


def is_jupyter():
    """
    Check if the current script is running inside a Jupyter Notebook. Verified on Colab, Jupyterlab, Kaggle, Paperspace.

    Returns:
        (bool): True if running inside a Jupyter Notebook, False otherwise.
    """
    with contextlib.suppress(Exception):
        from IPython import get_ipython

        return get_ipython() is not None
    return False


def is_docker() -> bool:
    """
    Determine if the script is running inside a Docker container.

    Returns:
        (bool): True if the script is running inside a Docker container, False otherwise.
    """
    file = Path("/proc/self/cgroup")
    if file.exists():
        with open(file) as f:
            return "docker" in f.read()
    else:
        return False


def is_online() -> bool:
    """
    Check internet connectivity by attempting to connect to a known online host.

    Returns:
        (bool): True if connection is successful, False otherwise.
    """
    import socket

    for host in "1.1.1.1", "8.8.8.8", "223.5.5.5":  # Cloudflare, Google, AliDNS:
        try:
            test_connection = socket.create_connection(address=(host, 53), timeout=2)
        except (socket.timeout, socket.gaierror, OSError):
            continue
        else:
            # If the connection was successful, close it to avoid a ResourceWarning
            test_connection.close()
            return True
    return False


ONLINE = is_online()


def is_pip_package(filepath: str = __name__) -> bool:
    """
    Determines if the file at the given filepath is part of a pip package.

    Args:
        filepath (str): The filepath to check.

    Returns:
        (bool): True if the file is part of a pip package, False otherwise.
    """
    import importlib.util

    # Get the spec for the module
    spec = importlib.util.find_spec(filepath)

    # Return whether the spec is not None and the origin is not None (indicating it is a package)
    return spec is not None and spec.origin is not None


def is_dir_writeable(dir_path: Union[str, Path]) -> bool:
    """
    Check if a directory is writeable.

    Args:
        dir_path (str | Path): The path to the directory.

    Returns:
        (bool): True if the directory is writeable, False otherwise.
    """
    return os.access(str(dir_path), os.W_OK)


def is_pytest_running():
    """
    Determines whether pytest is currently running or not.

    Returns:
        (bool): True if pytest is running, False otherwise.
    """
    return ("PYTEST_CURRENT_TEST" in os.environ) or ("pytest" in sys.modules) or ("pytest" in Path(sys.argv[0]).stem)


def is_github_action_running() -> bool:
    """
    Determine if the current environment is a GitHub Actions runner.

    Returns:
        (bool): True if the current environment is a GitHub Actions runner, False otherwise.
    """
    return "GITHUB_ACTIONS" in os.environ and "GITHUB_WORKFLOW" in os.environ and "RUNNER_OS" in os.environ


def is_git_dir():
    """
    Determines whether the current file is part of a git repository. If the current file is not part of a git
    repository, returns None.

    Returns:
        (bool): True if current file is part of a git repository.
    """
    return get_git_dir() is not None


def get_git_dir():
    """
    Determines whether the current file is part of a git repository and if so, returns the repository root directory. If
    the current file is not part of a git repository, returns None.

    Returns:
        (Path | None): Git root directory if found or None if not found.
    """
    for d in Path(__file__).parents:
        if (d / ".git").is_dir():
            return d


def get_git_origin_url():
    """
    Retrieves the origin URL of a git repository.

    Returns:
        (str | None): The origin URL of the git repository or None if not git directory.
    """
    if is_git_dir():
        with contextlib.suppress(subprocess.CalledProcessError):
            origin = subprocess.check_output(["git", "config", "--get", "remote.origin.url"])
            return origin.decode().strip()


def get_git_branch():
    """
    Returns the current git branch name. If not in a git repository, returns None.

    Returns:
        (str | None): The current git branch name or None if not a git directory.
    """
    if is_git_dir():
        with contextlib.suppress(subprocess.CalledProcessError):
            origin = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
            return origin.decode().strip()


def get_default_args(func):
    """
    Returns a dictionary of default arguments for a function.

    Args:
        func (callable): The function to inspect.

    Returns:
        (dict): A dictionary where each key is a parameter name, and each value is the default value of that parameter.
    """
    signature = inspect.signature(func)
    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}


def get_ubuntu_version():
    """
    Retrieve the Ubuntu version if the OS is Ubuntu.

    Returns:
        (str): Ubuntu version or None if not an Ubuntu OS.
    """
    if is_ubuntu():
        with contextlib.suppress(FileNotFoundError, AttributeError):
            with open("/etc/os-release") as f:
                return re.search(r'VERSION_ID="(\d+\.\d+)"', f.read())[1]


def get_user_config_dir(sub_dir="Ultralytics"):
    """
    Return the appropriate config directory based on the environment operating system.

    Args:
        sub_dir (str): The name of the subdirectory to create.

    Returns:
        (Path): The path to the user config directory.
    """
    if WINDOWS:
        path = Path.home() / "AppData" / "Roaming" / sub_dir
    elif MACOS:  # macOS
        path = Path.home() / "Library" / "Application Support" / sub_dir
    elif LINUX:
        path = Path.home() / ".config" / sub_dir
    else:
        raise ValueError(f"Unsupported operating system: {platform.system()}")

    # GCP and AWS lambda fix, only /tmp is writeable
    if not is_dir_writeable(path.parent):
        LOGGER.warning(
            f"WARNING ⚠️ user config directory '{path}' is not writeable, defaulting to '/tmp' or CWD. "
            "Alternatively you can define a YOLO_CONFIG_DIR environment variable for this path."
        )
        path = Path("/tmp") / sub_dir if is_dir_writeable("/tmp") else Path().cwd() / sub_dir

    # Create the subdirectory if it does not exist
    path.mkdir(parents=True, exist_ok=True)
    return path


USER_CONFIG_DIR = Path(os.getenv("YOLO_CONFIG_DIR") or get_user_config_dir())  # Ultralytics settings dir
SETTINGS_YAML = USER_CONFIG_DIR / "settings.yaml"


def colorstr(*input):
    """
    Colors a string based on the provided color and style arguments. Utilizes ANSI escape codes.
    See https://en.wikipedia.org/wiki/ANSI_escape_code for more details.

    This function can be called in two ways:
        - colorstr('color', 'style', 'your string')
        - colorstr('your string')

    In the second form, 'blue' and 'bold' will be applied by default.

    Args:
        *input (str): A sequence of strings where the first n-1 strings are color and style arguments,
                      and the last string is the one to be colored.

    Supported Colors and Styles:
        Basic Colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
        Bright Colors: 'bright_black', 'bright_red', 'bright_green', 'bright_yellow',
                       'bright_blue', 'bright_magenta', 'bright_cyan', 'bright_white'
        Misc: 'end', 'bold', 'underline'

    Returns:
        (str): The input string wrapped with ANSI escape codes for the specified color and style.

    Examples:
        >>> colorstr('blue', 'bold', 'hello world')
        >>> '\033[34m\033[1mhello world\033[0m'
    """
    *args, string = input if len(input) > 1 else ("blue", "bold", input[0])  # color arguments, string
    colors = {
        "black": "\033[30m",  # basic colors
        "red": "\033[31m",
        "green": "\033[32m",
        "yellow": "\033[33m",
        "blue": "\033[34m",
        "magenta": "\033[35m",
        "cyan": "\033[36m",
        "white": "\033[37m",
        "bright_black": "\033[90m",  # bright colors
        "bright_red": "\033[91m",
        "bright_green": "\033[92m",
        "bright_yellow": "\033[93m",
        "bright_blue": "\033[94m",
        "bright_magenta": "\033[95m",
        "bright_cyan": "\033[96m",
        "bright_white": "\033[97m",
        "end": "\033[0m",  # misc
        "bold": "\033[1m",
        "underline": "\033[4m",
    }
    return "".join(colors[x] for x in args) + f"{string}" + colors["end"]


def remove_colorstr(input_string):
    """
    Removes ANSI escape codes from a string, effectively un-coloring it.

    Args:
        input_string (str): The string to remove color and style from.

    Returns:
        (str): A new string with all ANSI escape codes removed.

    Examples:
        >>> remove_colorstr(colorstr('blue', 'bold', 'hello world'))
        >>> 'hello world'
    """
    ansi_escape = re.compile(r"\x1B\[[0-9;]*[A-Za-z]")
    return ansi_escape.sub("", input_string)


class TryExcept(contextlib.ContextDecorator):
    """
    Ultralytics TryExcept class. Use as @TryExcept() decorator or 'with TryExcept():' context manager.

    Examples:
        As a decorator:
        >>> @TryExcept(msg="Error occurred in func", verbose=True)
        >>> def func():
        >>>     # Function logic here
        >>>     pass

        As a context manager:
        >>> with TryExcept(msg="Error occurred in block", verbose=True):
        >>>     # Code block here
        >>>     pass
    """

    def __init__(self, msg="", verbose=True):
        """Initialize TryExcept class with optional message and verbosity settings."""
        self.msg = msg
        self.verbose = verbose

    def __enter__(self):
        """Executes when entering TryExcept context, initializes instance."""
        pass

    def __exit__(self, exc_type, value, traceback):
        """Defines behavior when exiting a 'with' block, prints error message if necessary."""
        if self.verbose and value:
            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
        return True


class Retry(contextlib.ContextDecorator):
    """
    Retry class for function execution with exponential backoff.

    Can be used as a decorator or a context manager to retry a function or block of code on exceptions, up to a
    specified number of times with an exponentially increasing delay between retries.

    Examples:
        Example usage as a decorator:
        >>> @Retry(times=3, delay=2)
        >>> def test_func():
        >>>     # Replace with function logic that may raise exceptions
        >>>     return True

        Example usage as a context manager:
        >>> with Retry(times=3, delay=2):
        >>>     # Replace with code block that may raise exceptions
        >>>     pass
    """

    def __init__(self, times=3, delay=2):
        """Initialize Retry class with specified number of retries and delay."""
        self.times = times
        self.delay = delay
        self._attempts = 0

    def __call__(self, func):
        """Decorator implementation for Retry with exponential backoff."""

        def wrapped_func(*args, **kwargs):
            self._attempts = 0
            while self._attempts < self.times:
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    self._attempts += 1
                    print(f"Retry {self._attempts}/{self.times} failed: {e}")
                    if self._attempts >= self.times:
                        raise e
                    time.sleep(self.delay * (2**self._attempts))  # exponential backoff delay

        return wrapped_func

    def __enter__(self):
        """Enter the runtime context related to this object."""
        self._attempts = 0

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the runtime context related to this object with exponential backoff."""
        if exc_type is not None:
            self._attempts += 1
            if self._attempts < self.times:
                print(f"Retry {self._attempts}/{self.times} failed: {exc_value}")
                time.sleep(self.delay * (2**self._attempts))  # exponential backoff delay
                return True  # Suppresses the exception and retries
        return False  # Re-raises the exception if retries are exhausted


def threaded(func):
    """
    Multi-threads a target function by default and returns the thread or function result.

    Use as @threaded decorator. The function runs in a separate thread unless 'threaded=False' is passed.
    """

    def wrapper(*args, **kwargs):
        """Multi-threads a given function based on 'threaded' kwarg and returns the thread or function result."""
        if kwargs.pop("threaded", True):  # run in thread
            thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
            thread.start()
            return thread
        else:
            return func(*args, **kwargs)

    return wrapper


def set_sentry():
    """
    Initialize the Sentry SDK for error tracking and reporting. Only used if sentry_sdk package is installed and
    sync=True in settings. Run 'yolo settings' to see and update settings YAML file.

    Conditions required to send errors (ALL conditions must be met or no errors will be reported):
        - sentry_sdk package is installed
        - sync=True in YOLO settings
        - pytest is not running
        - running in a pip package installation
        - running in a non-git directory
        - running with rank -1 or 0
        - online environment
        - CLI used to run package (checked with 'yolo' as the name of the main CLI command)

    The function also configures Sentry SDK to ignore KeyboardInterrupt and FileNotFoundError exceptions and to exclude
    events with 'out of memory' in their exception message.

    Additionally, the function sets custom tags and user information for Sentry events.
    """

    def before_send(event, hint):
        """
        Modify the event before sending it to Sentry based on specific exception types and messages.

        Args:
            event (dict): The event dictionary containing information about the error.
            hint (dict): A dictionary containing additional information about the error.

        Returns:
            dict: The modified event or None if the event should not be sent to Sentry.
        """
        if "exc_info" in hint:
            exc_type, exc_value, tb = hint["exc_info"]
            if exc_type in (KeyboardInterrupt, FileNotFoundError) or "out of memory" in str(exc_value):
                return None  # do not send event

        event["tags"] = {
            "sys_argv": sys.argv[0],
            "sys_argv_name": Path(sys.argv[0]).name,
            "install": "git" if is_git_dir() else "pip" if is_pip_package() else "other",
            "os": ENVIRONMENT,
        }
        return event

    if (
        SETTINGS["sync"]
        and RANK in (-1, 0)
        and Path(sys.argv[0]).name == "yolo"
        and not TESTS_RUNNING
        and ONLINE
        and is_pip_package()
        and not is_git_dir()
    ):
        # If sentry_sdk package is not installed then return and do not use Sentry
        try:
            import sentry_sdk  # noqa
        except ImportError:
            return

        sentry_sdk.init(
            dsn="https://5ff1556b71594bfea135ff0203a0d290@o4504521589325824.ingest.sentry.io/4504521592406016",
            debug=False,
            traces_sample_rate=1.0,
            release=__version__,
            environment="production",  # 'dev' or 'production'
            before_send=before_send,
            ignore_errors=[KeyboardInterrupt, FileNotFoundError],
        )
        sentry_sdk.set_user({"id": SETTINGS["uuid"]})  # SHA-256 anonymized UUID hash


class SettingsManager(dict):
    """
    Manages Ultralytics settings stored in a YAML file.

    Args:
        file (str | Path): Path to the Ultralytics settings YAML file. Default is USER_CONFIG_DIR / 'settings.yaml'.
        version (str): Settings version. In case of local version mismatch, new default settings will be saved.
    """

    def __init__(self, file=SETTINGS_YAML, version="0.0.4"):
        """Initialize the SettingsManager with default settings, load and validate current settings from the YAML
        file.
        """
        import copy
        import hashlib

        from ultralytics.utils.checks import check_version
        from ultralytics.utils.torch_utils import torch_distributed_zero_first

        git_dir = get_git_dir()
        root = git_dir or Path()
        datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()

        self.file = Path(file)
        self.version = version
        self.defaults = {
            "settings_version": version,
            "datasets_dir": str(datasets_root / "datasets"),
            "weights_dir": str(root / "weights"),
            "runs_dir": str(root / "runs"),
            "uuid": hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(),
            "sync": True,
            "api_key": "",
            "openai_api_key": "",
            "clearml": True,  # integrations
            "comet": True,
            "dvc": True,
            "hub": True,
            "mlflow": True,
            "neptune": True,
            "raytune": True,
            "tensorboard": True,
            "wandb": True,
        }

        super().__init__(copy.deepcopy(self.defaults))

        with torch_distributed_zero_first(RANK):
            if not self.file.exists():
                self.save()

            self.load()
            correct_keys = self.keys() == self.defaults.keys()
            correct_types = all(type(a) is type(b) for a, b in zip(self.values(), self.defaults.values()))
            correct_version = check_version(self["settings_version"], self.version)
            if not (correct_keys and correct_types and correct_version):
                LOGGER.warning(
                    "WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem "
                    "with your settings or a recent ultralytics package update. "
                    f"\nView settings with 'yolo settings' or at '{self.file}'"
                    "\nUpdate settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'."
                )
                self.reset()

    def load(self):
        """Loads settings from the YAML file."""
        super().update(yaml_load(self.file))

    def save(self):
        """Saves the current settings to the YAML file."""
        yaml_save(self.file, dict(self))

    def update(self, *args, **kwargs):
        """Updates a setting value in the current settings."""
        super().update(*args, **kwargs)
        self.save()

    def reset(self):
        """Resets the settings to default and saves them."""
        self.clear()
        self.update(self.defaults)
        self.save()


def deprecation_warn(arg, new_arg, version=None):
    """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
    if not version:
        version = float(__version__[:3]) + 0.2  # deprecate after 2nd major release
    LOGGER.warning(
        f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. "
        f"Please use '{new_arg}' instead."
    )


def clean_url(url):
    """Strip auth from URL, i.e. https://url.com/file.txt?auth -> https://url.com/file.txt."""
    url = Path(url).as_posix().replace(":/", "://")  # Pathlib turns :// -> :/, as_posix() for Windows
    return urllib.parse.unquote(url).split("?")[0]  # '%2F' to '/', split https://url.com/file.txt?auth


def url2file(url):
    """Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt."""
    return Path(clean_url(url)).name


# Run below code on utils init ------------------------------------------------------------------------------------

# Check first-install steps
PREFIX = colorstr("Ultralytics: ")
SETTINGS = SettingsManager()  # initialize settings
DATASETS_DIR = Path(SETTINGS["datasets_dir"])  # global datasets directory
WEIGHTS_DIR = Path(SETTINGS["weights_dir"])  # global weights directory
RUNS_DIR = Path(SETTINGS["runs_dir"])  # global runs directory
ENVIRONMENT = (
    "Colab"
    if is_colab()
    else "Kaggle"
    if is_kaggle()
    else "Jupyter"
    if is_jupyter()
    else "Docker"
    if is_docker()
    else platform.system()
)
TESTS_RUNNING = is_pytest_running() or is_github_action_running()
set_sentry()

# Apply monkey patches
from .patches import imread, imshow, imwrite, torch_save

torch.save = torch_save
if WINDOWS:
    # Apply cv2 patches for non-ASCII and non-UTF characters in image paths
    cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/__init__.py
Python
unknown
36,882
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""

from copy import deepcopy

import numpy as np
import torch

from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
from ultralytics.utils.torch_utils import profile


def check_train_batch_size(model, imgsz=640, amp=True):
    """
    Check YOLO training batch size using the autobatch() function.

    Args:
        model (torch.nn.Module): YOLO model to check batch size for.
        imgsz (int): Image size used for training.
        amp (bool): If True, use automatic mixed precision (AMP) for training.

    Returns:
        (int): Optimal batch size computed using the autobatch() function.
    """
    with torch.cuda.amp.autocast(amp):
        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size


def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
    """
    Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.

    Args:
        model (torch.nn.module): YOLO model to compute batch size for.
        imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
        fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.60.
        batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.

    Returns:
        (int): The optimal batch size.
    """
    # Check device
    prefix = colorstr("AutoBatch: ")
    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz}")
    device = next(model.parameters()).device  # get model device
    if device.type == "cpu":
        LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}")
        return batch_size
    if torch.backends.cudnn.benchmark:
        LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free")

    # Profile batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)

        # Fit a solution
        y = [x[2] for x in results if x]  # memory [2]
        p = np.polyfit(batch_sizes[: len(y)], y, deg=1)  # first degree polynomial fit
        b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
        if None in results:  # some sizes failed
            i = results.index(None)  # first fail index
            if b >= batch_sizes[i]:  # y intercept above failure point
                b = batch_sizes[max(i - 1, 0)]  # select prior safe point
        if b < 1 or b > 1024:  # b outside of safe range
            b = batch_size
            LOGGER.info(f"{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.")

        fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
        LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
        return b
    except Exception as e:
        LOGGER.warning(f"{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.")
        return batch_size
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/autobatch.py
Python
unknown
3,863
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Benchmark a YOLO model formats for speed and accuracy.

Usage:
    from ultralytics.utils.benchmarks import ProfileModels, benchmark
    ProfileModels(['yolov8n.yaml', 'yolov8s.yaml']).profile()
    benchmark(model='yolov8n.pt', imgsz=160)

Format                  | `format=argument` | Model
---                     | ---               | ---
PyTorch                 | -                 | yolov8n.pt
TorchScript             | `torchscript`     | yolov8n.torchscript
ONNX                    | `onnx`            | yolov8n.onnx
OpenVINO                | `openvino`        | yolov8n_openvino_model/
TensorRT                | `engine`          | yolov8n.engine
CoreML                  | `coreml`          | yolov8n.mlpackage
TensorFlow SavedModel   | `saved_model`     | yolov8n_saved_model/
TensorFlow GraphDef     | `pb`              | yolov8n.pb
TensorFlow Lite         | `tflite`          | yolov8n.tflite
TensorFlow Edge TPU     | `edgetpu`         | yolov8n_edgetpu.tflite
TensorFlow.js           | `tfjs`            | yolov8n_web_model/
PaddlePaddle            | `paddle`          | yolov8n_paddle_model/
ncnn                    | `ncnn`            | yolov8n_ncnn_model/
"""

import glob
import platform
import time
from pathlib import Path

import numpy as np
import torch.cuda

from ultralytics import YOLO
from ultralytics.cfg import TASK2DATA, TASK2METRIC
from ultralytics.engine.exporter import export_formats
from ultralytics.utils import ASSETS, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
from ultralytics.utils.checks import check_requirements, check_yolo
from ultralytics.utils.files import file_size
from ultralytics.utils.torch_utils import select_device


def benchmark(
    model=WEIGHTS_DIR / "yolov8n.pt", data=None, imgsz=160, half=False, int8=False, device="cpu", verbose=False
):
    """
    Benchmark a YOLO model across different formats for speed and accuracy.

    Args:
        model (str | Path | optional): Path to the model file or directory. Default is
            Path(SETTINGS['weights_dir']) / 'yolov8n.pt'.
        data (str, optional): Dataset to evaluate on, inherited from TASK2DATA if not passed. Default is None.
        imgsz (int, optional): Image size for the benchmark. Default is 160.
        half (bool, optional): Use half-precision for the model if True. Default is False.
        int8 (bool, optional): Use int8-precision for the model if True. Default is False.
        device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'.
        verbose (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
            Default is False.

    Returns:
        df (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size,
            metric, and inference time.

    Example:
        ```python
        from ultralytics.utils.benchmarks import benchmark

        benchmark(model='yolov8n.pt', imgsz=640)
        ```
    """
    import pandas as pd

    pd.options.display.max_columns = 10
    pd.options.display.width = 120
    device = select_device(device, verbose=False)
    if isinstance(model, (str, Path)):
        model = YOLO(model)

    y = []
    t0 = time.time()
    # NOTE: the integer checks on `i` below are positional indices into the export_formats() table
    # (e.g. 5=CoreML, 7=TF GraphDef, 9=Edge TPU, 10=TF.js) — keep them in sync with export_formats().
    for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
        emoji, filename = "❌", None  # export defaults
        try:
            # Checks
            if i == 9:
                assert LINUX, "Edge TPU export only supported on Linux"
            elif i == 7:
                assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
            elif i in {5, 10}:  # CoreML and TF.js
                assert MACOS or LINUX, "export only supported on macOS and Linux"
            if "cpu" in device.type:
                assert cpu, "inference not supported on CPU"
            if "cuda" in device.type:
                assert gpu, "inference not supported on GPU"

            # Export
            if format == "-":
                filename = model.ckpt_path or model.cfg
                exported_model = model  # PyTorch format
            else:
                filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
                exported_model = YOLO(filename, task=model.task)
                assert suffix in str(filename), "export failed"
            emoji = "❎"  # indicates export succeeded

            # Predict
            assert model.task != "pose" or i != 7, "GraphDef Pose inference is not supported"
            assert i not in (9, 10), "inference not supported"  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13"  # CoreML
            exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half)

            # Validate
            data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
            key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
            results = exported_model.val(
                data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
            )
            metric, speed = results.results_dict[key], results.speed["inference"]
            y.append([name, "✅", round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
        except Exception as e:
            # With verbose=True only deliberate AssertionErrors are tolerated; anything else re-raises
            if verbose:
                assert type(e) is AssertionError, f"Benchmark failure for {name}: {e}"
            LOGGER.warning(f"ERROR ❌️ Benchmark failure for {name}: {e}")
            y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference

    # Print results
    check_yolo(device=device)  # print system info
    df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)"])

    name = Path(model.ckpt_path).name
    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n"
    LOGGER.info(s)
    with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
        f.write(s)

    if verbose and isinstance(verbose, float):
        metrics = df[key].array  # values to compare to floor
        floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"

    return df


class ProfileModels:
    """
    ProfileModels class for profiling different models on ONNX and TensorRT.

    This class profiles the performance of different models, provided their paths. The profiling includes parameters
    such as model speed and FLOPs.

    Attributes:
        paths (list): Paths of the models to profile.
        num_timed_runs (int): Number of timed runs for the profiling. Default is 100.
        num_warmup_runs (int): Number of warmup runs before profiling. Default is 10.
        min_time (float): Minimum number of seconds to profile for. Default is 60.
        imgsz (int): Image size used in the models. Default is 640.

    Methods:
        profile(): Profiles the models and prints the result.

    Example:
        ```python
        from ultralytics.utils.benchmarks import ProfileModels

        ProfileModels(['yolov8n.yaml', 'yolov8s.yaml'], imgsz=640).profile()
        ```
    """

    def __init__(
        self,
        paths: list,
        num_timed_runs=100,
        num_warmup_runs=10,
        min_time=60,
        imgsz=640,
        half=True,
        trt=True,
        device=None,
    ):
        """
        Initialize the ProfileModels class for profiling models.

        Args:
            paths (list): List of paths of the models to be profiled.
            num_timed_runs (int, optional): Number of timed runs for the profiling. Default is 100.
            num_warmup_runs (int, optional): Number of warmup runs before the actual profiling starts. Default is 10.
            min_time (float, optional): Minimum time in seconds for profiling a model. Default is 60.
            imgsz (int, optional): Size of the image used during profiling. Default is 640.
            half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling.
                Default is True.
            trt (bool, optional): Flag to indicate whether to profile using TensorRT. Default is True.
            device (torch.device, optional): Device used for profiling. If None, it is determined automatically.
                Default is None.
        """
        self.paths = paths
        self.num_timed_runs = num_timed_runs
        self.num_warmup_runs = num_warmup_runs
        self.min_time = min_time
        self.imgsz = imgsz
        self.half = half
        self.trt = trt  # run TensorRT profiling
        self.device = device or torch.device(0 if torch.cuda.is_available() else "cpu")

    def profile(self):
        """Logs the benchmarking results of a model, checks metrics against floor and returns the results."""
        files = self.get_files()

        if not files:
            print("No matching *.pt or *.onnx files found.")
            return

        table_rows = []
        output = []
        for file in files:
            engine_file = file.with_suffix(".engine")
            if file.suffix in (".pt", ".yaml", ".yml"):
                model = YOLO(str(file))
                model.fuse()  # to report correct params and GFLOPs in model.info()
                model_info = model.info()
                # Reuse an existing .engine file if present; TensorRT export is expensive
                if self.trt and self.device.type != "cpu" and not engine_file.is_file():
                    engine_file = model.export(
                        format="engine", half=self.half, imgsz=self.imgsz, device=self.device, verbose=False
                    )
                onnx_file = model.export(
                    format="onnx", half=self.half, imgsz=self.imgsz, simplify=True, device=self.device, verbose=False
                )
            elif file.suffix == ".onnx":
                model_info = self.get_onnx_model_info(file)
                onnx_file = file
            else:
                continue

            t_engine = self.profile_tensorrt_model(str(engine_file))
            t_onnx = self.profile_onnx_model(str(onnx_file))
            table_rows.append(self.generate_table_row(file.stem, t_onnx, t_engine, model_info))
            output.append(self.generate_results_dict(file.stem, t_onnx, t_engine, model_info))

        self.print_table(table_rows)
        return output

    def get_files(self):
        """Returns a list of paths for all relevant model files given by the user."""
        files = []
        for path in self.paths:
            path = Path(path)
            if path.is_dir():
                extensions = ["*.pt", "*.onnx", "*.yaml"]
                files.extend([file for ext in extensions for file in glob.glob(str(path / ext))])
            elif path.suffix in {".pt", ".yaml", ".yml"}:  # add non-existing
                files.append(str(path))
            else:
                files.extend(glob.glob(str(path)))

        print(f"Profiling: {sorted(files)}")
        return [Path(file) for file in sorted(files)]

    def get_onnx_model_info(self, onnx_file: str):
        """Retrieves the information including number of layers, parameters, gradients and FLOPs for an ONNX model
        file.
        """
        # NOTE(review): placeholder — ONNX introspection is not implemented, so table/dict rows for
        # raw .onnx inputs report zeros for params and FLOPs.
        return 0.0, 0.0, 0.0, 0.0  # return (num_layers, num_params, num_gradients, num_flops)

    def iterative_sigma_clipping(self, data, sigma=2, max_iters=3):
        """Applies an iterative sigma clipping algorithm to the given data times number of iterations."""
        data = np.array(data)
        for _ in range(max_iters):
            mean, std = np.mean(data), np.std(data)
            clipped_data = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
            if len(clipped_data) == len(data):  # converged: nothing left to clip
                break
            data = clipped_data
        return data

    def profile_tensorrt_model(self, engine_file: str, eps: float = 1e-3):
        """Profiles the TensorRT model, measuring average run time and standard deviation among runs."""
        if not self.trt or not Path(engine_file).is_file():
            return 0.0, 0.0

        # Model and input
        model = YOLO(engine_file)
        input_data = np.random.rand(self.imgsz, self.imgsz, 3).astype(np.float32)  # must be FP32

        # Warmup runs (repeated 3x; `elapsed` keeps the timing of the last pass only)
        elapsed = 0.0
        for _ in range(3):
            start_time = time.time()
            for _ in range(self.num_warmup_runs):
                model(input_data, imgsz=self.imgsz, verbose=False)
            elapsed = time.time() - start_time

        # Compute number of runs as higher of min_time or num_timed_runs
        num_runs = max(round(self.min_time / (elapsed + eps) * self.num_warmup_runs), self.num_timed_runs * 50)

        # Timed runs
        run_times = []
        for _ in TQDM(range(num_runs), desc=engine_file):
            results = model(input_data, imgsz=self.imgsz, verbose=False)
            run_times.append(results[0].speed["inference"])  # Convert to milliseconds

        run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
        return np.mean(run_times), np.std(run_times)

    def profile_onnx_model(self, onnx_file: str, eps: float = 1e-3):
        """Profiles an ONNX model by executing it multiple times and returns the mean and standard deviation of run
        times.
        """
        check_requirements("onnxruntime")
        import onnxruntime as ort

        # Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        sess_options.intra_op_num_threads = 8  # Limit the number of threads
        sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])

        input_tensor = sess.get_inputs()[0]
        input_type = input_tensor.type

        # Mapping ONNX datatype to numpy datatype
        if "float16" in input_type:
            input_dtype = np.float16
        elif "float" in input_type:
            input_dtype = np.float32
        elif "double" in input_type:
            input_dtype = np.float64
        elif "int64" in input_type:
            input_dtype = np.int64
        elif "int32" in input_type:
            input_dtype = np.int32
        else:
            raise ValueError(f"Unsupported ONNX datatype {input_type}")

        # NOTE(review): assumes the model has static input dims — dynamic axes would make
        # input_tensor.shape non-numeric and np.random.rand(*shape) would fail.
        input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
        input_name = input_tensor.name
        output_name = sess.get_outputs()[0].name

        # Warmup runs (repeated 3x; `elapsed` keeps the timing of the last pass only)
        elapsed = 0.0
        for _ in range(3):
            start_time = time.time()
            for _ in range(self.num_warmup_runs):
                sess.run([output_name], {input_name: input_data})
            elapsed = time.time() - start_time

        # Compute number of runs as higher of min_time or num_timed_runs
        num_runs = max(round(self.min_time / (elapsed + eps) * self.num_warmup_runs), self.num_timed_runs)

        # Timed runs
        run_times = []
        for _ in TQDM(range(num_runs), desc=onnx_file):
            start_time = time.time()
            sess.run([output_name], {input_name: input_data})
            run_times.append((time.time() - start_time) * 1000)  # Convert to milliseconds

        run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=5)  # sigma clipping
        return np.mean(run_times), np.std(run_times)

    def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
        """Generates a formatted string for a table row that includes model performance and metric details."""
        layers, params, gradients, flops = model_info
        return f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |"

    def generate_results_dict(self, model_name, t_onnx, t_engine, model_info):
        """Generates a dictionary of model details including name, parameters, GFLOPS and speed metrics."""
        layers, params, gradients, flops = model_info
        return {
            "model/name": model_name,
            "model/parameters": params,
            "model/GFLOPs": round(flops, 3),
            "model/speed_ONNX(ms)": round(t_onnx[0], 3),
            "model/speed_TensorRT(ms)": round(t_engine[0], 3),
        }

    def print_table(self, table_rows):
        """Formats and prints a comparison table for different models with given statistics and performance data."""
        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU"
        header = f"| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |"
        separator = "|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|"

        print(f"\n\n{header}")
        print(separator)
        for row in table_rows:
            print(row)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/benchmarks.py
Python
unknown
17,556
# Ultralytics YOLO 🚀, AGPL-3.0 license from .base import add_integration_callbacks, default_callbacks, get_default_callbacks __all__ = "add_integration_callbacks", "default_callbacks", "get_default_callbacks"
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/__init__.py
Python
unknown
214
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Base callbacks."""

from collections import defaultdict
from copy import deepcopy

# Trainer callbacks ----------------------------------------------------------------------------------------------------
# Each hook below is a no-op placeholder; integrations append their own handlers via add_integration_callbacks().


def on_pretrain_routine_start(trainer):
    """Called before the pretraining routine starts."""
    pass


def on_pretrain_routine_end(trainer):
    """Called after the pretraining routine ends."""
    pass


def on_train_start(trainer):
    """Called when the training starts."""
    pass


def on_train_epoch_start(trainer):
    """Called at the start of each training epoch."""
    pass


def on_train_batch_start(trainer):
    """Called at the start of each training batch."""
    pass


def optimizer_step(trainer):
    """Called when the optimizer takes a step."""
    pass


def on_before_zero_grad(trainer):
    """Called before the gradients are set to zero."""
    pass


def on_train_batch_end(trainer):
    """Called at the end of each training batch."""
    pass


def on_train_epoch_end(trainer):
    """Called at the end of each training epoch."""
    pass


def on_fit_epoch_end(trainer):
    """Called at the end of each fit epoch (train + val)."""
    pass


def on_model_save(trainer):
    """Called when the model is saved."""
    pass


def on_train_end(trainer):
    """Called when the training ends."""
    pass


def on_params_update(trainer):
    """Called when the model parameters are updated."""
    pass


def teardown(trainer):
    """Called during the teardown of the training process."""
    pass


# Validator callbacks --------------------------------------------------------------------------------------------------


def on_val_start(validator):
    """Called when the validation starts."""
    pass


def on_val_batch_start(validator):
    """Called at the start of each validation batch."""
    pass


def on_val_batch_end(validator):
    """Called at the end of each validation batch."""
    pass


def on_val_end(validator):
    """Called when the validation ends."""
    pass


# Predictor callbacks --------------------------------------------------------------------------------------------------


def on_predict_start(predictor):
    """Called when the prediction starts."""
    pass


def on_predict_batch_start(predictor):
    """Called at the start of each prediction batch."""
    pass


def on_predict_batch_end(predictor):
    """Called at the end of each prediction batch."""
    pass


def on_predict_postprocess_end(predictor):
    """Called after the post-processing of the prediction ends."""
    pass


def on_predict_end(predictor):
    """Called when the prediction ends."""
    pass


# Exporter callbacks ---------------------------------------------------------------------------------------------------


def on_export_start(exporter):
    """Called when the model export starts."""
    pass


def on_export_end(exporter):
    """Called when the model export ends."""
    pass


# Registry mapping event name -> list of handlers; keys here define the full set of supported events.
default_callbacks = {
    # Run in trainer
    "on_pretrain_routine_start": [on_pretrain_routine_start],
    "on_pretrain_routine_end": [on_pretrain_routine_end],
    "on_train_start": [on_train_start],
    "on_train_epoch_start": [on_train_epoch_start],
    "on_train_batch_start": [on_train_batch_start],
    "optimizer_step": [optimizer_step],
    "on_before_zero_grad": [on_before_zero_grad],
    "on_train_batch_end": [on_train_batch_end],
    "on_train_epoch_end": [on_train_epoch_end],
    "on_fit_epoch_end": [on_fit_epoch_end],  # fit = train + val
    "on_model_save": [on_model_save],
    "on_train_end": [on_train_end],
    "on_params_update": [on_params_update],
    "teardown": [teardown],
    # Run in validator
    "on_val_start": [on_val_start],
    "on_val_batch_start": [on_val_batch_start],
    "on_val_batch_end": [on_val_batch_end],
    "on_val_end": [on_val_end],
    # Run in predictor
    "on_predict_start": [on_predict_start],
    "on_predict_batch_start": [on_predict_batch_start],
    "on_predict_postprocess_end": [on_predict_postprocess_end],
    "on_predict_batch_end": [on_predict_batch_end],
    "on_predict_end": [on_predict_end],
    # Run in exporter
    "on_export_start": [on_export_start],
    "on_export_end": [on_export_end],
}


def get_default_callbacks():
    """
    Return a copy of the default_callbacks dictionary with lists as default values.

    Returns:
        (defaultdict): A defaultdict with keys from default_callbacks and empty lists as default values.
    """
    # deepcopy so each Trainer/Validator/... gets independent handler lists it can mutate safely
    return defaultdict(list, deepcopy(default_callbacks))


def add_integration_callbacks(instance):
    """
    Add integration callbacks from various sources to the instance's callbacks.

    Args:
        instance (Trainer, Predictor, Validator, Exporter): An object with a 'callbacks' attribute that is a
            dictionary of callback lists.
    """
    # Load HUB callbacks
    from .hub import callbacks as hub_cb

    callbacks_list = [hub_cb]

    # Load training callbacks (only Trainers get the experiment-tracker integrations)
    if "Trainer" in instance.__class__.__name__:
        from .clearml import callbacks as clear_cb
        from .comet import callbacks as comet_cb
        from .dvc import callbacks as dvc_cb
        from .mlflow import callbacks as mlflow_cb
        from .neptune import callbacks as neptune_cb
        from .raytune import callbacks as tune_cb
        from .tensorboard import callbacks as tb_cb
        from .wb import callbacks as wb_cb

        callbacks_list.extend([clear_cb, comet_cb, dvc_cb, mlflow_cb, neptune_cb, tune_cb, tb_cb, wb_cb])

    # Add the callbacks to the callbacks dictionary (skip duplicates so repeated calls are idempotent)
    for callbacks in callbacks_list:
        for k, v in callbacks.items():
            if v not in instance.callbacks[k]:
                instance.callbacks[k].append(v)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/base.py
Python
unknown
5,777
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING

try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["clearml"] is True  # verify integration is enabled
    import clearml
    from clearml import Task
    from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO
    from clearml.binding.matplotlib_bind import PatchedMatplotlib

    assert hasattr(clearml, "__version__")  # verify package is not directory

except (ImportError, AssertionError):
    clearml = None  # sentinel: integration disabled, `callbacks` below becomes {}


def _log_debug_samples(files, title="Debug Samples") -> None:
    """
    Log files (images) as debug samples in the ClearML task.

    Args:
        files (list): A list of file paths in PosixPath format.
        title (str): A title that groups together images with the same values.
    """
    import re

    if task := Task.current_task():
        for f in files:
            if f.exists():
                # Extract the batch number from filenames like 'train_batch2.jpg' to use as the iteration
                it = re.search(r"_batch(\d+)", f.name)
                iteration = int(it.groups()[0]) if it else 0
                task.get_logger().report_image(
                    title=title, series=f.name.replace(it.group(), ""), local_path=str(f), iteration=iteration
                )


def _log_plot(title, plot_path) -> None:
    """
    Log an image as a plot in the plot section of ClearML.

    Args:
        title (str): The title of the plot.
        plot_path (str): The path to the saved image file.
    """
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt

    img = mpimg.imread(plot_path)
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[])  # no ticks
    ax.imshow(img)

    Task.current_task().get_logger().report_matplotlib_figure(
        title=title, series="", figure=fig, report_interactive=False
    )


def on_pretrain_routine_start(trainer):
    """Runs at start of pretraining routine; initializes and connects/ logs task to ClearML."""
    try:
        if task := Task.current_task():
            # Make sure the automatic pytorch and matplotlib bindings are disabled!
            # We are logging these plots and model files manually in the integration
            PatchPyTorchModelIO.update_current_task(None)
            PatchedMatplotlib.update_current_task(None)
        else:
            task = Task.init(
                project_name=trainer.args.project or "YOLOv8",
                task_name=trainer.args.name,
                tags=["YOLOv8"],
                output_uri=True,
                reuse_last_task_id=False,
                auto_connect_frameworks={"pytorch": False, "matplotlib": False},
            )
            LOGGER.warning(
                "ClearML Initialized a new task. If you want to run remotely, "
                "please add clearml-init and connect your arguments before initializing YOLO."
            )
        task.connect(vars(trainer.args), name="General")
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}")


def on_train_epoch_end(trainer):
    """Logs debug samples for the first epoch of YOLO training and report current training progress."""
    if task := Task.current_task():
        # Log debug samples
        if trainer.epoch == 1:
            _log_debug_samples(sorted(trainer.save_dir.glob("train_batch*.jpg")), "Mosaic")
        # Report the current training progress
        for k, v in trainer.label_loss_items(trainer.tloss, prefix="train").items():
            task.get_logger().report_scalar("train", k, v, iteration=trainer.epoch)
        for k, v in trainer.lr.items():
            task.get_logger().report_scalar("lr", k, v, iteration=trainer.epoch)


def on_fit_epoch_end(trainer):
    """Reports model information to logger at the end of an epoch."""
    if task := Task.current_task():
        # You should have access to the validation bboxes under jdict
        task.get_logger().report_scalar(
            title="Epoch Time", series="Epoch Time", value=trainer.epoch_time, iteration=trainer.epoch
        )
        for k, v in trainer.metrics.items():
            task.get_logger().report_scalar("val", k, v, iteration=trainer.epoch)
        if trainer.epoch == 0:
            from ultralytics.utils.torch_utils import model_info_for_loggers

            for k, v in model_info_for_loggers(trainer).items():
                task.get_logger().report_single_value(k, v)


def on_val_end(validator):
    """Logs validation results including labels and predictions."""
    if Task.current_task():
        # Log val_labels and val_pred
        _log_debug_samples(sorted(validator.save_dir.glob("val*.jpg")), "Validation")


def on_train_end(trainer):
    """Logs final model and its name on training completion."""
    if task := Task.current_task():
        # Log final results, CM matrix + PR plots
        files = [
            "results.png",
            "confusion_matrix.png",
            "confusion_matrix_normalized.png",
            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
        ]
        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
        for f in files:
            _log_plot(title=f.stem, plot_path=f)
        # Report final metrics
        for k, v in trainer.validator.metrics.results_dict.items():
            task.get_logger().report_single_value(k, v)
        # Log the final model
        task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)


# Exposed to add_integration_callbacks(); empty when clearml import/verification failed above
callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_epoch_end": on_train_epoch_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_val_end": on_val_end,
        "on_train_end": on_train_end,
    }
    if clearml
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/clearml.py
Python
unknown
5,897
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops

try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["comet"] is True  # verify integration is enabled
    import comet_ml

    assert hasattr(comet_ml, "__version__")  # verify package is not directory

    import os
    from pathlib import Path

    # Ensures certain logging functions only run for supported tasks
    COMET_SUPPORTED_TASKS = ["detect"]

    # Names of plots created by YOLOv8 that are logged to Comet
    EVALUATION_PLOT_NAMES = "F1_curve", "P_curve", "R_curve", "PR_curve", "confusion_matrix"
    LABEL_PLOT_NAMES = "labels", "labels_correlogram"

    _comet_image_prediction_count = 0  # module-level counter capped by COMET_MAX_IMAGE_PREDICTIONS

except (ImportError, AssertionError):
    comet_ml = None  # sentinel: integration disabled


def _get_comet_mode():
    """Returns the mode of comet set in the environment variables, defaults to 'online' if not set."""
    return os.getenv("COMET_MODE", "online")


def _get_comet_model_name():
    """Returns the model name for Comet from the environment variable 'COMET_MODEL_NAME' or defaults to 'YOLOv8'."""
    return os.getenv("COMET_MODEL_NAME", "YOLOv8")


def _get_eval_batch_logging_interval():
    """Get the evaluation batch logging interval from environment variable or use default value 1."""
    return int(os.getenv("COMET_EVAL_BATCH_LOGGING_INTERVAL", 1))


def _get_max_image_predictions_to_log():
    """Get the maximum number of image predictions to log from the environment variables."""
    return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))


def _scale_confidence_score(score):
    """Scales the given confidence score by a factor specified in an environment variable."""
    scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0))
    return score * scale


def _should_log_confusion_matrix():
    """Determines if the confusion matrix should be logged based on the environment variable settings."""
    return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true"


def _should_log_image_predictions():
    """Determines whether to log image predictions based on a specified environment variable."""
    return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true"


def _get_experiment_type(mode, project_name):
    """Return an experiment based on mode and project name."""
    if mode == "offline":
        return comet_ml.OfflineExperiment(project_name=project_name)

    return comet_ml.Experiment(project_name=project_name)


def _create_experiment(args):
    """Ensures that the experiment object is only created in a single process during distributed training."""
    if RANK not in (-1, 0):  # only rank -1 (single-GPU) or rank 0 (DDP master) logs
        return
    try:
        comet_mode = _get_comet_mode()
        _project_name = os.getenv("COMET_PROJECT_NAME", args.project)
        experiment = _get_experiment_type(comet_mode, _project_name)
        experiment.log_parameters(vars(args))
        experiment.log_others(
            {
                "eval_batch_logging_interval": _get_eval_batch_logging_interval(),
                "log_confusion_matrix_on_eval": _should_log_confusion_matrix(),
                "log_image_predictions": _should_log_image_predictions(),
                "max_image_predictions": _get_max_image_predictions_to_log(),
            }
        )
        experiment.log_other("Created from", "yolov8")
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. {e}")


def _fetch_trainer_metadata(trainer):
    """Returns metadata for YOLO training including epoch and asset saving status."""
    curr_epoch = trainer.epoch + 1

    # NOTE(review): approximates steps/epoch as dataset_size // batch_size — confirm this matches
    # the dataloader's actual step count when drop_last differs.
    train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
    curr_step = curr_epoch * train_num_steps_per_epoch
    final_epoch = curr_epoch == trainer.epochs

    save = trainer.args.save
    save_period = trainer.args.save_period
    save_interval = curr_epoch % save_period == 0
    save_assets = save and save_period > 0 and save_interval and not final_epoch

    return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)


def _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad):
    """
    YOLOv8 resizes images during training and the label values are normalized based on this resized shape.

    This function rescales the bounding box labels to the original image shape.
    """
    resized_image_height, resized_image_width = resized_image_shape

    # Convert normalized xywh format predictions to xyxy in resized scale format
    box = ops.xywhn2xyxy(box, h=resized_image_height, w=resized_image_width)
    # Scale box predictions from resized image scale back to original image scale
    box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad)
    # Convert bounding box format from xyxy to xywh for Comet logging
    box = ops.xyxy2xywh(box)
    # Adjust xy center to correspond top-left corner
    box[:2] -= box[2:] / 2
    box = box.tolist()

    return box


def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None):
    """Format ground truth annotations for detection."""
    indices = batch["batch_idx"] == img_idx
    bboxes = batch["bboxes"][indices]
    if len(bboxes) == 0:
        LOGGER.debug(f"COMET WARNING: Image: {image_path} has no bounding boxes labels")
        return None

    cls_labels = batch["cls"][indices].squeeze(1).tolist()
    if class_name_map:
        cls_labels = [str(class_name_map[label]) for label in cls_labels]

    original_image_shape = batch["ori_shape"][img_idx]
    resized_image_shape = batch["resized_shape"][img_idx]
    ratio_pad = batch["ratio_pad"][img_idx]

    data = []
    for box, label in zip(bboxes, cls_labels):
        box = _scale_bounding_box_to_original_image_shape(box, resized_image_shape, original_image_shape, ratio_pad)
        data.append(
            {
                "boxes": [box],
                "label": f"gt_{label}",
                "score": _scale_confidence_score(1.0),  # ground truth logged at full confidence
            }
        )

    return {"name": "ground_truth", "data": data}


def _format_prediction_annotations_for_detection(image_path, metadata, class_label_map=None):
    """Format YOLO predictions for object detection visualization."""
    stem = image_path.stem
    image_id = int(stem) if stem.isnumeric() else stem  # COCO-style numeric stems become int ids

    predictions = metadata.get(image_id)
    if not predictions:
        LOGGER.debug(f"COMET WARNING: Image: {image_path} has no bounding boxes predictions")
        return None

    data = []
    for prediction in predictions:
        boxes = prediction["bbox"]
        score = _scale_confidence_score(prediction["score"])
        cls_label = prediction["category_id"]
        if class_label_map:
            cls_label = str(class_label_map[cls_label])

        data.append({"boxes": [boxes], "label": cls_label, "score": score})

    return {"name": "prediction", "data": data}


def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, class_label_map):
    """Join the ground truth and prediction annotations if they exist."""
    ground_truth_annotations = _format_ground_truth_annotations_for_detection(
        img_idx, image_path, batch, class_label_map
    )
    prediction_annotations = _format_prediction_annotations_for_detection(
        image_path, prediction_metadata_map, class_label_map
    )

    annotations = [
        annotation for annotation in [ground_truth_annotations, prediction_annotations] if annotation is not None
    ]
    return [annotations] if annotations else None


def _create_prediction_metadata_map(model_predictions):
    """Create metadata map for model predictions by groupings them based on image ID."""
    pred_metadata_map = {}
    for prediction in model_predictions:
        pred_metadata_map.setdefault(prediction["image_id"], [])
        pred_metadata_map[prediction["image_id"]].append(prediction)

    return pred_metadata_map


def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch):
    """Log the confusion matrix to Comet experiment."""
    conf_mat = trainer.validator.confusion_matrix.matrix
    names = list(trainer.data["names"].values()) + ["background"]
    experiment.log_confusion_matrix(
        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
    )


def _log_images(experiment, image_paths, curr_step, annotations=None):
    """Logs images to the experiment with optional annotations."""
    if annotations:
        for image_path, annotation in zip(image_paths, annotations):
            experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)

    else:
        for image_path in image_paths:
            experiment.log_image(image_path, name=image_path.stem, step=curr_step)


def _log_image_predictions(experiment, validator, curr_step):
    """Logs predicted boxes for a single image during training."""
    global _comet_image_prediction_count

    task = validator.args.task
    if task not in COMET_SUPPORTED_TASKS:
        return

    jdict = validator.jdict
    if not jdict:
        return

    predictions_metadata_map = _create_prediction_metadata_map(jdict)
    dataloader = validator.dataloader
    class_label_map = validator.names

    batch_logging_interval = _get_eval_batch_logging_interval()
    max_image_predictions = _get_max_image_predictions_to_log()

    for batch_idx, batch in enumerate(dataloader):
        if (batch_idx + 1) % batch_logging_interval != 0:  # subsample batches per the env-configured interval
            continue

        image_paths = batch["im_file"]
        for img_idx, image_path in enumerate(image_paths):
            if _comet_image_prediction_count >= max_image_predictions:
                return

            image_path = Path(image_path)
            annotations = _fetch_annotations(
                img_idx,
                image_path,
                batch,
                predictions_metadata_map,
                class_label_map,
            )
            _log_images(
                experiment,
                [image_path],
                curr_step,
                annotations=annotations,
            )
            _comet_image_prediction_count += 1


def _log_plots(experiment, trainer):
    """Logs evaluation plots and label plots for the experiment."""
    plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]
    _log_images(experiment, plot_filenames, None)

    label_plot_filenames = [trainer.save_dir / f"{labels}.jpg" for labels in LABEL_PLOT_NAMES]
    _log_images(experiment, label_plot_filenames, None)


def _log_model(experiment, trainer):
    """Log the best-trained model to Comet.ml."""
    model_name = _get_comet_model_name()
    experiment.log_model(model_name, file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)


def on_pretrain_routine_start(trainer):
    """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine."""
    experiment = comet_ml.get_global_experiment()
    is_alive = getattr(experiment, "alive", False)
    if not experiment or not is_alive:
        _create_experiment(trainer.args)


def on_train_epoch_end(trainer):
    """Log metrics and save batch images at the end of training epochs."""
    experiment = comet_ml.get_global_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]

    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=curr_step, epoch=curr_epoch)

    if curr_epoch == 1:
        _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)


def on_fit_epoch_end(trainer):
    """Logs model assets at the end of each epoch."""
    experiment = comet_ml.get_global_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]
    save_assets = metadata["save_assets"]

    experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch)
    experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch)
    if curr_epoch == 1:
        from ultralytics.utils.torch_utils import model_info_for_loggers

        experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch)

    if not save_assets:
        return

    _log_model(experiment, trainer)
    if
_should_log_confusion_matrix(): _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) if _should_log_image_predictions(): _log_image_predictions(experiment, trainer.validator, curr_step) def on_train_end(trainer): """Perform operations at the end of training.""" experiment = comet_ml.get_global_experiment() if not experiment: return metadata = _fetch_trainer_metadata(trainer) curr_epoch = metadata["curr_epoch"] curr_step = metadata["curr_step"] plots = trainer.args.plots _log_model(experiment, trainer) if plots: _log_plots(experiment, trainer) _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) _log_image_predictions(experiment, trainer.validator, curr_step) experiment.end() global _comet_image_prediction_count _comet_image_prediction_count = 0 callbacks = ( { "on_pretrain_routine_start": on_pretrain_routine_start, "on_train_epoch_end": on_train_epoch_end, "on_fit_epoch_end": on_fit_epoch_end, "on_train_end": on_train_end, } if comet_ml else {} )
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/comet.py
Python
unknown
13,744
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, checks

try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["dvc"] is True  # verify integration is enabled
    import dvclive

    assert checks.check_version("dvclive", "2.11.0", verbose=True)

    import os
    import re
    from pathlib import Path

    # DVCLive logger instance
    live = None
    _processed_plots = {}

    # `on_fit_epoch_end` is called on final validation (probably need to be fixed) for now this is the way we
    # distinguish final evaluation of the best model vs last epoch validation
    _training_epoch = False

except (ImportError, AssertionError, TypeError):
    # Any failure above disables the integration entirely: callbacks below become a no-op dict
    dvclive = None


def _log_images(path, prefix=""):
    """Logs images at specified path with an optional prefix using DVCLive."""
    if live:
        name = path.name

        # Group images by batch to enable sliders in UI
        if m := re.search(r"_batch(\d+)", name):
            ni = m[1]
            new_stem = re.sub(r"_batch(\d+)", "_batch", path.stem)
            name = (Path(new_stem) / ni).with_suffix(path.suffix)

        live.log_image(os.path.join(prefix, name), path)


def _log_plots(plots, prefix=""):
    """Logs plot images for training progress if they have not been previously processed."""
    for name, params in plots.items():
        timestamp = params["timestamp"]
        # _processed_plots caches the last-seen timestamp per plot so unchanged plots are not re-uploaded
        if _processed_plots.get(name) != timestamp:
            _log_images(name, prefix)
            _processed_plots[name] = timestamp


def _log_confusion_matrix(validator):
    """Logs the confusion matrix for the given validator using DVCLive."""
    targets = []
    preds = []
    matrix = validator.confusion_matrix.matrix
    names = list(validator.names.values())
    if validator.confusion_matrix.task == "detect":
        names += ["background"]

    # Expand the count matrix into per-sample (target, pred) label pairs for log_sklearn_plot
    for ti, pred in enumerate(matrix.T.astype(int)):
        for pi, num in enumerate(pred):
            targets.extend([names[ti]] * num)
            preds.extend([names[pi]] * num)

    live.log_sklearn_plot("confusion_matrix", targets, preds, name="cf.json", normalized=True)


def on_pretrain_routine_start(trainer):
    """Initializes DVCLive logger for training metadata during pre-training routine."""
    try:
        global live
        live = dvclive.Live(save_dvc_exp=True, cache_images=True)
        LOGGER.info("DVCLive is detected and auto logging is enabled (run 'yolo settings dvc=False' to disable).")
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ DVCLive installed but not initialized correctly, not logging this run. {e}")


def on_pretrain_routine_end(trainer):
    """Logs plots related to the training process at the end of the pretraining routine."""
    _log_plots(trainer.plots, "train")


def on_train_start(trainer):
    """Logs the training parameters if DVCLive logging is active."""
    if live:
        live.log_params(trainer.args)


def on_train_epoch_start(trainer):
    """Sets the global variable _training_epoch value to True at the start of training each epoch."""
    global _training_epoch
    _training_epoch = True


def on_fit_epoch_end(trainer):
    """Logs training metrics and model info, and advances to next step on the end of each fit epoch."""
    global _training_epoch
    # _training_epoch guards against the extra on_fit_epoch_end fired by the final best-model validation
    if live and _training_epoch:
        all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr}
        for metric, value in all_metrics.items():
            live.log_metric(metric, value)

        if trainer.epoch == 0:
            from ultralytics.utils.torch_utils import model_info_for_loggers

            for metric, value in model_info_for_loggers(trainer).items():
                live.log_metric(metric, value, plot=False)

        _log_plots(trainer.plots, "train")
        _log_plots(trainer.validator.plots, "val")

        live.next_step()
        _training_epoch = False


def on_train_end(trainer):
    """Logs the best metrics, plots, and confusion matrix at the end of training if DVCLive is active."""
    if live:
        # At the end log the best metrics. It runs validator on the best model internally.
        all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr}
        for metric, value in all_metrics.items():
            live.log_metric(metric, value, plot=False)

        _log_plots(trainer.plots, "val")
        _log_plots(trainer.validator.plots, "val")
        _log_confusion_matrix(trainer.validator)

        if trainer.best.exists():
            live.log_artifact(trainer.best, copy=True, type="model")

        live.end()


callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_pretrain_routine_end": on_pretrain_routine_end,
        "on_train_start": on_train_start,
        "on_train_epoch_start": on_train_epoch_start,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_end": on_train_end,
    }
    if dvclive
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/dvc.py
Python
unknown
5,045
# Ultralytics YOLO 🚀, AGPL-3.0 license

import json
from time import time

from ultralytics.hub.utils import HUB_WEB_ROOT, PREFIX, events
from ultralytics.utils import LOGGER, SETTINGS


def on_pretrain_routine_end(trainer):
    """Logs info before starting timer for upload rate limit."""
    session = getattr(trainer, "hub_session", None)
    if session:
        # Start timer for upload rate limit
        session.timers = {
            "metrics": time(),
            "ckpt": time(),
        }  # start timer on session.rate_limit


def on_fit_epoch_end(trainer):
    """Uploads training progress metrics at the end of each epoch."""
    session = getattr(trainer, "hub_session", None)
    if session:
        # Upload metrics after val end
        all_plots = {
            **trainer.label_loss_items(trainer.tloss, prefix="train"),
            **trainer.metrics,
        }
        if trainer.epoch == 0:
            from ultralytics.utils.torch_utils import model_info_for_loggers

            all_plots = {**all_plots, **model_info_for_loggers(trainer)}

        # Queue metrics every epoch; only flush the queue when the rate-limit window has elapsed
        session.metrics_queue[trainer.epoch] = json.dumps(all_plots)
        if time() - session.timers["metrics"] > session.rate_limits["metrics"]:
            session.upload_metrics()
            session.timers["metrics"] = time()  # reset timer
            session.metrics_queue = {}  # reset queue


def on_model_save(trainer):
    """Saves checkpoints to Ultralytics HUB with rate limiting."""
    session = getattr(trainer, "hub_session", None)
    if session:
        # Upload checkpoints with rate limiting
        is_best = trainer.best_fitness == trainer.fitness
        if time() - session.timers["ckpt"] > session.rate_limits["ckpt"]:
            LOGGER.info(f"{PREFIX}Uploading checkpoint {HUB_WEB_ROOT}/models/{session.model_id}")
            session.upload_model(trainer.epoch, trainer.last, is_best)
            session.timers["ckpt"] = time()  # reset timer


def on_train_end(trainer):
    """Upload final model and metrics to Ultralytics HUB at the end of training."""
    session = getattr(trainer, "hub_session", None)
    if session:
        # Upload final model and metrics with exponential standoff
        LOGGER.info(f"{PREFIX}Syncing final model...")
        session.upload_model(
            trainer.epoch,
            trainer.best,
            map=trainer.metrics.get("metrics/mAP50-95(B)", 0),
            final=True,
        )
        session.alive = False  # stop heartbeats
        LOGGER.info(f"{PREFIX}Done ✅\n" f"{PREFIX}View model at {session.model_url} 🚀")


def on_train_start(trainer):
    """Run events on train start."""
    events(trainer.args)


def on_val_start(validator):
    """Runs events on validation start."""
    events(validator.args)


def on_predict_start(predictor):
    """Run events on predict start."""
    events(predictor.args)


def on_export_start(exporter):
    """Run events on export start."""
    events(exporter.args)


callbacks = (
    {
        "on_pretrain_routine_end": on_pretrain_routine_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_model_save": on_model_save,
        "on_train_end": on_train_end,
        "on_train_start": on_train_start,
        "on_val_start": on_val_start,
        "on_predict_start": on_predict_start,
        "on_export_start": on_export_start,
    }
    if SETTINGS["hub"] is True
    else {}
)  # verify enabled
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/hub.py
Python
unknown
3,402
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
MLflow Logging for Ultralytics YOLO.

This module enables MLflow logging for Ultralytics YOLO. It logs metrics, parameters, and model artifacts.
For setting up, a tracking URI should be specified. The logging can be customized using environment variables.

Commands:
    1. To set a project name:
        `export MLFLOW_EXPERIMENT_NAME=<your_experiment_name>` or use the project=<project> argument

    2. To set a run name:
        `export MLFLOW_RUN=<your_run_name>` or use the name=<name> argument

    3. To start a local MLflow server:
        mlflow server --backend-store-uri runs/mlflow
       It will by default start a local server at http://127.0.0.1:5000.
       To specify a different URI, set the MLFLOW_TRACKING_URI environment variable.

    4. To kill all running MLflow server instances:
        ps aux | grep 'mlflow' | grep -v 'grep' | awk '{print $2}' | xargs kill -9
"""

from ultralytics.utils import LOGGER, RUNS_DIR, SETTINGS, TESTS_RUNNING, colorstr

try:
    import os

    assert not TESTS_RUNNING or "test_mlflow" in os.environ.get("PYTEST_CURRENT_TEST", "")  # do not log pytest
    assert SETTINGS["mlflow"] is True  # verify integration is enabled
    import mlflow

    assert hasattr(mlflow, "__version__")  # verify package is not directory
    from pathlib import Path

    PREFIX = colorstr("MLflow: ")
    # Strip parentheses from metric keys (MLflow-illegal characters) and coerce values to float
    SANITIZE = lambda x: {k.replace("(", "").replace(")", ""): float(v) for k, v in x.items()}

except (ImportError, AssertionError):
    mlflow = None


def on_pretrain_routine_end(trainer):
    """
    Log training parameters to MLflow at the end of the pretraining routine.

    This function sets up MLflow logging based on environment variables and trainer arguments. It sets the tracking
    URI, experiment name, and run name, then starts the MLflow run if not already active. It finally logs the
    parameters from the trainer.

    Args:
        trainer (ultralytics.engine.trainer.BaseTrainer): The training object with arguments and parameters to log.

    Global:
        mlflow: The imported mlflow module to use for logging.

    Environment Variables:
        MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
        MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
        MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
    """
    global mlflow

    uri = os.environ.get("MLFLOW_TRACKING_URI") or str(RUNS_DIR / "mlflow")
    LOGGER.debug(f"{PREFIX} tracking uri: {uri}")
    mlflow.set_tracking_uri(uri)

    # Set experiment and run names
    experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME") or trainer.args.project or "/Shared/YOLOv8"
    run_name = os.environ.get("MLFLOW_RUN") or trainer.args.name
    mlflow.set_experiment(experiment_name)

    mlflow.autolog()
    try:
        # Reuse an already-active run if one exists, otherwise start a new one
        active_run = mlflow.active_run() or mlflow.start_run(run_name=run_name)
        LOGGER.info(f"{PREFIX}logging run_id({active_run.info.run_id}) to {uri}")
        if Path(uri).is_dir():
            LOGGER.info(f"{PREFIX}view at http://127.0.0.1:5000 with 'mlflow server --backend-store-uri {uri}'")
        LOGGER.info(f"{PREFIX}disable with 'yolo settings mlflow=False'")
        mlflow.log_params(dict(trainer.args))
    except Exception as e:
        LOGGER.warning(f"{PREFIX}WARNING ⚠️ Failed to initialize: {e}\n" f"{PREFIX}WARNING ⚠️ Not tracking this run")


def on_train_epoch_end(trainer):
    """Log training metrics at the end of each train epoch to MLflow."""
    if mlflow:
        mlflow.log_metrics(
            metrics={
                **SANITIZE(trainer.lr),
                **SANITIZE(trainer.label_loss_items(trainer.tloss, prefix="train")),
            },
            step=trainer.epoch,
        )


def on_fit_epoch_end(trainer):
    """Log training metrics at the end of each fit epoch to MLflow."""
    if mlflow:
        mlflow.log_metrics(metrics=SANITIZE(trainer.metrics), step=trainer.epoch)


def on_train_end(trainer):
    """Log model artifacts at the end of the training."""
    if mlflow:
        mlflow.log_artifact(str(trainer.best.parent))  # log save_dir/weights directory with best.pt and last.pt
        for f in trainer.save_dir.glob("*"):  # log all other files in save_dir
            if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
                mlflow.log_artifact(str(f))

        mlflow.end_run()
        LOGGER.info(
            f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n"
            f"{PREFIX}disable with 'yolo settings mlflow=False'"
        )


callbacks = (
    {
        "on_pretrain_routine_end": on_pretrain_routine_end,
        "on_train_epoch_end": on_train_epoch_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_end": on_train_end,
    }
    if mlflow
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/mlflow.py
Python
unknown
4,909
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING

try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["neptune"] is True  # verify integration is enabled
    import neptune
    from neptune.types import File

    assert hasattr(neptune, "__version__")

    run = None  # NeptuneAI experiment logger instance

except (ImportError, AssertionError):
    neptune = None


def _log_scalars(scalars, step=0):
    """Log scalars to the NeptuneAI experiment logger."""
    if run:
        for k, v in scalars.items():
            run[k].append(value=v, step=step)


def _log_images(imgs_dict, group=""):
    """Log images to the NeptuneAI experiment logger under the given group namespace."""
    if run:
        for k, v in imgs_dict.items():
            run[f"{group}/{k}"].upload(File(v))


def _log_plot(title, plot_path):
    """
    Log plots to the NeptuneAI experiment logger.

    Args:
        title (str): Title of the plot.
        plot_path (PosixPath | str): Path to the saved image file.
    """
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt

    img = mpimg.imread(plot_path)
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[])  # no ticks
    ax.imshow(img)
    run[f"Plots/{title}"].upload(fig)


def on_pretrain_routine_start(trainer):
    """Callback function called before the training routine starts."""
    try:
        global run
        run = neptune.init_run(project=trainer.args.project or "YOLOv8", name=trainer.args.name, tags=["YOLOv8"])
        run["Configuration/Hyperparameters"] = {k: "" if v is None else v for k, v in vars(trainer.args).items()}
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ NeptuneAI installed but not initialized correctly, not logging this run. {e}")


def on_train_epoch_end(trainer):
    """Callback function called at end of each training epoch."""
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
    _log_scalars(trainer.lr, trainer.epoch + 1)
    # Log mosaic training batches once, on the second epoch (epoch index 1)
    if trainer.epoch == 1:
        _log_images({f.stem: str(f) for f in trainer.save_dir.glob("train_batch*.jpg")}, "Mosaic")


def on_fit_epoch_end(trainer):
    """Callback function called at end of each fit (train+val) epoch."""
    if run and trainer.epoch == 0:
        from ultralytics.utils.torch_utils import model_info_for_loggers

        run["Configuration/Model"] = model_info_for_loggers(trainer)
    _log_scalars(trainer.metrics, trainer.epoch + 1)


def on_val_end(validator):
    """Callback function called at end of each validation."""
    if run:
        # Log val_labels and val_pred
        _log_images({f.stem: str(f) for f in validator.save_dir.glob("val*.jpg")}, "Validation")


def on_train_end(trainer):
    """Callback function called at end of training."""
    if run:
        # Log final results, CM matrix + PR plots
        files = [
            "results.png",
            "confusion_matrix.png",
            "confusion_matrix_normalized.png",
            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
        ]
        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
        for f in files:
            _log_plot(title=f.stem, plot_path=f)
        # Log the final model
        run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))


callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_epoch_end": on_train_epoch_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_val_end": on_val_end,
        "on_train_end": on_train_end,
    }
    if neptune
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/neptune.py
Python
unknown
3,756
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import SETTINGS

try:
    assert SETTINGS["raytune"] is True  # verify integration is enabled
    import ray
    from ray import tune
    from ray.air import session

except (ImportError, AssertionError):
    # Ray not installed or integration disabled: callbacks below resolve to an empty dict
    tune = None


def on_fit_epoch_end(trainer):
    """Sends training metrics to Ray Tune at end of each epoch."""
    if ray.tune.is_session_enabled():
        # Build a merged copy instead of inserting "epoch" into trainer.metrics in place,
        # which would leak a logging-only key into the trainer's shared metrics dict
        metrics = {**trainer.metrics, "epoch": trainer.epoch}
        session.report(metrics)


callbacks = (
    {
        "on_fit_epoch_end": on_fit_epoch_end,
    }
    if tune
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/raytune.py
Python
unknown
632
# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib

from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr

try:
    # WARNING: do not move SummaryWriter import due to protobuf bug https://github.com/ultralytics/ultralytics/pull/4674
    from torch.utils.tensorboard import SummaryWriter

    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["tensorboard"] is True  # verify integration is enabled
    WRITER = None  # TensorBoard SummaryWriter instance
    PREFIX = colorstr("TensorBoard: ")

    # Imports below only required if TensorBoard enabled
    import warnings
    from copy import deepcopy

    from ultralytics.utils.torch_utils import de_parallel, torch

except (ImportError, AssertionError, TypeError, AttributeError):
    # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
    # AttributeError: module 'tensorflow' has no attribute 'io' if 'tensorflow' not installed
    SummaryWriter = None


def _log_scalars(scalars, step=0):
    """Logs scalar values to TensorBoard."""
    if WRITER:
        for k, v in scalars.items():
            WRITER.add_scalar(k, v, step)


def _log_tensorboard_graph(trainer):
    """Log model graph to TensorBoard."""
    # Input image
    imgsz = trainer.args.imgsz
    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
    p = next(trainer.model.parameters())  # for device, type
    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)  # suppress jit trace warning
        warnings.simplefilter("ignore", category=torch.jit.TracerWarning)  # suppress jit trace warning

        # Try simple method first (YOLO)
        with contextlib.suppress(Exception):
            WRITER.add_graph(torch.jit.trace(de_parallel(trainer.model), im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            return

        # Fallback to TorchScript export steps (RTDETR)
        try:
            model = deepcopy(de_parallel(trainer.model))
            model.eval()
            model = model.fuse(verbose=False)
            for m in model.modules():
                if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
                    m.export = True
                    m.format = "torchscript"
            model(im)  # dry run
            WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
        except Exception as e:
            LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard graph visualization failure {e}")


def on_pretrain_routine_start(trainer):
    """Initialize TensorBoard logging with SummaryWriter."""
    if SummaryWriter:
        try:
            global WRITER
            WRITER = SummaryWriter(str(trainer.save_dir))
            LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
        except Exception as e:
            LOGGER.warning(f"{PREFIX}WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}")


def on_train_start(trainer):
    """Log TensorBoard graph."""
    if WRITER:
        _log_tensorboard_graph(trainer)


def on_train_epoch_end(trainer):
    """Logs scalar statistics at the end of a training epoch."""
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
    _log_scalars(trainer.lr, trainer.epoch + 1)


def on_fit_epoch_end(trainer):
    """Logs epoch metrics at end of training epoch."""
    _log_scalars(trainer.metrics, trainer.epoch + 1)


callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_start": on_train_start,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_epoch_end": on_train_epoch_end,
    }
    if SummaryWriter
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/tensorboard.py
Python
unknown
4,038
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import SETTINGS, TESTS_RUNNING
from ultralytics.utils.torch_utils import model_info_for_loggers

try:
    assert not TESTS_RUNNING  # do not log pytest
    assert SETTINGS["wandb"] is True  # verify integration is enabled
    import wandb as wb

    assert hasattr(wb, "__version__")  # verify package is not directory

    import numpy as np
    import pandas as pd

    # Cache of plot-name -> last-logged timestamp, to avoid re-uploading unchanged plots
    _processed_plots = {}

except (ImportError, AssertionError):
    wb = None


def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall", y_title="Precision"):
    """
    Create and log a custom metric visualization to wandb.plot.pr_curve.

    This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
    curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
    different classes.

    Args:
        x (List): Values for the x-axis; expected to have length N.
        y (List): Corresponding values for the y-axis; also expected to have length N.
        classes (List): Labels identifying the class of each point; length N.
        title (str, optional): Title for the plot; defaults to 'Precision Recall Curve'.
        x_title (str, optional): Label for the x-axis; defaults to 'Recall'.
        y_title (str, optional): Label for the y-axis; defaults to 'Precision'.

    Returns:
        (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization.
    """
    df = pd.DataFrame({"class": classes, "y": y, "x": x}).round(3)
    fields = {"x": "x", "y": "y", "class": "class"}
    string_fields = {"title": title, "x-axis-title": x_title, "y-axis-title": y_title}
    return wb.plot_table(
        "wandb/area-under-curve/v0", wb.Table(dataframe=df), fields=fields, string_fields=string_fields
    )


def _plot_curve(
    x,
    y,
    names=None,
    id="precision-recall",
    title="Precision Recall Curve",
    x_title="Recall",
    y_title="Precision",
    num_x=100,
    only_mean=False,
):
    """
    Log a metric curve visualization.

    This function generates a metric curve based on input data and logs the visualization to wandb. The curve can
    represent aggregated data (mean) or individual class data, depending on the 'only_mean' flag.

    Args:
        x (np.ndarray): Data points for the x-axis with length N.
        y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C represents the number of
            classes.
        names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to an empty
            list.
        id (str, optional): Unique identifier for the logged data in wandb. Defaults to 'precision-recall'.
        title (str, optional): Title for the visualization plot. Defaults to 'Precision Recall Curve'.
        x_title (str, optional): Label for the x-axis. Defaults to 'Recall'.
        y_title (str, optional): Label for the y-axis. Defaults to 'Precision'.
        num_x (int, optional): Number of interpolated data points for visualization. Defaults to 100.
        only_mean (bool, optional): Flag to indicate if only the mean curve should be plotted. Defaults to False.

    Note:
        The function leverages the '_custom_table' function to generate the actual visualization.
    """
    # Create new x
    if names is None:
        names = []
    x_new = np.linspace(x[0], x[-1], num_x).round(5)

    # Create arrays for logging
    x_log = x_new.tolist()
    y_log = np.interp(x_new, x, np.mean(y, axis=0)).round(3).tolist()

    if only_mean:
        table = wb.Table(data=list(zip(x_log, y_log)), columns=[x_title, y_title])
        wb.run.log({title: wb.plot.line(table, x_title, y_title, title=title)})
    else:
        classes = ["mean"] * len(x_log)
        for i, yi in enumerate(y):
            x_log.extend(x_new)  # add new x
            y_log.extend(np.interp(x_new, x, yi))  # interpolate y to new x
            classes.extend([names[i]] * len(x_new))  # add class names
        wb.log({id: _custom_table(x_log, y_log, classes, title, x_title, y_title)}, commit=False)


def _log_plots(plots, step):
    """Logs plots from the input dictionary if they haven't been logged already at the specified step."""
    for name, params in plots.items():
        timestamp = params["timestamp"]
        if _processed_plots.get(name) != timestamp:
            wb.run.log({name.stem: wb.Image(str(name))}, step=step)
            _processed_plots[name] = timestamp


def on_pretrain_routine_start(trainer):
    """Initiate and start project if module is present."""
    wb.run or wb.init(project=trainer.args.project or "YOLOv8", name=trainer.args.name, config=vars(trainer.args))


def on_fit_epoch_end(trainer):
    """Logs training metrics and model information at the end of an epoch."""
    wb.run.log(trainer.metrics, step=trainer.epoch + 1)
    _log_plots(trainer.plots, step=trainer.epoch + 1)
    _log_plots(trainer.validator.plots, step=trainer.epoch + 1)
    if trainer.epoch == 0:
        wb.run.log(model_info_for_loggers(trainer), step=trainer.epoch + 1)


def on_train_epoch_end(trainer):
    """Log metrics and save images at the end of each training epoch."""
    wb.run.log(trainer.label_loss_items(trainer.tloss, prefix="train"), step=trainer.epoch + 1)
    wb.run.log(trainer.lr, step=trainer.epoch + 1)
    if trainer.epoch == 1:
        _log_plots(trainer.plots, step=trainer.epoch + 1)


def on_train_end(trainer):
    """Save the best model as an artifact at end of training."""
    _log_plots(trainer.validator.plots, step=trainer.epoch + 1)
    _log_plots(trainer.plots, step=trainer.epoch + 1)
    art = wb.Artifact(type="model", name=f"run_{wb.run.id}_model")
    if trainer.best.exists():
        art.add_file(trainer.best)
        wb.run.log_artifact(art, aliases=["best"])
    for curve_name, curve_values in zip(trainer.validator.metrics.curves, trainer.validator.metrics.curves_results):
        x, y, x_title, y_title = curve_values
        _plot_curve(
            x,
            y,
            names=list(trainer.validator.metrics.names.values()),
            id=f"curves/{curve_name}",
            title=curve_name,
            x_title=x_title,
            y_title=y_title,
        )
    wb.run.finish()  # required or run continues on dashboard


callbacks = (
    {
        "on_pretrain_routine_start": on_pretrain_routine_start,
        "on_train_epoch_end": on_train_epoch_end,
        "on_fit_epoch_end": on_fit_epoch_end,
        "on_train_end": on_train_end,
    }
    if wb
    else {}
)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/callbacks/wb.py
Python
unknown
6,650
# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib
import glob
import inspect
import math
import os
import platform
import re
import shutil
import subprocess
import sys
import time
from importlib import metadata
from pathlib import Path
from typing import Optional

import cv2
import numpy as np
import requests
import torch
from matplotlib import font_manager

from ultralytics.utils import (
    ASSETS,
    AUTOINSTALL,
    LINUX,
    LOGGER,
    ONLINE,
    ROOT,
    USER_CONFIG_DIR,
    SimpleNamespace,
    ThreadingLocked,
    TryExcept,
    clean_url,
    colorstr,
    downloads,
    emojis,
    is_colab,
    is_docker,
    is_github_action_running,
    is_jupyter,
    is_kaggle,
    is_online,
    is_pip_package,
    url2file,
)


def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
    """
    Parse a requirements.txt file, ignoring lines that start with '#' and any text after '#'.

    Args:
        file_path (Path): Path to the requirements.txt file.
        package (str, optional): Python package to use instead of requirements.txt file, i.e. package='ultralytics'.

    Returns:
        (List[Dict[str, str]]): List of parsed requirements as dictionaries with `name` and `specifier` keys.

    Example:
        ```python
        from ultralytics.utils.checks import parse_requirements

        parse_requirements(package='ultralytics')
        ```
    """
    if package:
        requires = [x for x in metadata.distribution(package).requires if "extra == " not in x]
    else:
        requires = Path(file_path).read_text().splitlines()

    requirements = []
    for line in requires:
        line = line.strip()
        if line and not line.startswith("#"):
            line = line.split("#")[0].strip()  # ignore inline comments
            # Split "name" from the optional pip-style version specifier, e.g. "torch>=1.8.0"
            match = re.match(r"([a-zA-Z0-9-_]+)\s*([<>!=~]+.*)?", line)
            if match:
                requirements.append(SimpleNamespace(name=match[1], specifier=match[2].strip() if match[2] else ""))

    return requirements


def parse_version(version="0.0.0") -> tuple:
    """
    Convert a version string to a tuple of integers, ignoring any extra non-numeric string attached to the version.

    This function replaces deprecated 'pkg_resources.parse_version(v)'.

    Args:
        version (str): Version string, i.e. '2.0.1+cpu'

    Returns:
        (tuple): Tuple of integers representing the numeric part of the version and the extra string, i.e. (2, 0, 1)
    """
    try:
        return tuple(map(int, re.findall(r"\d+", version)[:3]))  # '2.0.1+cpu' -> (2, 0, 1)
    except Exception as e:
        LOGGER.warning(f"WARNING ⚠️ failure for parse_version({version}), returning (0, 0, 0): {e}")
        return 0, 0, 0


def is_ascii(s) -> bool:
    """
    Check if a string is composed of only ASCII characters.

    Args:
        s (str): String to be checked.

    Returns:
        (bool): True if the string is composed only of ASCII characters, False otherwise.
    """
    # Convert list, tuple, None, etc. to string
    s = str(s)

    # Check if the string is composed of only ASCII characters
    return all(ord(c) < 128 for c in s)


def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
    """
    Verify image size is a multiple of the given stride in each dimension. If the image size is not a multiple of the
    stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value.

    Args:
        imgsz (int | List[int]): Image size.
        stride (int): Stride value.
        min_dim (int): Minimum number of dimensions.
        max_dim (int): Maximum number of dimensions.
        floor (int): Minimum allowed value for image size.

    Returns:
        (List[int]): Updated image size.
    """
    # Convert stride to integer if it is a tensor
    stride = int(stride.max() if isinstance(stride, torch.Tensor) else stride)

    # Convert image size to list if it is an integer
    if isinstance(imgsz, int):
        imgsz = [imgsz]
    elif isinstance(imgsz, (list, tuple)):
        imgsz = list(imgsz)
    else:
        raise TypeError(
            f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. "
            f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'"
        )

    # Apply max_dim
    if len(imgsz) > max_dim:
        msg = (
            "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list "
            "or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'"
        )
        if max_dim != 1:
            raise ValueError(f"imgsz={imgsz} is not a valid image size. {msg}")
        # max_dim == 1: collapse [h, w] to the single largest dimension with a warning instead of raising
        LOGGER.warning(f"WARNING ⚠️ updating to 'imgsz={max(imgsz)}'. {msg}")
        imgsz = [max(imgsz)]
    # Make image size a multiple of the stride
    sz = [max(math.ceil(x / stride) * stride, floor) for x in imgsz]

    # Print warning message if image size was updated
    if sz != imgsz:
        LOGGER.warning(f"WARNING ⚠️ imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}")

    # Add missing dimensions if necessary
    sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz

    return sz


def check_version(
    current: str = "0.0.0",
    required: str = "0.0.0",
    name: str = "version",
    hard: bool = False,
    verbose: bool = False,
    msg: str = "",
) -> bool:
    """
    Check current version against the required version or range.

    Args:
        current (str): Current version or package name to get version from.
        required (str): Required version or range (in pip-style format).
        name (str, optional): Name to be used in warning message.
        hard (bool, optional): If True, raise an AssertionError if the requirement is not met.
        verbose (bool, optional): If True, print warning message if requirement is not met.
        msg (str, optional): Extra message to display if verbose.

    Returns:
        (bool): True if requirement is met, False otherwise.
Example: ```python # Check if current version is exactly 22.04 check_version(current='22.04', required='==22.04') # Check if current version is greater than or equal to 22.04 check_version(current='22.10', required='22.04') # assumes '>=' inequality if none passed # Check if current version is less than or equal to 22.04 check_version(current='22.04', required='<=22.04') # Check if current version is between 20.04 (inclusive) and 22.04 (exclusive) check_version(current='21.10', required='>20.04,<22.04') ``` """ if not current: # if current is '' or None LOGGER.warning(f"WARNING ⚠️ invalid check_version({current}, {required}) requested, please check values.") return True elif not current[0].isdigit(): # current is package name rather than version string, i.e. current='ultralytics' try: name = current # assigned package name to 'name' arg current = metadata.version(current) # get version string from package name except metadata.PackageNotFoundError as e: if hard: raise ModuleNotFoundError(emojis(f"WARNING ⚠️ {current} package is required but not installed")) from e else: return False if not required: # if required is '' or None return True op = "" version = "" result = True c = parse_version(current) # '1.2.3' -> (1, 2, 3) for r in required.strip(",").split(","): op, version = re.match(r"([^0-9]*)([\d.]+)", r).groups() # split '>=22.04' -> ('>=', '22.04') v = parse_version(version) # '1.2.3' -> (1, 2, 3) if op == "==" and c != v: result = False elif op == "!=" and c == v: result = False elif op in (">=", "") and not (c >= v): # if no constraint passed assume '>=required' result = False elif op == "<=" and not (c <= v): result = False elif op == ">" and not (c > v): result = False elif op == "<" and not (c < v): result = False if not result: warning = f"WARNING ⚠️ {name}{op}{version} is required, but {name}=={current} is currently installed {msg}" if hard: raise ModuleNotFoundError(emojis(warning)) # assert version requirements met if verbose: LOGGER.warning(warning) 
return result def check_latest_pypi_version(package_name="ultralytics"): """ Returns the latest version of a PyPI package without downloading or installing it. Parameters: package_name (str): The name of the package to find the latest version for. Returns: (str): The latest version of the package. """ with contextlib.suppress(Exception): requests.packages.urllib3.disable_warnings() # Disable the InsecureRequestWarning response = requests.get(f"https://pypi.org/pypi/{package_name}/json", timeout=3) if response.status_code == 200: return response.json()["info"]["version"] def check_pip_update_available(): """ Checks if a new version of the ultralytics package is available on PyPI. Returns: (bool): True if an update is available, False otherwise. """ if ONLINE and is_pip_package(): with contextlib.suppress(Exception): from ultralytics import __version__ latest = check_latest_pypi_version() if check_version(__version__, f"<{latest}"): # check if current version is < latest version LOGGER.info( f"New https://pypi.org/project/ultralytics/{latest} available 😃 " f"Update with 'pip install -U ultralytics'" ) return True return False @ThreadingLocked() def check_font(font="Arial.ttf"): """ Find font locally or download to user's configuration directory if it does not already exist. Args: font (str): Path or name of font. Returns: file (Path): Resolved font file path. """ name = Path(font).name # Check USER_CONFIG_DIR file = USER_CONFIG_DIR / name if file.exists(): return file # Check system fonts matches = [s for s in font_manager.findSystemFonts() if font in s] if any(matches): return matches[0] # Download to USER_CONFIG_DIR if missing url = f"https://ultralytics.com/assets/{name}" if downloads.is_url(url): downloads.safe_download(url=url, file=file) return file def check_python(minimum: str = "3.8.0") -> bool: """ Check current python version against the required minimum version. Args: minimum (str): Required minimum version of python. 
Returns: (bool): Whether the installed Python version meets the minimum constraints. """ return check_version(platform.python_version(), minimum, name="Python ", hard=True) @TryExcept() def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=(), install=True, cmds=""): """ Check if installed dependencies meet YOLOv8 requirements and attempt to auto-update if needed. Args: requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a string, or a list of package requirements as strings. exclude (Tuple[str]): Tuple of package names to exclude from checking. install (bool): If True, attempt to auto-update packages that don't meet requirements. cmds (str): Additional commands to pass to the pip install command when auto-updating. Example: ```python from ultralytics.utils.checks import check_requirements # Check a requirements.txt file check_requirements('path/to/requirements.txt') # Check a single package check_requirements('ultralytics>=8.0.0') # Check multiple packages check_requirements(['numpy', 'ultralytics>=8.0.0']) ``` """ prefix = colorstr("red", "bold", "requirements:") check_python() # check python version check_torchvision() # check torch-torchvision compatibility if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() assert file.exists(), f"{prefix} {file} not found, check failed." 
requirements = [f"{x.name}{x.specifier}" for x in parse_requirements(file) if x.name not in exclude] elif isinstance(requirements, str): requirements = [requirements] pkgs = [] for r in requirements: r_stripped = r.split("/")[-1].replace(".git", "") # replace git+https://org/repo.git -> 'repo' match = re.match(r"([a-zA-Z0-9-_]+)([<>!=~]+.*)?", r_stripped) name, required = match[1], match[2].strip() if match[2] else "" try: assert check_version(metadata.version(name), required) # exception if requirements not met except (AssertionError, metadata.PackageNotFoundError): pkgs.append(r) s = " ".join(f'"{x}"' for x in pkgs) # console string if s: if install and AUTOINSTALL: # check environment variable n = len(pkgs) # number of packages updates LOGGER.info(f"{prefix} Ultralytics requirement{'s' * (n > 1)} {pkgs} not found, attempting AutoUpdate...") try: t = time.time() assert is_online(), "AutoUpdate skipped (offline)" LOGGER.info(subprocess.check_output(f"pip install --no-cache {s} {cmds}", shell=True).decode()) dt = time.time() - t LOGGER.info( f"{prefix} AutoUpdate success ✅ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}\n" f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" ) except Exception as e: LOGGER.warning(f"{prefix} ❌ {e}") return False else: return False return True def check_torchvision(): """ Checks the installed versions of PyTorch and Torchvision to ensure they're compatible. This function checks the installed versions of PyTorch and Torchvision, and warns if they're incompatible according to the provided compatibility table based on: https://github.com/pytorch/vision#installation. The compatibility table is a dictionary where the keys are PyTorch versions and the values are lists of compatible Torchvision versions. 
""" import torchvision # Compatibility table compatibility_table = {"2.0": ["0.15"], "1.13": ["0.14"], "1.12": ["0.13"]} # Extract only the major and minor versions v_torch = ".".join(torch.__version__.split("+")[0].split(".")[:2]) v_torchvision = ".".join(torchvision.__version__.split("+")[0].split(".")[:2]) if v_torch in compatibility_table: compatible_versions = compatibility_table[v_torch] if all(v_torchvision != v for v in compatible_versions): print( f"WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n" f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or " "'pip install -U torch torchvision' to update both.\n" "For a full compatibility table see https://github.com/pytorch/vision#installation" ) def check_suffix(file="yolov8n.pt", suffix=".pt", msg=""): """Check file(s) for acceptable suffix.""" if file and suffix: if isinstance(suffix, str): suffix = (suffix,) for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower().strip() # file suffix if len(s): assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}, not {s}" def check_yolov5u_filename(file: str, verbose: bool = True): """Replace legacy YOLOv5 filenames with updated YOLOv5u filenames.""" if "yolov3" in file or "yolov5" in file: if "u.yaml" in file: file = file.replace("u.yaml", ".yaml") # i.e. yolov5nu.yaml -> yolov5n.yaml elif ".pt" in file and "u" not in file: original_file = file file = re.sub(r"(.*yolov5([nsmlx]))\.pt", "\\1u.pt", file) # i.e. yolov5n.pt -> yolov5nu.pt file = re.sub(r"(.*yolov5([nsmlx])6)\.pt", "\\1u.pt", file) # i.e. yolov5n6.pt -> yolov5n6u.pt file = re.sub(r"(.*yolov3(|-tiny|-spp))\.pt", "\\1u.pt", file) # i.e. 
yolov3-spp.pt -> yolov3-sppu.pt if file != original_file and verbose: LOGGER.info( f"PRO TIP 💡 Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are " f"trained with https://github.com/ultralytics/ultralytics and feature improved performance vs " f"standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n" ) return file def check_model_file_from_stem(model="yolov8n"): """Return a model filename from a valid model stem.""" if model and not Path(model).suffix and Path(model).stem in downloads.GITHUB_ASSETS_STEMS: return Path(model).with_suffix(".pt") # add suffix, i.e. yolov8n -> yolov8n.pt else: return model def check_file(file, suffix="", download=True, hard=True): """Search/download file (if necessary) and return path.""" check_suffix(file, suffix) # optional file = str(file).strip() # convert to string and strip spaces file = check_yolov5u_filename(file) # yolov5n -> yolov5nu if ( not file or ("://" not in file and Path(file).exists()) # '://' check required in Windows Python<3.10 or file.lower().startswith("grpc://") ): # file exists or gRPC Triton images return file elif download and file.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")): # download url = file # warning: Pathlib turns :// -> :/ file = url2file(file) # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).exists(): LOGGER.info(f"Found {clean_url(url)} locally at {file}") # file already exists else: downloads.safe_download(url=url, file=file, unzip=False) return file else: # search files = glob.glob(str(ROOT / "cfg" / "**" / file), recursive=True) # find file if not files and hard: raise FileNotFoundError(f"'{file}' does not exist") elif len(files) > 1 and hard: raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}") return files[0] if len(files) else [] # return file def check_yaml(file, suffix=(".yaml", ".yml"), hard=True): """Search/download YAML file (if necessary) and return path, 
checking suffix.""" return check_file(file, suffix, hard=hard) def check_is_path_safe(basedir, path): """ Check if the resolved path is under the intended directory to prevent path traversal. Args: basedir (Path | str): The intended directory. path (Path | str): The path to check. Returns: (bool): True if the path is safe, False otherwise. """ base_dir_resolved = Path(basedir).resolve() path_resolved = Path(path).resolve() return path_resolved.is_file() and path_resolved.parts[: len(base_dir_resolved.parts)] == base_dir_resolved.parts def check_imshow(warn=False): """Check if environment supports image displays.""" try: if LINUX: assert "DISPLAY" in os.environ and not is_docker() and not is_colab() and not is_kaggle() cv2.imshow("test", np.zeros((8, 8, 3), dtype=np.uint8)) # show a small 8-pixel image cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: if warn: LOGGER.warning(f"WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}") return False def check_yolo(verbose=True, device=""): """Return a human-readable YOLO software and hardware summary.""" import psutil from ultralytics.utils.torch_utils import select_device if is_jupyter(): if check_requirements("wandb", install=False): os.system("pip uninstall -y wandb") # uninstall wandb: unwanted account creation prompt with infinite hang if is_colab(): shutil.rmtree("sample_data", ignore_errors=True) # remove colab /sample_data directory if verbose: # System info gib = 1 << 30 # bytes per GiB ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage("/") s = f"({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)" with contextlib.suppress(Exception): # clear display if ipython is installed from IPython import display display.clear_output() else: s = "" select_device(device=device, newline=False) LOGGER.info(f"Setup complete ✅ {s}") def collect_system_info(): """Collect and print relevant 
system information including OS, Python, RAM, CPU, and CUDA.""" import psutil from ultralytics.utils import ENVIRONMENT, is_git_dir from ultralytics.utils.torch_utils import get_cpu_info ram_info = psutil.virtual_memory().total / (1024**3) # Convert bytes to GB check_yolo() LOGGER.info( f"\n{'OS':<20}{platform.platform()}\n" f"{'Environment':<20}{ENVIRONMENT}\n" f"{'Python':<20}{sys.version.split()[0]}\n" f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n" f"{'RAM':<20}{ram_info:.2f} GB\n" f"{'CPU':<20}{get_cpu_info()}\n" f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n" ) for r in parse_requirements(package="ultralytics"): try: current = metadata.version(r.name) is_met = "✅ " if check_version(current, str(r.specifier), hard=True) else "❌ " except metadata.PackageNotFoundError: current = "(not installed)" is_met = "❌ " LOGGER.info(f"{r.name:<20}{is_met}{current}{r.specifier}") if is_github_action_running(): LOGGER.info( f"\nRUNNER_OS: {os.getenv('RUNNER_OS')}\n" f"GITHUB_EVENT_NAME: {os.getenv('GITHUB_EVENT_NAME')}\n" f"GITHUB_WORKFLOW: {os.getenv('GITHUB_WORKFLOW')}\n" f"GITHUB_ACTOR: {os.getenv('GITHUB_ACTOR')}\n" f"GITHUB_REPOSITORY: {os.getenv('GITHUB_REPOSITORY')}\n" f"GITHUB_REPOSITORY_OWNER: {os.getenv('GITHUB_REPOSITORY_OWNER')}\n" ) def check_amp(model): """ This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. If the checks fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP results, so AMP will be disabled during training. Args: model (nn.Module): A YOLOv8 model instance. Example: ```python from ultralytics import YOLO from ultralytics.utils.checks import check_amp model = YOLO('yolov8n.pt').model.cuda() check_amp(model) ``` Returns: (bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False. 
""" device = next(model.parameters()).device # get model device if device.type in ("cpu", "mps"): return False # AMP only used on CUDA devices def amp_allclose(m, im): """All close FP32 vs AMP results.""" a = m(im, device=device, verbose=False)[0].boxes.data # FP32 inference with torch.cuda.amp.autocast(True): b = m(im, device=device, verbose=False)[0].boxes.data # AMP inference del m return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance im = ASSETS / "bus.jpg" # image to check prefix = colorstr("AMP: ") LOGGER.info(f"{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...") warning_msg = "Setting 'amp=True'. If you experience zero-mAP or NaN losses you can disable AMP with amp=False." try: from ultralytics import YOLO assert amp_allclose(YOLO("yolov8n.pt"), im) LOGGER.info(f"{prefix}checks passed ✅") except ConnectionError: LOGGER.warning(f"{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. {warning_msg}") except (AttributeError, ModuleNotFoundError): LOGGER.warning( f"{prefix}checks skipped ⚠️. " f"Unable to load YOLOv8n due to possible Ultralytics package modifications. {warning_msg}" ) except AssertionError: LOGGER.warning( f"{prefix}checks failed ❌. Anomalies were detected with AMP on your system that may lead to " f"NaN losses or zero-mAP results, so AMP will be disabled during training." ) return False return True def git_describe(path=ROOT): # path must be a directory """Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe.""" with contextlib.suppress(Exception): return subprocess.check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1] return "" def print_args(args: Optional[dict] = None, show_file=True, show_func=False): """Print function arguments (optional args dict).""" def strip_auth(v): """Clean longer Ultralytics HUB URLs by stripping potential authentication information.""" return clean_url(v) if (isinstance(v, str) and v.startswith("http") and len(v) > 100) else v x = inspect.currentframe().f_back # previous frame file, _, func, _, _ = inspect.getframeinfo(x) if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} try: file = Path(file).resolve().relative_to(ROOT).with_suffix("") except ValueError: file = Path(file).stem s = (f"{file}: " if show_file else "") + (f"{func}: " if show_func else "") LOGGER.info(colorstr(s) + ", ".join(f"{k}={strip_auth(v)}" for k, v in args.items())) def cuda_device_count() -> int: """ Get the number of NVIDIA GPUs available in the environment. Returns: (int): The number of NVIDIA GPUs available. """ try: # Run the nvidia-smi command and capture its output output = subprocess.check_output( ["nvidia-smi", "--query-gpu=count", "--format=csv,noheader,nounits"], encoding="utf-8" ) # Take the first line and strip any leading/trailing white space first_line = output.strip().split("\n")[0] return int(first_line) except (subprocess.CalledProcessError, FileNotFoundError, ValueError): # If the command fails, nvidia-smi is not found, or output is not an integer, assume no GPUs are available return 0 def cuda_is_available() -> bool: """ Check if CUDA is available in the environment. Returns: (bool): True if one or more NVIDIA GPUs are available, False otherwise. """ return cuda_device_count() > 0
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/checks.py
Python
unknown
27,665
# Ultralytics YOLO 🚀, AGPL-3.0 license import os import shutil import socket import sys import tempfile from . import USER_CONFIG_DIR from .torch_utils import TORCH_1_9 def find_free_network_port() -> int: """ Finds a free port on localhost. It is useful in single-node training when we don't want to connect to a real main node but have to set the `MASTER_PORT` environment variable. """ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind(("127.0.0.1", 0)) return s.getsockname()[1] # port def generate_ddp_file(trainer): """Generates a DDP file and returns its file name.""" module, name = f"{trainer.__class__.__module__}.{trainer.__class__.__name__}".rsplit(".", 1) content = f""" # Ultralytics Multi-GPU training temp file (should be automatically deleted after use) overrides = {vars(trainer.args)} if __name__ == "__main__": from {module} import {name} from ultralytics.utils import DEFAULT_CFG_DICT cfg = DEFAULT_CFG_DICT.copy() cfg.update(save_dir='') # handle the extra key 'save_dir' trainer = {name}(cfg=cfg, overrides=overrides) results = trainer.train() """ (USER_CONFIG_DIR / "DDP").mkdir(exist_ok=True) with tempfile.NamedTemporaryFile( prefix="_temp_", suffix=f"{id(trainer)}.py", mode="w+", encoding="utf-8", dir=USER_CONFIG_DIR / "DDP", delete=False, ) as file: file.write(content) return file.name def generate_ddp_command(world_size, trainer): """Generates and returns command for distributed training.""" import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218 if not trainer.resume: shutil.rmtree(trainer.save_dir) # remove the save_dir file = generate_ddp_file(trainer) dist_cmd = "torch.distributed.run" if TORCH_1_9 else "torch.distributed.launch" port = find_free_network_port() cmd = [sys.executable, "-m", dist_cmd, "--nproc_per_node", f"{world_size}", "--master_port", f"{port}", file] return cmd, file def ddp_cleanup(trainer, file): """Delete temp file if created.""" if f"{id(trainer)}.py" in file: # if 
temp_file suffix in file os.remove(file)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/dist.py
Python
unknown
2,267
# Ultralytics YOLO 🚀, AGPL-3.0 license import contextlib import re import shutil import subprocess from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path from urllib import parse, request import requests import torch from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online, url2file # Define Ultralytics GitHub assets maintained at https://github.com/ultralytics/assets GITHUB_ASSETS_REPO = "ultralytics/assets" GITHUB_ASSETS_NAMES = ( [f"yolov8{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")] + [f"yolov5{k}{resolution}u.pt" for k in "nsmlx" for resolution in ("", "6")] + [f"yolov3{k}u.pt" for k in ("", "-spp", "-tiny")] + [f"yolo_nas_{k}.pt" for k in "sml"] + [f"sam_{k}.pt" for k in "bl"] + [f"FastSAM-{k}.pt" for k in "sx"] + [f"rtdetr-{k}.pt" for k in "lx"] + ["mobile_sam.pt"] ) GITHUB_ASSETS_STEMS = [Path(k).stem for k in GITHUB_ASSETS_NAMES] def is_url(url, check=True): """ Validates if the given string is a URL and optionally checks if the URL exists online. Args: url (str): The string to be validated as a URL. check (bool, optional): If True, performs an additional check to see if the URL exists online. Defaults to True. Returns: (bool): Returns True if the string is a valid URL. If 'check' is True, also returns True if the URL exists online. Returns False otherwise. Example: ```python valid = is_url("https://www.example.com") ``` """ with contextlib.suppress(Exception): url = str(url) result = parse.urlparse(url) assert all([result.scheme, result.netloc]) # check if is url if check: with request.urlopen(url) as response: return response.getcode() == 200 # check if exists online return True return False def delete_dsstore(path, files_to_delete=(".DS_Store", "__MACOSX")): """ Deletes all ".DS_store" files under a specified directory. Args: path (str, optional): The directory path where the ".DS_store" files should be deleted. 
files_to_delete (tuple): The files to be deleted. Example: ```python from ultralytics.utils.downloads import delete_dsstore delete_dsstore('path/to/dir') ``` Note: ".DS_store" files are created by the Apple operating system and contain metadata about folders and files. They are hidden system files and can cause issues when transferring files between different operating systems. """ for file in files_to_delete: matches = list(Path(path).rglob(file)) LOGGER.info(f"Deleting {file} files: {matches}") for f in matches: f.unlink() def zip_directory(directory, compress=True, exclude=(".DS_Store", "__MACOSX"), progress=True): """ Zips the contents of a directory, excluding files containing strings in the exclude list. The resulting zip file is named after the directory and placed alongside it. Args: directory (str | Path): The path to the directory to be zipped. compress (bool): Whether to compress the files while zipping. Default is True. exclude (tuple, optional): A tuple of filename strings to be excluded. Defaults to ('.DS_Store', '__MACOSX'). progress (bool, optional): Whether to display a progress bar. Defaults to True. Returns: (Path): The path to the resulting zip file. 
Example: ```python from ultralytics.utils.downloads import zip_directory file = zip_directory('path/to/dir') ``` """ from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile delete_dsstore(directory) directory = Path(directory) if not directory.is_dir(): raise FileNotFoundError(f"Directory '{directory}' does not exist.") # Unzip with progress bar files_to_zip = [f for f in directory.rglob("*") if f.is_file() and all(x not in f.name for x in exclude)] zip_file = directory.with_suffix(".zip") compression = ZIP_DEFLATED if compress else ZIP_STORED with ZipFile(zip_file, "w", compression) as f: for file in TQDM(files_to_zip, desc=f"Zipping {directory} to {zip_file}...", unit="file", disable=not progress): f.write(file, file.relative_to(directory)) return zip_file # return path to zip file def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=False, progress=True): """ Unzips a *.zip file to the specified path, excluding files containing strings in the exclude list. If the zipfile does not contain a single top-level directory, the function will create a new directory with the same name as the zipfile (without the extension) to extract its contents. If a path is not provided, the function will use the parent directory of the zipfile as the default path. Args: file (str): The path to the zipfile to be extracted. path (str, optional): The path to extract the zipfile to. Defaults to None. exclude (tuple, optional): A tuple of filename strings to be excluded. Defaults to ('.DS_Store', '__MACOSX'). exist_ok (bool, optional): Whether to overwrite existing contents if they exist. Defaults to False. progress (bool, optional): Whether to display a progress bar. Defaults to True. Raises: BadZipFile: If the provided file does not exist or is not a valid zipfile. Returns: (Path): The path to the directory where the zipfile was extracted. 
Example: ```python from ultralytics.utils.downloads import unzip_file dir = unzip_file('path/to/file.zip') ``` """ from zipfile import BadZipFile, ZipFile, is_zipfile if not (Path(file).exists() and is_zipfile(file)): raise BadZipFile(f"File '{file}' does not exist or is a bad zip file.") if path is None: path = Path(file).parent # default path # Unzip the file contents with ZipFile(file) as zipObj: files = [f for f in zipObj.namelist() if all(x not in f for x in exclude)] top_level_dirs = {Path(f).parts[0] for f in files} if len(top_level_dirs) > 1 or (len(files) > 1 and not files[0].endswith("/")): # Zip has multiple files at top level path = extract_path = Path(path) / Path(file).stem # i.e. ../datasets/coco8 else: # Zip has 1 top-level directory extract_path = path # i.e. ../datasets path = Path(path) / list(top_level_dirs)[0] # i.e. ../datasets/coco8 # Check if destination directory already exists and contains files if path.exists() and any(path.iterdir()) and not exist_ok: # If it exists and is not empty, return the path without unzipping LOGGER.warning(f"WARNING ⚠️ Skipping {file} unzip as destination directory {path} is not empty.") return path for f in TQDM(files, desc=f"Unzipping {file} to {Path(path).resolve()}...", unit="file", disable=not progress): # Ensure the file is within the extract_path to avoid path traversal security vulnerability if ".." in Path(f).parts: LOGGER.warning(f"Potentially insecure file path: {f}, skipping extraction.") continue zipObj.extract(f, extract_path) return path # return unzip dir def check_disk_space(url="https://ultralytics.com/assets/coco128.zip", sf=1.5, hard=True): """ Check if there is sufficient disk space to download and store a file. Args: url (str, optional): The URL to the file. Defaults to 'https://ultralytics.com/assets/coco128.zip'. sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 2.0. 
hard (bool, optional): Whether to throw an error or not on insufficient disk space. Defaults to True. Returns: (bool): True if there is sufficient disk space, False otherwise. """ try: r = requests.head(url) # response assert r.status_code < 400, f"URL error for {url}: {r.status_code} {r.reason}" # check response except Exception: return True # requests issue, default to True # Check file size gib = 1 << 30 # bytes per GiB data = int(r.headers.get("Content-Length", 0)) / gib # file size (GB) total, used, free = (x / gib for x in shutil.disk_usage(Path.cwd())) # bytes if data * sf < free: return True # sufficient space # Insufficient space text = ( f"WARNING ⚠️ Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, " f"Please free {data * sf - free:.1f} GB additional disk space and try again." ) if hard: raise MemoryError(text) LOGGER.warning(text) return False def get_google_drive_file_info(link): """ Retrieves the direct download link and filename for a shareable Google Drive file link. Args: link (str): The shareable link of the Google Drive file. Returns: (str): Direct download URL for the Google Drive file. (str): Original filename of the Google Drive file. If filename extraction fails, returns None. Example: ```python from ultralytics.utils.downloads import get_google_drive_file_info link = "https://drive.google.com/file/d/1cqT-cJgANNrhIHCrEufUYhQ4RqiWG_lJ/view?usp=drive_link" url, filename = get_google_drive_file_info(link) ``` """ file_id = link.split("/d/")[1].split("/view")[0] drive_url = f"https://drive.google.com/uc?export=download&id={file_id}" filename = None # Start session with requests.Session() as session: response = session.get(drive_url, stream=True) if "quota exceeded" in str(response.content.lower()): raise ConnectionError( emojis( f"❌ Google Drive file download quota exceeded. " f"Please try again later or download this file manually at {link}." 
) ) for k, v in response.cookies.items(): if k.startswith("download_warning"): drive_url += f"&confirm={v}" # v is token cd = response.headers.get("content-disposition") if cd: filename = re.findall('filename="(.+)"', cd)[0] return drive_url, filename def safe_download( url, file=None, dir=None, unzip=True, delete=False, curl=False, retry=3, min_bytes=1e0, exist_ok=False, progress=True, ): """ Downloads files from a URL, with options for retrying, unzipping, and deleting the downloaded file. Args: url (str): The URL of the file to be downloaded. file (str, optional): The filename of the downloaded file. If not provided, the file will be saved with the same name as the URL. dir (str, optional): The directory to save the downloaded file. If not provided, the file will be saved in the current working directory. unzip (bool, optional): Whether to unzip the downloaded file. Default: True. delete (bool, optional): Whether to delete the downloaded file after unzipping. Default: False. curl (bool, optional): Whether to use curl command line tool for downloading. Default: False. retry (int, optional): The number of times to retry the download in case of failure. Default: 3. min_bytes (float, optional): The minimum number of bytes that the downloaded file should have, to be considered a successful download. Default: 1E0. exist_ok (bool, optional): Whether to overwrite existing contents during unzipping. Defaults to False. progress (bool, optional): Whether to display a progress bar during the download. Default: True. 
Example: ```python from ultralytics.utils.downloads import safe_download link = "https://ultralytics.com/assets/bus.jpg" path = safe_download(link) ``` """ gdrive = url.startswith("https://drive.google.com/") # check if the URL is a Google Drive link if gdrive: url, file = get_google_drive_file_info(url) f = Path(dir or ".") / (file or url2file(url)) # URL converted to filename if "://" not in str(url) and Path(url).is_file(): # URL exists ('://' check required in Windows Python<3.10) f = Path(url) # filename elif not f.is_file(): # URL and file do not exist desc = f"Downloading {url if gdrive else clean_url(url)} to '{f}'" LOGGER.info(f"{desc}...") f.parent.mkdir(parents=True, exist_ok=True) # make directory if missing check_disk_space(url) for i in range(retry + 1): try: if curl or i > 0: # curl download with retry, continue s = "sS" * (not progress) # silent r = subprocess.run(["curl", "-#", f"-{s}L", url, "-o", f, "--retry", "3", "-C", "-"]).returncode assert r == 0, f"Curl return value {r}" else: # urllib download method = "torch" if method == "torch": torch.hub.download_url_to_file(url, f, progress=progress) else: with request.urlopen(url) as response, TQDM( total=int(response.getheader("Content-Length", 0)), desc=desc, disable=not progress, unit="B", unit_scale=True, unit_divisor=1024, ) as pbar: with open(f, "wb") as f_opened: for data in response: f_opened.write(data) pbar.update(len(data)) if f.exists(): if f.stat().st_size > min_bytes: break # success f.unlink() # remove partial downloads except Exception as e: if i == 0 and not is_online(): raise ConnectionError(emojis(f"❌ Download failure for {url}. Environment is not online.")) from e elif i >= retry: raise ConnectionError(emojis(f"❌ Download failure for {url}. 
Retry limit reached.")) from e LOGGER.warning(f"⚠️ Download failure, retrying {i + 1}/{retry} {url}...") if unzip and f.exists() and f.suffix in ("", ".zip", ".tar", ".gz"): from zipfile import is_zipfile unzip_dir = (dir or f.parent).resolve() # unzip to dir if provided else unzip in place if is_zipfile(f): unzip_dir = unzip_file(file=f, path=unzip_dir, exist_ok=exist_ok, progress=progress) # unzip elif f.suffix in (".tar", ".gz"): LOGGER.info(f"Unzipping {f} to {unzip_dir}...") subprocess.run(["tar", "xf" if f.suffix == ".tar" else "xfz", f, "--directory", unzip_dir], check=True) if delete: f.unlink() # remove zip return unzip_dir def get_github_assets(repo="ultralytics/assets", version="latest", retry=False): """ Retrieve the specified version's tag and assets from a GitHub repository. If the version is not specified, the function fetches the latest release assets. Args: repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'. version (str, optional): The release version to fetch assets from. Defaults to 'latest'. retry (bool, optional): Flag to retry the request in case of a failure. Defaults to False. Returns: (tuple): A tuple containing the release tag and a list of asset names. Example: ```python tag, assets = get_github_assets(repo='ultralytics/assets', version='latest') ``` """ if version != "latest": version = f"tags/{version}" # i.e. tags/v6.2 url = f"https://api.github.com/repos/{repo}/releases/{version}" r = requests.get(url) # github api if r.status_code != 200 and r.reason != "rate limit exceeded" and retry: # failed and not 403 rate limit exceeded r = requests.get(url) # try again if r.status_code != 200: LOGGER.warning(f"⚠️ GitHub assets check failure for {url}: {r.status_code} {r.reason}") return "", [] data = r.json() return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolov8n.pt', 'yolov8s.pt', ...] 
def attempt_download_asset(file, repo="ultralytics/assets", release="v8.1.0", **kwargs):
    """
    Resolve a model/asset file locally, downloading it from a GitHub release if it is not found.

    Resolution order: the path itself, then the configured weights directory, then (for explicit URLs or
    known asset names) a download via `safe_download`.

    Args:
        file (str | Path): The filename or file path to be downloaded.
        repo (str, optional): The GitHub repository in the format 'owner/repo'. Defaults to 'ultralytics/assets'.
        release (str, optional): The specific release version to be downloaded. Defaults to 'v8.1.0'.
        **kwargs (dict): Additional keyword arguments forwarded to `safe_download`.

    Returns:
        (str): The path to the downloaded (or already present) file.

    Example:
        ```python
        file_path = attempt_download_asset('yolov5s.pt', repo='ultralytics/assets', release='latest')
        ```
    """
    from ultralytics.utils import SETTINGS  # scoped for circular import

    # Normalize the requested name (YOLOv3/5u updates) and strip quotes/whitespace
    file = Path(checks.check_yolov5u_filename(str(file)).strip().replace("'", ""))
    if file.exists():
        return str(file)
    if (SETTINGS["weights_dir"] / file).exists():
        return str(SETTINGS["weights_dir"] / file)

    # Not on disk - work out where to fetch it from
    name = Path(parse.unquote(str(file))).name  # decode '%2F' to '/' etc.
    base_url = f"https://github.com/{repo}/releases/download"
    if str(file).startswith(("http:/", "https:/")):  # an explicit download URL was passed
        url = str(file).replace(":/", "://")  # Pathlib turns :// -> :/
        file = url2file(name)  # parse authentication https://url.com/file.txt?auth...
        if Path(file).is_file():
            LOGGER.info(f"Found {clean_url(url)} locally at {file}")  # file already exists
        else:
            safe_download(url=url, file=file, min_bytes=1e5, **kwargs)
    elif repo == GITHUB_ASSETS_REPO and name in GITHUB_ASSETS_NAMES:  # known first-party asset
        safe_download(url=f"{base_url}/{release}/{name}", file=file, min_bytes=1e5, **kwargs)
    else:  # query the GitHub API for the asset location
        tag, assets = get_github_assets(repo, release)
        if not assets:
            tag, assets = get_github_assets(repo)  # latest release
        if name in assets:
            safe_download(url=f"{base_url}/{tag}/{name}", file=file, min_bytes=1e5, **kwargs)

    return str(file)


def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=1, retry=3, exist_ok=False):
    """
    Download files from one or more URLs into a directory, optionally using concurrent threads.

    Args:
        url (str | list): The URL or list of URLs of the files to be downloaded.
        dir (Path, optional): The directory where the files will be saved. Defaults to the current working directory.
        unzip (bool, optional): Flag to unzip the files after downloading. Defaults to True.
        delete (bool, optional): Flag to delete the zip files after extraction. Defaults to False.
        curl (bool, optional): Flag to use curl for downloading. Defaults to False.
        threads (int, optional): Number of threads to use for concurrent downloads. Defaults to 1.
        retry (int, optional): Number of retries in case of download failure. Defaults to 3.
        exist_ok (bool, optional): Whether to overwrite existing contents during unzipping. Defaults to False.

    Example:
        ```python
        download('https://ultralytics.com/assets/example.zip', dir='path/to/dir', unzip=True)
        ```
    """
    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory

    def _fetch(args):
        """Download a single (url, dir) pair with the shared options."""
        u, d = args
        safe_download(
            url=u,
            dir=d,
            unzip=unzip,
            delete=delete,
            curl=curl,
            retry=retry,
            exist_ok=exist_ok,
            progress=threads <= 1,  # per-file progress bars are suppressed for threaded downloads
        )

    if threads > 1:
        with ThreadPool(threads) as pool:
            pool.map(_fetch, zip(url, repeat(dir)))
            pool.close()
            pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            safe_download(url=u, dir=dir, unzip=unzip, delete=delete, curl=curl, retry=retry, exist_ok=exist_ok)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/downloads.py
Python
unknown
21,189
# Ultralytics YOLO 🚀, AGPL-3.0 license

from ultralytics.utils import emojis


class HUBModelError(Exception):
    """
    Exception raised when an Ultralytics HUB model cannot be fetched.

    Raised when a requested model does not exist or cannot be retrieved. The supplied message is passed
    through the 'emojis' helper from the 'ultralytics.utils' package so that it renders with emojis where
    supported, for a better user experience.

    Attributes:
        message (str): The error message displayed when the exception is raised.
    """

    def __init__(self, message="Model not found. Please check model URL and try again."):
        """Initialize the exception with an emoji-decorated message."""
        super().__init__(emojis(message))
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/errors.py
Python
unknown
816
# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib
import glob
import os
import shutil
import tempfile
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path


class WorkingDirectory(contextlib.ContextDecorator):
    """Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager."""

    def __init__(self, new_dir):
        """Store the target directory and remember the current one so it can be restored on exit."""
        self.dir = new_dir  # new dir
        self.cwd = Path.cwd().resolve()  # current dir

    def __enter__(self):
        """Switch the process working directory to the stored target."""
        os.chdir(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):  # noqa
        """Switch the process working directory back to where it was on entry."""
        os.chdir(self.cwd)


@contextmanager
def spaces_in_path(path):
    """
    Context manager that shadows a path containing spaces with a space-free temporary copy.

    When the given path contains spaces, the file or directory is copied to a temporary location with the
    spaces replaced by underscores, that temporary path is yielded, and on exit the (possibly modified) copy
    is copied back over the original location. Paths without spaces are yielded unchanged.

    Args:
        path (str | Path): The original path.

    Yields:
        (Path | str): Temporary path with spaces replaced by underscores if spaces were present, otherwise
            the original path.

    Example:
        ```python
        with ultralytics.utils.files import spaces_in_path

        with spaces_in_path('/path/with spaces') as new_path:
            # Your code here
        ```
    """
    if " " not in str(path):
        # Nothing to do - hand the caller's path back untouched
        yield path
        return

    was_str = isinstance(path, str)  # remember the input type so the yielded value mirrors it
    path = Path(path)

    # Create a temporary directory and construct the space-free shadow path
    with tempfile.TemporaryDirectory() as tmp_dir:
        tmp_path = Path(tmp_dir) / path.name.replace(" ", "_")

        # Mirror the original file/directory into the temporary location
        if path.is_dir():
            shutil.copytree(path, tmp_path)
        elif path.is_file():
            tmp_path.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(path, tmp_path)

        try:
            yield str(tmp_path) if was_str else tmp_path
        finally:
            # Propagate any changes back to the original location
            if tmp_path.is_dir():
                shutil.copytree(tmp_path, path, dirs_exist_ok=True)
            elif tmp_path.is_file():
                shutil.copy2(tmp_path, path)  # Copy back the file


def increment_path(path, exist_ok=False, sep="", mkdir=False):
    """
    Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.

    If the path exists and exist_ok is not True, the path is incremented by appending a number and sep to
    its end. If the path is a file, the file extension is preserved; if it is a directory, the number is
    appended directly. If mkdir is True, the resulting path is created as a directory if it does not exist.

    Args:
        path (str | pathlib.Path): Path to increment.
        exist_ok (bool, optional): If True, the path will not be incremented and returned as-is. Defaults to False.
        sep (str, optional): Separator to use between the path and the incrementation number. Defaults to ''.
        mkdir (bool, optional): Create a directory if it does not exist. Defaults to False.

    Returns:
        (pathlib.Path): Incremented path.
    """
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        stem, ext = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "")

        # Probe candidate names until an unused one is found
        for idx in range(2, 9999):
            candidate = f"{stem}{sep}{idx}{ext}"  # increment path
            if not os.path.exists(candidate):
                break
        path = Path(candidate)

    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory

    return path


def file_age(path=__file__):
    """Return the number of whole days since the file at 'path' was last modified."""
    delta = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)
    return delta.days  # + delta.seconds / 86400  # fractional days


def file_date(path=__file__):
    """Return a human-readable modification date for 'path', i.e. '2021-3-26'."""
    t = datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f"{t.year}-{t.month}-{t.day}"


def file_size(path):
    """Return the size of a file or directory tree at 'path' in MB (0.0 if it is neither)."""
    if not isinstance(path, (str, Path)):
        return 0.0
    mb = 1 << 20  # bytes to MiB (1024 ** 2)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / mb
    if path.is_dir():
        return sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) / mb
    return 0.0


def get_latest_run(search_dir="."):
    """Return path to most recent 'last*.pt' checkpoint under 'search_dir' ('' if none found)."""
    checkpoints = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
    return max(checkpoints, key=os.path.getctime) if checkpoints else ""
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/files.py
Python
unknown
5,275
# Ultralytics YOLO 🚀, AGPL-3.0 license

from collections import abc
from itertools import repeat
from numbers import Number
from typing import List

import numpy as np

from .ops import ltwh2xywh, ltwh2xyxy, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh


def _ntuple(n):
    """From PyTorch internals: build a parser that broadcasts a scalar to an n-tuple."""

    def parse(x):
        """Return x unchanged if it is iterable, otherwise repeat it n times as a tuple."""
        return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n))

    return parse


to_2tuple = _ntuple(2)
to_4tuple = _ntuple(4)

# `xyxy` means left top and right bottom
# `xywh` means center x, center y and width, height(YOLO format)
# `ltwh` means left top and width, height(COCO format)
_formats = ["xyxy", "xywh", "ltwh"]

__all__ = ("Bboxes",)  # tuple or list


class Bboxes:
    """
    A class for handling bounding boxes.

    The class supports various bounding box formats like 'xyxy', 'xywh', and 'ltwh'. Bounding box data should
    be provided in numpy arrays.

    Attributes:
        bboxes (numpy.ndarray): The bounding boxes stored in a 2D numpy array.
        format (str): The format of the bounding boxes ('xyxy', 'xywh', or 'ltwh').

    Note:
        This class does not handle normalization or denormalization of bounding boxes.
    """

    def __init__(self, bboxes, format="xyxy") -> None:
        """Initializes the Bboxes class with bounding box data in a specified format."""
        assert format in _formats, f"Invalid bounding box format: {format}, format must be one of {_formats}"
        bboxes = bboxes[None, :] if bboxes.ndim == 1 else bboxes  # promote a single box to a (1, 4) matrix
        assert bboxes.ndim == 2
        assert bboxes.shape[1] == 4
        self.bboxes = bboxes
        self.format = format

    def convert(self, format):
        """Converts bounding box format from one type to another."""
        assert format in _formats, f"Invalid bounding box format: {format}, format must be one of {_formats}"
        if self.format == format:
            return
        elif self.format == "xyxy":
            func = xyxy2xywh if format == "xywh" else xyxy2ltwh
        elif self.format == "xywh":
            func = xywh2xyxy if format == "xyxy" else xywh2ltwh
        else:
            func = ltwh2xyxy if format == "xyxy" else ltwh2xywh
        self.bboxes = func(self.bboxes)
        self.format = format

    def areas(self):
        """Return box areas (note: converts this object to 'xyxy' format in place)."""
        self.convert("xyxy")
        return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1])

    def mul(self, scale):
        """
        Multiply the four box coordinates in place.

        Args:
            scale (tuple | list | int): the scale for four coords; a scalar is broadcast to all four.
        """
        if isinstance(scale, Number):
            scale = to_4tuple(scale)
        assert isinstance(scale, (tuple, list))
        assert len(scale) == 4
        self.bboxes[:, 0] *= scale[0]
        self.bboxes[:, 1] *= scale[1]
        self.bboxes[:, 2] *= scale[2]
        self.bboxes[:, 3] *= scale[3]

    def add(self, offset):
        """
        Add an offset to the four box coordinates in place.

        Args:
            offset (tuple | list | int): the offset for four coords; a scalar is broadcast to all four.
        """
        if isinstance(offset, Number):
            offset = to_4tuple(offset)
        assert isinstance(offset, (tuple, list))
        assert len(offset) == 4
        self.bboxes[:, 0] += offset[0]
        self.bboxes[:, 1] += offset[1]
        self.bboxes[:, 2] += offset[2]
        self.bboxes[:, 3] += offset[3]

    def __len__(self):
        """Return the number of boxes."""
        return len(self.bboxes)

    @classmethod
    def concatenate(cls, boxes_list: List["Bboxes"], axis=0) -> "Bboxes":
        """
        Concatenate a list of Bboxes objects into a single Bboxes object.

        Args:
            boxes_list (List[Bboxes]): A list of Bboxes objects to concatenate.
            axis (int, optional): The axis along which to concatenate the bounding boxes. Defaults to 0.

        Returns:
            Bboxes: A new Bboxes object containing the concatenated bounding boxes.

        Note:
            The input should be a list or tuple of Bboxes objects.
        """
        assert isinstance(boxes_list, (list, tuple))
        if not boxes_list:
            # Fix: np.empty(0) is 1-D with 0 elements and failed the (N, 4) shape assertion in __init__
            return cls(np.empty((0, 4)))
        assert all(isinstance(box, Bboxes) for box in boxes_list)

        if len(boxes_list) == 1:
            return boxes_list[0]
        return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis))

    def __getitem__(self, index) -> "Bboxes":
        """
        Retrieve a specific bounding box or a set of bounding boxes using indexing.

        Args:
            index (int, slice, or np.ndarray): The index, slice, or boolean array to select
                                               the desired bounding boxes.

        Returns:
            Bboxes: A new Bboxes object containing the selected bounding boxes.

        Raises:
            AssertionError: If the indexed bounding boxes do not form a 2-dimensional matrix.

        Note:
            When using boolean indexing, make sure to provide a boolean array with the same
            length as the number of bounding boxes.
        """
        if isinstance(index, int):
            # Fix: numpy's ndarray.view(dtype) does not reshape (that is a torch idiom and raised a
            # TypeError here); reshape keeps the selected row as a (1, 4) matrix.
            return Bboxes(self.bboxes[index].reshape(1, -1))
        b = self.bboxes[index]
        assert b.ndim == 2, f"Indexing on Bboxes with {index} failed to return a matrix!"
        return Bboxes(b)


class Instances:
    """
    Container for bounding boxes, segments, and keypoints of detected objects in an image.

    Attributes:
        _bboxes (Bboxes): Internal object for handling bounding box operations.
        keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3]. Default is None.
        normalized (bool): Flag indicating whether the bounding box coordinates are normalized.
        segments (ndarray): Segments array with shape [N, M, 2] (empty [N, 0, 2] when none are supplied).

    Args:
        bboxes (ndarray): An array of bounding boxes with shape [N, 4].
        segments (list | ndarray, optional): A list or array of object segments. Default is None.
        keypoints (ndarray, optional): An array of keypoints with shape [N, 17, 3]. Default is None.
        bbox_format (str, optional): The format of bounding boxes ('xywh' or 'xyxy'). Default is 'xywh'.
        normalized (bool, optional): Whether the bounding box coordinates are normalized. Default is True.

    Examples:
        ```python
        # Create an Instances object
        instances = Instances(
            bboxes=np.array([[10, 10, 30, 30], [20, 20, 40, 40]]),
            segments=[np.array([[5, 5], [10, 10]]), np.array([[15, 15], [20, 20]])],
            keypoints=np.array([[[5, 5, 1], [10, 10, 1]], [[15, 15, 1], [20, 20, 1]]])
        )
        ```

    Note:
        The bounding box format is either 'xywh' or 'xyxy', and is determined by the `bbox_format` argument.
        This class does not perform input validation, and it assumes the inputs are well-formed.
    """

    def __init__(self, bboxes, segments=None, keypoints=None, bbox_format="xywh", normalized=True) -> None:
        """
        Initialize the object with bounding boxes, segments, and keypoints.

        Args:
            bboxes (ndarray): bboxes with shape [N, 4].
            segments (list | ndarray): segments.
            keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3].
        """
        self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format)
        self.keypoints = keypoints
        self.normalized = normalized
        # Fix: the documented default segments=None previously crashed every segment operation
        # (scale, normalize, __getitem__, ...); an empty (N, 0, 2) array makes them safe no-ops.
        self.segments = np.zeros((len(bboxes), 0, 2), dtype=np.float32) if segments is None else segments

    def convert_bbox(self, format):
        """Convert bounding box format."""
        self._bboxes.convert(format=format)

    @property
    def bbox_areas(self):
        """Calculate the area of bounding boxes."""
        return self._bboxes.areas()

    def scale(self, scale_w, scale_h, bbox_only=False):
        """This might be similar with denormalize func but without normalized sign."""
        self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h))
        if bbox_only:
            return
        self.segments[..., 0] *= scale_w
        self.segments[..., 1] *= scale_h
        if self.keypoints is not None:
            self.keypoints[..., 0] *= scale_w
            self.keypoints[..., 1] *= scale_h

    def denormalize(self, w, h):
        """Denormalizes boxes, segments, and keypoints from normalized coordinates."""
        if not self.normalized:
            return
        self._bboxes.mul(scale=(w, h, w, h))
        self.segments[..., 0] *= w
        self.segments[..., 1] *= h
        if self.keypoints is not None:
            self.keypoints[..., 0] *= w
            self.keypoints[..., 1] *= h
        self.normalized = False

    def normalize(self, w, h):
        """Normalize bounding boxes, segments, and keypoints to image dimensions."""
        if self.normalized:
            return
        self._bboxes.mul(scale=(1 / w, 1 / h, 1 / w, 1 / h))
        self.segments[..., 0] /= w
        self.segments[..., 1] /= h
        if self.keypoints is not None:
            self.keypoints[..., 0] /= w
            self.keypoints[..., 1] /= h
        self.normalized = True

    def add_padding(self, padw, padh):
        """Handle rect and mosaic situation."""
        assert not self.normalized, "you should add padding with absolute coordinates."
        self._bboxes.add(offset=(padw, padh, padw, padh))
        self.segments[..., 0] += padw
        self.segments[..., 1] += padh
        if self.keypoints is not None:
            self.keypoints[..., 0] += padw
            self.keypoints[..., 1] += padh

    def __getitem__(self, index) -> "Instances":
        """
        Retrieve a specific instance or a set of instances using indexing.

        Args:
            index (int, slice, or np.ndarray): The index, slice, or boolean array to select
                                               the desired instances.

        Returns:
            Instances: A new Instances object containing the selected bounding boxes,
                       segments, and keypoints if present.

        Note:
            When using boolean indexing, make sure to provide a boolean array with the same
            length as the number of instances.
        """
        segments = self.segments[index] if len(self.segments) else self.segments
        keypoints = self.keypoints[index] if self.keypoints is not None else None
        bboxes = self.bboxes[index]
        bbox_format = self._bboxes.format
        return Instances(
            bboxes=bboxes,
            segments=segments,
            keypoints=keypoints,
            bbox_format=bbox_format,
            normalized=self.normalized,
        )

    def flipud(self, h):
        """Flips the coordinates of bounding boxes, segments, and keypoints vertically."""
        if self._bboxes.format == "xyxy":
            y1 = self.bboxes[:, 1].copy()
            y2 = self.bboxes[:, 3].copy()
            self.bboxes[:, 1] = h - y2
            self.bboxes[:, 3] = h - y1
        else:
            self.bboxes[:, 1] = h - self.bboxes[:, 1]
        self.segments[..., 1] = h - self.segments[..., 1]
        if self.keypoints is not None:
            self.keypoints[..., 1] = h - self.keypoints[..., 1]

    def fliplr(self, w):
        """Reverses the order of the bounding boxes and segments horizontally."""
        if self._bboxes.format == "xyxy":
            x1 = self.bboxes[:, 0].copy()
            x2 = self.bboxes[:, 2].copy()
            self.bboxes[:, 0] = w - x2
            self.bboxes[:, 2] = w - x1
        else:
            self.bboxes[:, 0] = w - self.bboxes[:, 0]
        self.segments[..., 0] = w - self.segments[..., 0]
        if self.keypoints is not None:
            self.keypoints[..., 0] = w - self.keypoints[..., 0]

    def clip(self, w, h):
        """Clips bounding boxes, segments, and keypoints values to stay within image boundaries."""
        ori_format = self._bboxes.format
        self.convert_bbox(format="xyxy")
        self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w)
        self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h)
        if ori_format != "xyxy":
            self.convert_bbox(format=ori_format)
        self.segments[..., 0] = self.segments[..., 0].clip(0, w)
        self.segments[..., 1] = self.segments[..., 1].clip(0, h)
        if self.keypoints is not None:
            self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w)
            self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h)

    def remove_zero_area_boxes(self):
        """
        Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height.

        This removes them.
        """
        good = self.bbox_areas > 0
        if not all(good):
            self._bboxes = self._bboxes[good]
            if len(self.segments):
                self.segments = self.segments[good]
            if self.keypoints is not None:
                self.keypoints = self.keypoints[good]
        return good

    def update(self, bboxes, segments=None, keypoints=None):
        """Updates instance variables."""
        self._bboxes = Bboxes(bboxes, format=self._bboxes.format)
        if segments is not None:
            self.segments = segments
        if keypoints is not None:
            self.keypoints = keypoints

    def __len__(self):
        """Return the length of the instance list."""
        return len(self.bboxes)

    @classmethod
    def concatenate(cls, instances_list: List["Instances"], axis=0) -> "Instances":
        """
        Concatenates a list of Instances objects into a single Instances object.

        Args:
            instances_list (List[Instances]): A list of Instances objects to concatenate.
            axis (int, optional): The axis along which the arrays will be concatenated. Defaults to 0.

        Returns:
            Instances: A new Instances object containing the concatenated bounding boxes,
                       segments, and keypoints if present.

        Note:
            The `Instances` objects in the list should have the same properties, such as
            the format of the bounding boxes, whether keypoints are present, and if the
            coordinates are normalized.
        """
        assert isinstance(instances_list, (list, tuple))
        if not instances_list:
            # Fix: np.empty(0) failed the (N, 4) shape assertion in Bboxes.__init__
            return cls(np.empty((0, 4)))
        assert all(isinstance(instance, Instances) for instance in instances_list)

        if len(instances_list) == 1:
            return instances_list[0]

        use_keypoint = instances_list[0].keypoints is not None
        bbox_format = instances_list[0]._bboxes.format
        normalized = instances_list[0].normalized

        cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis)
        cat_segments = np.concatenate([b.segments for b in instances_list], axis=axis)
        cat_keypoints = np.concatenate([b.keypoints for b in instances_list], axis=axis) if use_keypoint else None
        return cls(cat_boxes, cat_segments, cat_keypoints, bbox_format, normalized)

    @property
    def bboxes(self):
        """Return bounding boxes."""
        return self._bboxes.bboxes
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/instance.py
Python
unknown
15,575
# Ultralytics YOLO 🚀, AGPL-3.0 license import torch import torch.nn as nn import torch.nn.functional as F from ultralytics.utils.metrics import OKS_SIGMA from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh from ultralytics.utils.tal import RotatedTaskAlignedAssigner, TaskAlignedAssigner, dist2bbox, dist2rbox, make_anchors from .metrics import bbox_iou, probiou from .tal import bbox2dist class VarifocalLoss(nn.Module): """ Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367. """ def __init__(self): """Initialize the VarifocalLoss class.""" super().__init__() @staticmethod def forward(pred_score, gt_score, label, alpha=0.75, gamma=2.0): """Computes varfocal loss.""" weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label with torch.cuda.amp.autocast(enabled=False): loss = ( (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none") * weight) .mean(1) .sum() ) return loss class FocalLoss(nn.Module): """Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""

    def __init__(self):
        """Initializer for FocalLoss class with no parameters."""
        super().__init__()

    @staticmethod
    def forward(pred, label, gamma=1.5, alpha=0.25):
        """
        Compute focal loss from raw logits, down-weighting easy examples via the (1 - p_t) ** gamma factor.

        Args:
            pred (torch.Tensor): Raw logits of shape (batch, classes, ...).
            label (torch.Tensor): Binary targets with the same shape as `pred`.
            gamma (float): Focusing parameter; higher values focus more on hard examples.
            alpha (float): Class balance factor; disabled when <= 0.

        Returns:
            (torch.Tensor): Scalar loss, mean over dim 1 then summed over the batch.
        """
        loss = F.binary_cross_entropy_with_logits(pred, label, reduction="none")
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = pred.sigmoid()  # prob from logits
        p_t = label * pred_prob + (1 - label) * (1 - pred_prob)
        modulating_factor = (1.0 - p_t) ** gamma
        loss *= modulating_factor
        if alpha > 0:
            alpha_factor = label * alpha + (1 - label) * (1 - alpha)
            loss *= alpha_factor
        return loss.mean(1).sum()


class BboxLoss(nn.Module):
    """Criterion class for computing IoU and DFL (Distribution Focal Loss) bounding-box losses."""

    def __init__(self, reg_max, use_dfl=False):
        """Initialize the BboxLoss module with regularization maximum and DFL settings."""
        super().__init__()
        self.reg_max = reg_max  # maximum DFL bin index (bins = reg_max + 1)
        self.use_dfl = use_dfl

    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
        """Compute IoU (CIoU) loss and optional DFL loss over foreground anchors only."""
        # Per-anchor weight = sum of assigned class scores; focuses loss on well-matched anchors
        weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
        iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum

        # DFL loss
        if self.use_dfl:
            target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
            loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
            loss_dfl = loss_dfl.sum() / target_scores_sum
        else:
            loss_dfl = torch.tensor(0.0).to(pred_dist.device)

        return loss_iou, loss_dfl

    @staticmethod
    def _df_loss(pred_dist, target):
        """
        Return sum of left and right DFL losses.

        Distribution Focal Loss (DFL) proposed in Generalized Focal Loss
        https://ieeexplore.ieee.org/document/9792391
        """
        # Interpolate the continuous target between its two nearest integer bins
        tl = target.long()  # target left
        tr = tl + 1  # target right
        wl = tr - target  # weight left
        wr = 1 - wl  # weight right
        return (
            F.cross_entropy(pred_dist, tl.view(-1), reduction="none").view(tl.shape) * wl
            + F.cross_entropy(pred_dist, tr.view(-1), reduction="none").view(tl.shape) * wr
        ).mean(-1, keepdim=True)


class RotatedBboxLoss(BboxLoss):
    """Criterion class for computing rotated-bbox (OBB) IoU and DFL losses."""

    def __init__(self, reg_max, use_dfl=False):
        """Initialize the RotatedBboxLoss module with regularization maximum and DFL settings."""
        super().__init__(reg_max, use_dfl)

    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
        """Compute probabilistic IoU loss for rotated boxes and optional DFL loss."""
        weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
        iou = probiou(pred_bboxes[fg_mask], target_bboxes[fg_mask])
        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum

        # DFL loss; only the xywh part of the rotated box feeds the distance targets
        if self.use_dfl:
            target_ltrb = bbox2dist(anchor_points, xywh2xyxy(target_bboxes[..., :4]), self.reg_max)
            loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
            loss_dfl = loss_dfl.sum() / target_scores_sum
        else:
            loss_dfl = torch.tensor(0.0).to(pred_dist.device)

        return loss_iou, loss_dfl


class KeypointLoss(nn.Module):
    """Criterion class for computing keypoint (pose) training losses based on OKS."""

    def __init__(self, sigmas) -> None:
        """Initialize the KeypointLoss class with per-keypoint sigmas."""
        super().__init__()
        self.sigmas = sigmas

    def forward(self, pred_kpts, gt_kpts, kpt_mask, area):
        """Calculates keypoint loss factor and Euclidean distance loss for predicted and actual keypoints."""
        d = (pred_kpts[..., 0] - gt_kpts[..., 0]) ** 2 + (pred_kpts[..., 1] - gt_kpts[..., 1]) ** 2
        # Rescale loss so instances with few visible keypoints are not under-weighted
        kpt_loss_factor = kpt_mask.shape[1] / (torch.sum(kpt_mask != 0, dim=1) + 1e-9)
        # e = d / (2 * (area * self.sigmas) ** 2 + 1e-9)  # from formula
        e = d / (2 * self.sigmas) ** 2 / (area + 1e-9) / 2  # from cocoeval
        return (kpt_loss_factor.view(-1, 1) * ((1 - torch.exp(-e)) * kpt_mask)).mean()


class v8DetectionLoss:
    """Criterion class for computing v8 detection training losses (box, cls, dfl)."""

    def __init__(self, model):  # model must be de-paralleled
        """Initializes v8DetectionLoss with the model, defining model-related properties and BCE loss function."""
        device = next(model.parameters()).device  # get model device
        h = model.args  # hyperparameters

        m = model.model[-1]  # Detect() module
        self.bce = nn.BCEWithLogitsLoss(reduction="none")
        self.hyp = h
        self.stride = m.stride  # model strides
        self.nc = m.nc  # number of classes
        self.no = m.no  # outputs per anchor (reg_max * 4 + nc, see the split in __call__)
        self.reg_max = m.reg_max
        self.device = device

        self.use_dfl = m.reg_max > 1  # DFL only meaningful with more than one distribution bin

        self.assigner = TaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0)
        self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device)
        self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device)  # DFL expectation projection

    def preprocess(self, targets, batch_size, scale_tensor):
        """Preprocesses the target counts and matches with the input batch size to output a tensor.

        Converts a flat (N, 6) [img_idx, cls, xywh] target list into a padded (batch, max_counts, 5)
        tensor of [cls, xyxy] rows, scaled to pixel coordinates.
        """
        if targets.shape[0] == 0:
            out = torch.zeros(batch_size, 0, 5, device=self.device)
        else:
            i = targets[:, 0]  # image index
            _, counts = i.unique(return_counts=True)
            counts = counts.to(dtype=torch.int32)
            out = torch.zeros(batch_size, counts.max(), 5, device=self.device)
            for j in range(batch_size):
                matches = i == j
                n = matches.sum()
                if n:
                    out[j, :n] = targets[matches, 1:]
            out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor))
        return out

    def bbox_decode(self, anchor_points, pred_dist):
        """Decode predicted object bounding box coordinates from anchor points and distribution."""
        if self.use_dfl:
            b, a, c = pred_dist.shape  # batch, anchors, channels
            # Expectation over the softmaxed per-side distribution gives the ltrb distances
            pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype))
            # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype))
            # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2)
        return dist2bbox(pred_dist, anchor_points, xywh=False)

    def __call__(self, preds, batch):
        """Calculate the sum of the loss for box, cls and dfl multiplied by batch size."""
        loss = torch.zeros(3, device=self.device)  # box, cls, dfl
        feats = preds[1] if isinstance(preds, tuple) else preds
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1
        )

        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()

        dtype = pred_scores.dtype
        batch_size = pred_scores.shape[0]
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

        # Targets
        targets = torch.cat((batch["batch_idx"].view(-1, 1), batch["cls"].view(-1, 1), batch["bboxes"]), 1)
        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)

        # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

        _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
            pred_scores.detach().sigmoid(),
            (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
            anchor_points * stride_tensor,
            gt_labels,
            gt_bboxes,
            mask_gt,
        )

        target_scores_sum = max(target_scores.sum(), 1)  # avoid division by zero when nothing is assigned

        # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        # Bbox loss
        if fg_mask.sum():
            target_bboxes /= stride_tensor
            loss[0], loss[2] = self.bbox_loss(
                pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask
            )

        loss[0] *= self.hyp.box  # box gain
        loss[1] *= self.hyp.cls  # cls gain
        loss[2] *= self.hyp.dfl  # dfl gain

        return loss.sum() * batch_size, loss.detach()  # loss(box, cls, dfl)


class v8SegmentationLoss(v8DetectionLoss):
    """Criterion class for computing v8 segmentation training losses (box, seg, cls, dfl)."""

    def __init__(self, model):  # model must be de-paralleled
        """Initializes the v8SegmentationLoss class, taking a de-paralleled model as argument."""
        super().__init__(model)
        self.overlap = model.args.overlap_mask

    def __call__(self, preds, batch):
        """Calculate and return the loss for the YOLO model."""
        loss = torch.zeros(4, device=self.device)  # box, seg, cls, dfl
        feats, pred_masks, proto = preds if len(preds) == 3 else preds[1]
        batch_size, _, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1
        )

        # B, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_masks = pred_masks.permute(0, 2, 1).contiguous()

        dtype = pred_scores.dtype
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

        # Targets
        try:
            batch_idx = batch["batch_idx"].view(-1, 1)
            targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
            targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
            gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
            mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
        except RuntimeError as e:
            raise TypeError(
                "ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n"
                "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, "
                "i.e. 'yolo train model=yolov8n-seg.pt data=coco8.yaml'.\nVerify your dataset is a "
                "correctly formatted 'segment' dataset using 'data=coco8-seg.yaml' "
                "as an example.\nSee https://docs.ultralytics.com/datasets/segment/ for help."
            ) from e

        # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

        _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
            pred_scores.detach().sigmoid(),
            (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
            anchor_points * stride_tensor,
            gt_labels,
            gt_bboxes,
            mask_gt,
        )

        target_scores_sum = max(target_scores.sum(), 1)  # avoid division by zero when nothing is assigned

        # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        if fg_mask.sum():
            # Bbox loss
            loss[0], loss[3] = self.bbox_loss(
                pred_distri,
                pred_bboxes,
                anchor_points,
                target_bboxes / stride_tensor,
                target_scores,
                target_scores_sum,
                fg_mask,
            )
            # Masks loss
            masks = batch["masks"].to(self.device).float()
            if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
                masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]

            loss[1] = self.calculate_segmentation_loss(
                fg_mask, masks, target_gt_idx, target_bboxes, batch_idx, proto, pred_masks, imgsz, self.overlap
            )

        # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
        else:
            loss[1] += (proto * 0).sum() + (pred_masks * 0).sum()  # inf sums may lead to nan loss

        loss[0] *= self.hyp.box  # box gain
        loss[1] *= self.hyp.box  # seg gain
        loss[2] *= self.hyp.cls  # cls gain
        loss[3] *= self.hyp.dfl  # dfl gain

        return loss.sum() * batch_size, loss.detach()  # loss(box, seg, cls, dfl)

    @staticmethod
    def single_mask_loss(
        gt_mask: torch.Tensor, pred: torch.Tensor, proto: torch.Tensor, xyxy: torch.Tensor, area: torch.Tensor
    ) -> torch.Tensor:
        """
        Compute the instance segmentation loss for a single image.

        Args:
            gt_mask (torch.Tensor): Ground truth mask of shape (n, H, W), where n is the number of objects.
            pred (torch.Tensor): Predicted mask coefficients of shape (n, 32).
            proto (torch.Tensor): Prototype masks of shape (32, H, W).
            xyxy (torch.Tensor): Ground truth bounding boxes in xyxy format, normalized to [0, 1], of shape (n, 4).
            area (torch.Tensor): Area of each ground truth bounding box of shape (n,).

        Returns:
            (torch.Tensor): The calculated mask loss for a single image.

        Notes:
            The function uses the equation pred_mask = torch.einsum('in,nhw->ihw', pred, proto) to
            produce the predicted masks from the prototype masks and predicted mask coefficients.
        """
        pred_mask = torch.einsum("in,nhw->ihw", pred, proto)  # (n, 32) @ (32, 80, 80) -> (n, 80, 80)
        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
        # Crop loss to each GT box and normalize by box area so small objects are not dominated
        return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).sum()

    def calculate_segmentation_loss(
        self,
        fg_mask: torch.Tensor,
        masks: torch.Tensor,
        target_gt_idx: torch.Tensor,
        target_bboxes: torch.Tensor,
        batch_idx: torch.Tensor,
        proto: torch.Tensor,
        pred_masks: torch.Tensor,
        imgsz: torch.Tensor,
        overlap: bool,
    ) -> torch.Tensor:
        """
        Calculate the loss for instance segmentation.

        Args:
            fg_mask (torch.Tensor): A binary tensor of shape (BS, N_anchors) indicating which anchors are positive.
            masks (torch.Tensor): Ground truth masks of shape (BS, H, W) if `overlap` is False, otherwise (BS, ?, H, W).
            target_gt_idx (torch.Tensor): Indexes of ground truth objects for each anchor of shape (BS, N_anchors).
            target_bboxes (torch.Tensor): Ground truth bounding boxes for each anchor of shape (BS, N_anchors, 4).
            batch_idx (torch.Tensor): Batch indices of shape (N_labels_in_batch, 1).
            proto (torch.Tensor): Prototype masks of shape (BS, 32, H, W).
            pred_masks (torch.Tensor): Predicted masks for each anchor of shape (BS, N_anchors, 32).
            imgsz (torch.Tensor): Size of the input image as a tensor of shape (2), i.e., (H, W).
            overlap (bool): Whether the masks in `masks` tensor overlap.

        Returns:
            (torch.Tensor): The calculated loss for instance segmentation.

        Notes:
            The batch loss can be computed for improved speed at higher memory usage.
            For example, pred_mask can be computed as follows:
                pred_mask = torch.einsum('in,nhw->ihw', pred, proto)  # (i, 32) @ (32, 160, 160) -> (i, 160, 160)
        """
        _, _, mask_h, mask_w = proto.shape
        loss = 0

        # Normalize to 0-1
        target_bboxes_normalized = target_bboxes / imgsz[[1, 0, 1, 0]]

        # Areas of target bboxes
        marea = xyxy2xywh(target_bboxes_normalized)[..., 2:].prod(2)

        # Normalize to mask size
        mxyxy = target_bboxes_normalized * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=proto.device)

        for i, single_i in enumerate(zip(fg_mask, target_gt_idx, pred_masks, proto, mxyxy, marea, masks)):
            fg_mask_i, target_gt_idx_i, pred_masks_i, proto_i, mxyxy_i, marea_i, masks_i = single_i
            if fg_mask_i.any():
                mask_idx = target_gt_idx_i[fg_mask_i]
                if overlap:
                    # Instances are encoded as distinct integer values in a single mask plane
                    gt_mask = masks_i == (mask_idx + 1).view(-1, 1, 1)
                    gt_mask = gt_mask.float()
                else:
                    gt_mask = masks[batch_idx.view(-1) == i][mask_idx]

                loss += self.single_mask_loss(
                    gt_mask, pred_masks_i[fg_mask_i], proto_i, mxyxy_i[fg_mask_i], marea_i[fg_mask_i]
                )

            # WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
            else:
                loss += (proto * 0).sum() + (pred_masks * 0).sum()  # inf sums may lead to nan loss

        return loss / fg_mask.sum()


class v8PoseLoss(v8DetectionLoss):
    """Criterion class for computing v8 pose training losses (box, pose, kobj, cls, dfl)."""

    def __init__(self, model):  # model must be de-paralleled
        """Initializes v8PoseLoss with model, sets keypoint variables and declares a keypoint loss instance."""
        super().__init__(model)
        self.kpt_shape = model.model[-1].kpt_shape
        self.bce_pose = nn.BCEWithLogitsLoss()
        is_pose = self.kpt_shape == [17, 3]  # COCO-style 17-keypoint human pose
        nkpt = self.kpt_shape[0]  # number of keypoints
        sigmas = torch.from_numpy(OKS_SIGMA).to(self.device) if is_pose else torch.ones(nkpt, device=self.device) / nkpt
        self.keypoint_loss = KeypointLoss(sigmas=sigmas)

    def __call__(self, preds, batch):
        """Calculate the total loss and detach it."""
        loss = torch.zeros(5, device=self.device)  # box, pose, kobj, cls, dfl
        feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1
        )

        # B, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_kpts = pred_kpts.permute(0, 2, 1).contiguous()

        dtype = pred_scores.dtype
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

        # Targets
        batch_size = pred_scores.shape[0]
        batch_idx = batch["batch_idx"].view(-1, 1)
        targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)

        # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
        pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape))  # (b, h*w, 17, 3)

        _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
            pred_scores.detach().sigmoid(),
            (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype),
            anchor_points * stride_tensor,
            gt_labels,
            gt_bboxes,
            mask_gt,
        )

        target_scores_sum = max(target_scores.sum(), 1)  # avoid division by zero when nothing is assigned

        # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        # Bbox loss
        if fg_mask.sum():
            target_bboxes /= stride_tensor
            loss[0], loss[4] = self.bbox_loss(
                pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask
            )
            keypoints = batch["keypoints"].to(self.device).float().clone()
            # Keypoints arrive normalized to [0, 1]; scale to pixel coordinates (imgsz is (h, w))
            keypoints[..., 0] *= imgsz[1]
            keypoints[..., 1] *= imgsz[0]

            loss[1], loss[2] = self.calculate_keypoints_loss(
                fg_mask, target_gt_idx, keypoints, batch_idx, stride_tensor, target_bboxes, pred_kpts
            )

        loss[0] *= self.hyp.box  # box gain
        loss[1] *= self.hyp.pose  # pose gain
        loss[2] *= self.hyp.kobj  # kobj gain
        loss[3] *= self.hyp.cls  # cls gain
        loss[4] *= self.hyp.dfl  # dfl gain

        return loss.sum() * batch_size, loss.detach()  # loss(box, pose, kobj, cls, dfl)

    @staticmethod
    def kpts_decode(anchor_points, pred_kpts):
        """Decodes predicted keypoints to image coordinates."""
        y = pred_kpts.clone()
        y[..., :2] *= 2.0
        y[..., 0] += anchor_points[:, [0]] - 0.5
        y[..., 1] += anchor_points[:, [1]] - 0.5
        return y

    def calculate_keypoints_loss(
        self, masks, target_gt_idx, keypoints, batch_idx, stride_tensor, target_bboxes, pred_kpts
    ):
        """
        Calculate the keypoints loss for the model.

        This function calculates the keypoints loss and keypoints object loss for a given batch. The keypoints loss is
        based on the difference between the predicted keypoints and ground truth keypoints. The keypoints object loss is
        a binary classification loss that classifies whether a keypoint is present or not.

        Args:
            masks (torch.Tensor): Binary mask tensor indicating object presence, shape (BS, N_anchors).
            target_gt_idx (torch.Tensor): Index tensor mapping anchors to ground truth objects, shape (BS, N_anchors).
            keypoints (torch.Tensor): Ground truth keypoints, shape (N_kpts_in_batch, N_kpts_per_object, kpts_dim).
            batch_idx (torch.Tensor): Batch index tensor for keypoints, shape (N_kpts_in_batch, 1).
            stride_tensor (torch.Tensor): Stride tensor for anchors, shape (N_anchors, 1).
            target_bboxes (torch.Tensor): Ground truth boxes in (x1, y1, x2, y2) format, shape (BS, N_anchors, 4).
            pred_kpts (torch.Tensor): Predicted keypoints, shape (BS, N_anchors, N_kpts_per_object, kpts_dim).

        Returns:
            (tuple): Returns a tuple containing:
                - kpts_loss (torch.Tensor): The keypoints loss.
                - kpts_obj_loss (torch.Tensor): The keypoints object loss.
        """
        batch_idx = batch_idx.flatten()
        batch_size = len(masks)

        # Find the maximum number of keypoints in a single image
        max_kpts = torch.unique(batch_idx, return_counts=True)[1].max()

        # Create a tensor to hold batched keypoints
        batched_keypoints = torch.zeros(
            (batch_size, max_kpts, keypoints.shape[1], keypoints.shape[2]), device=keypoints.device
        )

        # TODO: any idea how to vectorize this?
        # Fill batched_keypoints with keypoints based on batch_idx
        for i in range(batch_size):
            keypoints_i = keypoints[batch_idx == i]
            batched_keypoints[i, : keypoints_i.shape[0]] = keypoints_i

        # Expand dimensions of target_gt_idx to match the shape of batched_keypoints
        target_gt_idx_expanded = target_gt_idx.unsqueeze(-1).unsqueeze(-1)

        # Use target_gt_idx_expanded to select keypoints from batched_keypoints
        selected_keypoints = batched_keypoints.gather(
            1, target_gt_idx_expanded.expand(-1, -1, keypoints.shape[1], keypoints.shape[2])
        )

        # Divide coordinates by stride
        selected_keypoints /= stride_tensor.view(1, -1, 1, 1)

        kpts_loss = 0
        kpts_obj_loss = 0

        if masks.any():
            gt_kpt = selected_keypoints[masks]
            area = xyxy2xywh(target_bboxes[masks])[:, 2:].prod(1, keepdim=True)
            pred_kpt = pred_kpts[masks]
            kpt_mask = gt_kpt[..., 2] != 0 if gt_kpt.shape[-1] == 3 else torch.full_like(gt_kpt[..., 0], True)
            kpts_loss = self.keypoint_loss(pred_kpt, gt_kpt, kpt_mask, area)  # pose loss

            if pred_kpt.shape[-1] == 3:
                kpts_obj_loss = self.bce_pose(pred_kpt[..., 2], kpt_mask.float())  # keypoint obj loss

        return kpts_loss, kpts_obj_loss


class v8ClassificationLoss:
    """Criterion class for computing training losses for classification models."""

    def __call__(self, preds, batch):
        """Compute the classification loss between predictions and true labels."""
        loss = torch.nn.functional.cross_entropy(preds, batch["cls"], reduction="mean")
        loss_items = loss.detach()
        return loss, loss_items


class v8OBBLoss(v8DetectionLoss):
    """Criterion class for computing v8 oriented-bounding-box (OBB) training losses (box, cls, dfl)."""

    def __init__(self, model):  # model must be de-paralleled
        """Initializes v8OBBLoss with a rotated assigner and rotated bbox loss."""
        super().__init__(model)
        self.assigner = RotatedTaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0)
        self.bbox_loss = RotatedBboxLoss(self.reg_max - 1, use_dfl=self.use_dfl).to(self.device)

    def preprocess(self, targets, batch_size, scale_tensor):
        """Preprocesses the target counts and matches with the input batch size to output a tensor.

        Rotated variant: rows are [cls, x, y, w, h, r]; only the first four box columns are scaled.
        """
        if targets.shape[0] == 0:
            out = torch.zeros(batch_size, 0, 6, device=self.device)
        else:
            i = targets[:, 0]  # image index
            _, counts = i.unique(return_counts=True)
            counts = counts.to(dtype=torch.int32)
            out = torch.zeros(batch_size, counts.max(), 6, device=self.device)
            for j in range(batch_size):
                matches = i == j
                n = matches.sum()
                if n:
                    bboxes = targets[matches, 2:]
                    bboxes[..., :4].mul_(scale_tensor)
                    out[j, :n] = torch.cat([targets[matches, 1:2], bboxes], dim=-1)
        return out

    def __call__(self, preds, batch):
        """Calculate and return the loss for the YOLO model."""
        loss = torch.zeros(3, device=self.device)  # box, cls, dfl
        feats, pred_angle = preds if isinstance(preds[0], list) else preds[1]
        batch_size = pred_angle.shape[0]  # batch size, number of masks, mask height, mask width
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1
        )

        # b, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_angle = pred_angle.permute(0, 2, 1).contiguous()

        dtype = pred_scores.dtype
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

        # targets
        try:
            batch_idx = batch["batch_idx"].view(-1, 1)
            targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"].view(-1, 5)), 1)
            # NOTE(review): w is scaled by imgsz[0] (h) and h by imgsz[1] (w); used only for the
            # tiny-box filter below, but confirm the axis pairing is intended.
            rw, rh = targets[:, 4] * imgsz[0].item(), targets[:, 5] * imgsz[1].item()
            targets = targets[(rw >= 2) & (rh >= 2)]  # filter rboxes of tiny size to stabilize training
            targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
            gt_labels, gt_bboxes = targets.split((1, 5), 2)  # cls, xywhr
            mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
        except RuntimeError as e:
            raise TypeError(
                "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"
                "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, "
                "i.e. 'yolo train model=yolov8n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
                "correctly formatted 'OBB' dataset using 'data=dota8.yaml' "
                "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help."
            ) from e

        # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri, pred_angle)  # xyxy, (b, h*w, 4)

        bboxes_for_assigner = pred_bboxes.clone().detach()
        # Only the first four elements need to be scaled
        bboxes_for_assigner[..., :4] *= stride_tensor
        _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
            pred_scores.detach().sigmoid(),
            bboxes_for_assigner.type(gt_bboxes.dtype),
            anchor_points * stride_tensor,
            gt_labels,
            gt_bboxes,
            mask_gt,
        )

        target_scores_sum = max(target_scores.sum(), 1)  # avoid division by zero when nothing is assigned

        # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        # Bbox loss
        if fg_mask.sum():
            target_bboxes[..., :4] /= stride_tensor
            loss[0], loss[2] = self.bbox_loss(
                pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask
            )
        # WARNING: keeps the angle branch in the graph for DDP when there are no foreground anchors
        else:
            loss[0] += (pred_angle * 0).sum()

        loss[0] *= self.hyp.box  # box gain
        loss[1] *= self.hyp.cls  # cls gain
        loss[2] *= self.hyp.dfl  # dfl gain

        return loss.sum() * batch_size, loss.detach()  # loss(box, cls, dfl)

    def bbox_decode(self, anchor_points, pred_dist, pred_angle):
        """
        Decode predicted object bounding box coordinates from anchor points and distribution.

        Args:
            anchor_points (torch.Tensor): Anchor points, (h*w, 2).
            pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4).
            pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1).

        Returns:
            (torch.Tensor): Predicted rotated bounding boxes with angles, (bs, h*w, 5).
        """
        if self.use_dfl:
            b, a, c = pred_dist.shape  # batch, anchors, channels
            pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype))
        return torch.cat((dist2rbox(pred_dist, pred_angle, anchor_points), pred_angle), dim=-1)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/loss.py
Python
unknown
32,581
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Model validation metrics."""

import math
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings

# COCO keypoint OKS sigmas (per-keypoint tolerance), divided by 10 per the cocoeval convention
OKS_SIGMA = (
    np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])
    / 10.0
)


def bbox_ioa(box1, box2, iou=False, eps=1e-7):
    """
    Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.

    Args:
        box1 (np.ndarray): A numpy array of shape (n, 4) representing n bounding boxes.
        box2 (np.ndarray): A numpy array of shape (m, 4) representing m bounding boxes.
        iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (np.ndarray): A numpy array of shape (n, m) representing the intersection over box2 area.
    """
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1.T
    b2_x1, b2_y1, b2_x2, b2_y2 = box2.T

    # Intersection area
    inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * (
        np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)
    ).clip(0)

    # Box2 area
    area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
    if iou:
        box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
        area = area + box1_area[:, None] - inter_area

    # Intersection over box2 area
    return inter_area / (area + eps)


def box_iou(box1, box2, eps=1e-7):
    """
    Calculate intersection-over-union (IoU) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format.

    Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py

    Args:
        box1 (torch.Tensor): A tensor of shape (N, 4) representing N bounding boxes.
        box2 (torch.Tensor): A tensor of shape (M, 4) representing M bounding boxes.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): An NxM tensor containing the pairwise IoU values for every element in box1 and box2.
    """
    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp_(0).prod(2)

    # IoU = inter / (area1 + area2 - inter)
    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)


def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    """
    Calculate Intersection over Union (IoU) of box1(1, 4) to box2(n, 4).

    Args:
        box1 (torch.Tensor): A tensor representing a single bounding box with shape (1, 4).
        box2 (torch.Tensor): A tensor representing n bounding boxes with shape (n, 4).
        xywh (bool, optional): If True, input boxes are in (x, y, w, h) format. If False, input boxes are in
                               (x1, y1, x2, y2) format. Defaults to True.
        GIoU (bool, optional): If True, calculate Generalized IoU. Defaults to False.
        DIoU (bool, optional): If True, calculate Distance IoU. Defaults to False.
        CIoU (bool, optional): If True, calculate Complete IoU. Defaults to False.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): IoU, GIoU, DIoU, or CIoU values depending on the specified flags.
    """
    # Get the coordinates of bounding boxes
    if xywh:  # transform from xywh to xyxy
        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
        w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
        b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
        b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
    else:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
        # NOTE(review): eps is added to heights only (guards the atan w/h ratios below); widths are unpadded
        w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
        w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps

    # Intersection area
    inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * (
        b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)
    ).clamp_(0)

    # Union Area
    union = w1 * h1 + w2 * h2 - inter + eps

    # IoU
    iou = inter / union
    if CIoU or DIoU or GIoU:
        cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1)  # convex (smallest enclosing box) width
        ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw**2 + ch**2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center dist ** 2
            if CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
            return iou - rho2 / c2  # DIoU
        c_area = cw * ch + eps  # convex area
        return iou - (c_area - union) / c_area  # GIoU https://arxiv.org/pdf/1902.09630.pdf
    return iou  # IoU


def mask_iou(mask1, mask2, eps=1e-7):
    """
    Calculate masks IoU.

    Args:
        mask1 (torch.Tensor): A tensor of shape (N, n) where N is the number of ground truth objects and n is the
                        product of image width and height.
        mask2 (torch.Tensor): A tensor of shape (M, n) where M is the number of predicted objects and n is the
                        product of image width and height.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing masks IoU.
    """
    # Binary masks: intersection counts via matrix product of flattened masks
    intersection = torch.matmul(mask1, mask2.T).clamp_(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
    return intersection / (union + eps)


def kpt_iou(kpt1, kpt2, area, sigma, eps=1e-7):
    """
    Calculate Object Keypoint Similarity (OKS).

    Args:
        kpt1 (torch.Tensor): A tensor of shape (N, 17, 3) representing ground truth keypoints.
        kpt2 (torch.Tensor): A tensor of shape (M, 17, 3) representing predicted keypoints.
        area (torch.Tensor): A tensor of shape (N,) representing areas from ground truth.
        sigma (list): A list containing 17 values representing keypoint scales.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing keypoint similarities.
    """
    d = (kpt1[:, None, :, 0] - kpt2[..., 0]) ** 2 + (kpt1[:, None, :, 1] - kpt2[..., 1]) ** 2  # (N, M, 17)
    sigma = torch.tensor(sigma, device=kpt1.device, dtype=kpt1.dtype)  # (17, )
    kpt_mask = kpt1[..., 2] != 0  # (N, 17) only keypoints labeled visible contribute
    e = d / (2 * sigma) ** 2 / (area[:, None, None] + eps) / 2  # from cocoeval
    # e = d / ((area[None, :, None] + eps) * sigma) ** 2 / 2  # from formula
    return (torch.exp(-e) * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps)


def _get_covariance_matrix(boxes):
    """
    Generating covariance matrix from obbs.

    Args:
        boxes (torch.Tensor): A tensor of shape (N, 5) representing rotated bounding boxes, with xywhr format.

    Returns:
        (torch.Tensor): Covariance matrices corresponding to original rotated bounding boxes.
    """
    # Gaussian bounding boxes, ignore the center points (the first two columns) because they are not needed here.
    gbbs = torch.cat((torch.pow(boxes[:, 2:4], 2) / 12, boxes[:, 4:]), dim=-1)
    a, b, c = gbbs.split(1, dim=-1)
    # Rotate the diagonal covariance by angle c: returns (sigma_xx, sigma_yy, sigma_xy)
    return (
        a * torch.cos(c) ** 2 + b * torch.sin(c) ** 2,
        a * torch.sin(c) ** 2 + b * torch.cos(c) ** 2,
        a * torch.cos(c) * torch.sin(c) - b * torch.sin(c) * torch.cos(c),
    )


def probiou(obb1, obb2, CIoU=False, eps=1e-7):
    """
    Calculate the prob iou between oriented bounding boxes, https://arxiv.org/pdf/2106.06072v1.pdf.

    Args:
        obb1 (torch.Tensor): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format.
        obb2 (torch.Tensor): A tensor of shape (N, 5) representing predicted obbs, with xywhr format.
        CIoU (bool, optional): If True, subtract the CIoU aspect-ratio penalty term. Defaults to False.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, ) representing obb similarities.
    """
    x1, y1 = obb1[..., :2].split(1, dim=-1)
    x2, y2 = obb2[..., :2].split(1, dim=-1)
    a1, b1, c1 = _get_covariance_matrix(obb1)
    a2, b2, c2 = _get_covariance_matrix(obb2)

    # Bhattacharyya distance terms between the two Gaussian box representations
    t1 = (
        ((a1 + a2) * (torch.pow(y1 - y2, 2)) + (b1 + b2) * (torch.pow(x1 - x2, 2)))
        / ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)) + eps)
    ) * 0.25
    t2 = (((c1 + c2) * (x2 - x1) * (y1 - y2)) / ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)) + eps)) * 0.5
    t3 = (
        torch.log(
            ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)))
            / (4 * torch.sqrt((a1 * b1 - torch.pow(c1, 2)).clamp_(0) * (a2 * b2 - torch.pow(c2, 2)).clamp_(0)) + eps)
            + eps
        )
        * 0.5
    )
    bd = t1 + t2 + t3
    bd = torch.clamp(bd, eps, 100.0)
    hd = torch.sqrt(1.0 - torch.exp(-bd) + eps)  # Hellinger distance
    iou = 1 - hd
    if CIoU:  # only include the wh aspect ratio part
        w1, h1 = obb1[..., 2:4].split(1, dim=-1)
        w2, h2 = obb2[..., 2:4].split(1, dim=-1)
        v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
        with torch.no_grad():
            alpha = v / (v - iou + (1 + eps))
        return iou - v * alpha  # CIoU
    return iou


def batch_probiou(obb1, obb2, eps=1e-7):
    """
    Calculate the prob iou between oriented bounding boxes, https://arxiv.org/pdf/2106.06072v1.pdf.

    Args:
        obb1 (torch.Tensor | np.ndarray): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format.
        obb2 (torch.Tensor | np.ndarray): A tensor of shape (M, 5) representing predicted obbs, with xywhr format.
        eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

    Returns:
        (torch.Tensor): A tensor of shape (N, M) representing obb similarities.
    """
    obb1 = torch.from_numpy(obb1) if isinstance(obb1, np.ndarray) else obb1
    obb2 = torch.from_numpy(obb2) if isinstance(obb2, np.ndarray) else obb2

    x1, y1 = obb1[..., :2].split(1, dim=-1)
    # Broadcast obb2 against obb1 to produce the full NxM similarity matrix
    x2, y2 = (x.squeeze(-1)[None] for x in obb2[..., :2].split(1, dim=-1))
    a1, b1, c1 = _get_covariance_matrix(obb1)
    a2, b2, c2 = (x.squeeze(-1)[None] for x in _get_covariance_matrix(obb2))

    t1 = (
        ((a1 + a2) * (torch.pow(y1 - y2, 2)) + (b1 + b2) * (torch.pow(x1 - x2, 2)))
        / ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)) + eps)
    ) * 0.25
    t2 = (((c1 + c2) * (x2 - x1) * (y1 - y2)) / ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)) + eps)) * 0.5
    t3 = (
        torch.log(
            ((a1 + a2) * (b1 + b2) - (torch.pow(c1 + c2, 2)))
            / (4 * torch.sqrt((a1 * b1 - torch.pow(c1, 2)).clamp_(0) * (a2 * b2 - torch.pow(c2, 2)).clamp_(0)) + eps)
            + eps
        )
        * 0.5
    )
    bd = t1 + t2 + t3
    bd = torch.clamp(bd, eps, 100.0)
    hd = torch.sqrt(1.0 - torch.exp(-bd) + eps)
    return 1 - hd


def smooth_BCE(eps=0.1):
    """
    Computes smoothed positive and negative Binary Cross-Entropy targets.

    This function calculates positive and negative label smoothing BCE targets based on a given epsilon value.
    For implementation details, refer to https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441.

    Args:
        eps (float, optional): The epsilon value for label smoothing. Defaults to 0.1.

    Returns:
        (tuple): A tuple containing the positive and negative label smoothing BCE targets.
    """
    return 1.0 - 0.5 * eps, 0.5 * eps


class ConfusionMatrix:
    """
    A class for calculating and updating a confusion matrix for object detection and classification tasks.
    Attributes:
        task (str): The type of task, either 'detect' or 'classify'.
        matrix (np.array): The confusion matrix, with dimensions depending on the task.
        nc (int): The number of classes.
        conf (float): The confidence threshold for detections.
        iou_thres (float): The Intersection over Union threshold.
    """

    def __init__(self, nc, conf=0.25, iou_thres=0.45, task="detect"):
        """Initialize attributes for the YOLO model."""
        self.task = task
        # Detection matrices carry one extra row/column (index nc) for the background class.
        self.matrix = np.zeros((nc + 1, nc + 1)) if self.task == "detect" else np.zeros((nc, nc))
        self.nc = nc  # number of classes
        self.conf = 0.25 if conf in (None, 0.001) else conf  # apply 0.25 if default val conf is passed
        self.iou_thres = iou_thres

    def process_cls_preds(self, preds, targets):
        """
        Update confusion matrix for classification task.

        Args:
            preds (Array[N, min(nc,5)]): Predicted class labels.
            targets (Array[N, 1]): Ground truth class labels.
        """
        # Only the top-1 prediction (column 0) is counted against the target.
        preds, targets = torch.cat(preds)[:, 0], torch.cat(targets)
        for p, t in zip(preds.cpu().numpy(), targets.cpu().numpy()):
            self.matrix[p][t] += 1

    def process_batch(self, detections, gt_bboxes, gt_cls):
        """
        Update confusion matrix for object detection task.

        Args:
            detections (Array[N, 6]): Detected bounding boxes and their associated information.
                                      Each row should contain (x1, y1, x2, y2, conf, class).
            gt_bboxes (Array[M, 4]): Ground truth bounding boxes with xyxy format.
            gt_cls (Array[M]): The class labels.
        """
        if gt_cls.shape[0] == 0:  # Check if labels is empty
            if detections is not None:
                # No labels: every confident detection is a false positive against background.
                detections = detections[detections[:, 4] > self.conf]
                detection_classes = detections[:, 5].int()
                for dc in detection_classes:
                    self.matrix[dc, self.nc] += 1  # false positives
            return
        if detections is None:
            # Labels but no detections: every ground truth is a background false negative.
            gt_classes = gt_cls.int()
            for gc in gt_classes:
                self.matrix[self.nc, gc] += 1  # background FN
            return

        detections = detections[detections[:, 4] > self.conf]
        gt_classes = gt_cls.int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(gt_bboxes, detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            # matches columns: (gt_index, det_index, iou)
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                # Greedy dedup: sort by IoU descending, then keep at most one match
                # per detection (col 1) and per ground truth (col 0).
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(int)  # matched gt indices, matched det indices
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # true background

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):  # detection not matched to any ground truth
                    self.matrix[dc, self.nc] += 1  # predicted background

    def matrix(self):
        """Returns the confusion matrix."""
        # NOTE(review): the instance attribute `self.matrix` assigned in __init__ shadows
        # this method on every instance, so `cm.matrix` resolves to the array itself and
        # this method is effectively unreachable — confirm whether it should be removed.
        return self.matrix

    def tp_fp(self):
        """Returns true positives and false positives."""
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
        return (tp[:-1], fp[:-1]) if self.task == "detect" else (tp, fp)  # remove background class if task=detect

    @TryExcept("WARNING ⚠️ ConfusionMatrix plot failure")
    @plt_settings()
    def plot(self, normalize=True, save_dir="", names=(), on_plot=None):
        """
        Plot the confusion matrix using seaborn and save it to a file.
        Args:
            normalize (bool): Whether to normalize the confusion matrix.
            save_dir (str): Directory where the plot will be saved.
            names (tuple): Names of classes, used as labels on the plot.
            on_plot (func): An optional callback to pass plots path and data when they are rendered.
        """
        import seaborn as sn  # local import: seaborn is only needed when plotting

        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1)  # normalize columns
        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
        nc, nn = self.nc, len(names)  # number of classes, names
        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
        ticklabels = (list(names) + ["background"]) if labels else "auto"
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
            sn.heatmap(
                array,
                ax=ax,
                annot=nc < 30,  # only annotate cells when the matrix is small enough to read
                annot_kws={"size": 8},
                cmap="Blues",
                fmt=".2f" if normalize else ".0f",
                square=True,
                vmin=0.0,
                xticklabels=ticklabels,
                yticklabels=ticklabels,
            ).set_facecolor((1, 1, 1))
        title = "Confusion Matrix" + " Normalized" * normalize
        ax.set_xlabel("True")
        ax.set_ylabel("Predicted")
        ax.set_title(title)
        plot_fname = Path(save_dir) / f'{title.lower().replace(" ", "_")}.png'
        fig.savefig(plot_fname, dpi=250)
        plt.close(fig)
        if on_plot:
            on_plot(plot_fname)

    def print(self):
        """Print the confusion matrix to the console."""
        for i in range(self.nc + 1):
            LOGGER.info(" ".join(map(str, self.matrix[i])))


def smooth(y, f=0.05):
    """Box filter of fraction f."""
    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
    p = np.ones(nf // 2)  # ones padding
    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded (edge values replicated)
    return np.convolve(yp, np.ones(nf) / nf, mode="valid")  # y-smoothed


@plt_settings()
def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=(), on_plot=None):
    """Plots a precision-recall curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}")  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color="grey")  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean())
    ax.set_xlabel("Recall")
    ax.set_ylabel("Precision")
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    ax.set_title("Precision-Recall Curve")
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


@plt_settings()
def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric", on_plot=None):
    """Plots a metric-confidence curve."""
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f"{names[i]}")  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color="grey")  # plot(confidence, metric)

    # Smooth the mean curve before locating/reporting its maximum.
    y = smooth(py.mean(0), 0.05)
    ax.plot(px, y, linewidth=3, color="blue", label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}")
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    ax.set_title(f"{ylabel}-Confidence Curve")
    fig.savefig(save_dir, dpi=250)
    plt.close(fig)
    if on_plot:
        on_plot(save_dir)


def compute_ap(recall, precision):
    """
    Compute the average precision (AP) given the recall and precision curves.

    Args:
        recall (list): The recall curve.
        precision (list): The precision curve.

    Returns:
        (float): Average precision.
        (np.ndarray): Precision envelope curve.
        (np.ndarray): Modified recall curve with sentinel values added at the beginning and end.
""" # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([1.0], precision, [0.0])) # Compute the precision envelope mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve method = "interp" # methods: 'continuous', 'interp' if method == "interp": x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' i = np.where(mrec[1:] != mrec[:-1])[0] # points where x-axis (recall) changes ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve return ap, mpre, mrec def ap_per_class( tp, conf, pred_cls, target_cls, plot=False, on_plot=None, save_dir=Path(), names=(), eps=1e-16, prefix="" ): """ Computes the average precision per class for object detection evaluation. Args: tp (np.ndarray): Binary array indicating whether the detection is correct (True) or not (False). conf (np.ndarray): Array of confidence scores of the detections. pred_cls (np.ndarray): Array of predicted classes of the detections. target_cls (np.ndarray): Array of true classes of the detections. plot (bool, optional): Whether to plot PR curves or not. Defaults to False. on_plot (func, optional): A callback to pass plots path and data when they are rendered. Defaults to None. save_dir (Path, optional): Directory to save the PR curves. Defaults to an empty path. names (tuple, optional): Tuple of class names to plot PR curves. Defaults to an empty tuple. eps (float, optional): A small value to avoid division by zero. Defaults to 1e-16. prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string. Returns: (tuple): A tuple of six arrays and one array of unique classes, where: tp (np.ndarray): True positive counts at threshold given by max F1 metric for each class.Shape: (nc,). fp (np.ndarray): False positive counts at threshold given by max F1 metric for each class. Shape: (nc,). 
p (np.ndarray): Precision values at threshold given by max F1 metric for each class. Shape: (nc,). r (np.ndarray): Recall values at threshold given by max F1 metric for each class. Shape: (nc,). f1 (np.ndarray): F1-score values at threshold given by max F1 metric for each class. Shape: (nc,). ap (np.ndarray): Average precision for each class at different IoU thresholds. Shape: (nc, 10). unique_classes (np.ndarray): An array of unique classes that have data. Shape: (nc,). p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000). r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000). f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000). x (np.ndarray): X-axis values for the curves. Shape: (1000,). prec_values: Precision values at mAP@0.5 for each class. Shape: (nc, 1000). """ # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class x, prec_values = np.linspace(0, 1, 1000), [] # Average precision, precision and recall curves ap, p_curve, r_curve = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: continue # Accumulate FPs and TPs fpc = (1 - tp[i]).cumsum(0) tpc = tp[i].cumsum(0) # Recall recall = tpc / (n_l + eps) # recall curve r_curve[ci] = np.interp(-x, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve p_curve[ci] = np.interp(-x, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) if plot 
and j == 0: prec_values.append(np.interp(x, mrec, mpre)) # precision at mAP@0.5 prec_values = np.array(prec_values) # (nc, 1000) # Compute F1 (harmonic mean of precision and recall) f1_curve = 2 * p_curve * r_curve / (p_curve + r_curve + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: plot_pr_curve(x, prec_values, ap, save_dir / f"{prefix}PR_curve.png", names, on_plot=on_plot) plot_mc_curve(x, f1_curve, save_dir / f"{prefix}F1_curve.png", names, ylabel="F1", on_plot=on_plot) plot_mc_curve(x, p_curve, save_dir / f"{prefix}P_curve.png", names, ylabel="Precision", on_plot=on_plot) plot_mc_curve(x, r_curve, save_dir / f"{prefix}R_curve.png", names, ylabel="Recall", on_plot=on_plot) i = smooth(f1_curve.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p_curve[:, i], r_curve[:, i], f1_curve[:, i] # max-F1 precision, recall, F1 values tp = (r * nt).round() # true positives fp = (tp / (p + eps) - tp).round() # false positives return tp, fp, p, r, f1, ap, unique_classes.astype(int), p_curve, r_curve, f1_curve, x, prec_values class Metric(SimpleClass): """ Class for computing evaluation metrics for YOLOv8 model. Attributes: p (list): Precision for each class. Shape: (nc,). r (list): Recall for each class. Shape: (nc,). f1 (list): F1 score for each class. Shape: (nc,). all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). ap_class_index (list): Index of class for each AP score. Shape: (nc,). nc (int): Number of classes. Methods: ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or []. ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or []. mp(): Mean precision of all classes. Returns: Float. mr(): Mean recall of all classes. Returns: Float. map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float. 
        map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float.
        map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float.
        mean_results(): Mean of results, returns mp, mr, map50, map.
        class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i].
        maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,).
        fitness(): Model fitness as a weighted combination of metrics. Returns: Float.
        update(results): Update metric attributes with new evaluation results.
    """

    def __init__(self) -> None:
        """Initializes a Metric instance for computing evaluation metrics for the YOLOv8 model."""
        self.p = []  # (nc, )
        self.r = []  # (nc, )
        self.f1 = []  # (nc, )
        self.all_ap = []  # (nc, 10)
        self.ap_class_index = []  # (nc, )
        self.nc = 0

    @property
    def ap50(self):
        """
        Returns the Average Precision (AP) at an IoU threshold of 0.5 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50 values per class, or an empty list if not available.
        """
        return self.all_ap[:, 0] if len(self.all_ap) else []

    @property
    def ap(self):
        """
        Returns the Average Precision (AP) at an IoU threshold of 0.5-0.95 for all classes.

        Returns:
            (np.ndarray, list): Array of shape (nc,) with AP50-95 values per class, or an empty list if not available.
        """
        return self.all_ap.mean(1) if len(self.all_ap) else []

    @property
    def mp(self):
        """
        Returns the Mean Precision of all classes.

        Returns:
            (float): The mean precision of all classes.
        """
        return self.p.mean() if len(self.p) else 0.0

    @property
    def mr(self):
        """
        Returns the Mean Recall of all classes.

        Returns:
            (float): The mean recall of all classes.
        """
        return self.r.mean() if len(self.r) else 0.0

    @property
    def map50(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.5.

        Returns:
            (float): The mAP at an IoU threshold of 0.5.
        """
        return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0

    @property
    def map75(self):
        """
        Returns the mean Average Precision (mAP) at an IoU threshold of 0.75.

        Returns:
            (float): The mAP at an IoU threshold of 0.75.
        """
        # column 5 of all_ap corresponds to the 0.75 threshold in the 0.5:0.05:0.95 range
        return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0

    @property
    def map(self):
        """
        Returns the mean Average Precision (mAP) over IoU thresholds of 0.5 - 0.95 in steps of 0.05.

        Returns:
            (float): The mAP over IoU thresholds of 0.5 - 0.95 in steps of 0.05.
        """
        return self.all_ap.mean() if len(self.all_ap) else 0.0

    def mean_results(self):
        """Mean of results, return mp, mr, map50, map."""
        return [self.mp, self.mr, self.map50, self.map]

    def class_result(self, i):
        """Class-aware result, return p[i], r[i], ap50[i], ap[i]."""
        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

    @property
    def maps(self):
        """MAP of each class."""
        # Classes with no data keep the global mAP as a fallback value.
        maps = np.zeros(self.nc) + self.map
        for i, c in enumerate(self.ap_class_index):
            maps[c] = self.ap[i]
        return maps

    def fitness(self):
        """Model fitness as a weighted combination of metrics."""
        w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
        return (np.array(self.mean_results()) * w).sum()

    def update(self, results):
        """
        Updates the evaluation metrics of the model with a new set of results.

        Args:
            results (tuple): A tuple containing the following evaluation metrics:
                - p (list): Precision for each class. Shape: (nc,).
                - r (list): Recall for each class. Shape: (nc,).
                - f1 (list): F1 score for each class. Shape: (nc,).
                - all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10).
                - ap_class_index (list): Index of class for each AP score. Shape: (nc,).
                - p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000).
                - r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000).
                - f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000).
                - px (np.ndarray): X-axis values for the curves. Shape: (1000,).
                - prec_values (np.ndarray): Precision values at mAP@0.5 for each class. Shape: (nc, 1000).

        Side Effects:
            Updates the class attributes `self.p`, `self.r`, `self.f1`, `self.all_ap`,
            and `self.ap_class_index` based on the values provided in the `results` tuple.
        """
        (
            self.p,
            self.r,
            self.f1,
            self.all_ap,
            self.ap_class_index,
            self.p_curve,
            self.r_curve,
            self.f1_curve,
            self.px,
            self.prec_values,
        ) = results

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        # Intentionally empty for the base Metric; subclasses of the metrics
        # containers expose named curves instead.
        return []

    @property
    def curves_results(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [
            [self.px, self.prec_values, "Recall", "Precision"],
            [self.px, self.f1_curve, "Confidence", "F1"],
            [self.px, self.p_curve, "Confidence", "Precision"],
            [self.px, self.r_curve, "Confidence", "Recall"],
        ]


class DetMetrics(SimpleClass):
    """
    This class is a utility class for computing detection metrics such as precision, recall, and mean average precision
    (mAP) of an object detection model.

    Args:
        save_dir (Path): A path to the directory where the output plots will be saved. Defaults to current directory.
        plot (bool): A flag that indicates whether to plot precision-recall curves for each class. Defaults to False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (tuple of str): A tuple of strings that represents the names of the classes. Defaults to an empty tuple.

    Attributes:
        save_dir (Path): A path to the directory where the output plots will be saved.
        plot (bool): A flag that indicates whether to plot the precision-recall curves for each class.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (tuple of str): A tuple of strings that represents the names of the classes.
        box (Metric): An instance of the Metric class for storing the results of the detection metrics.
        speed (dict): A dictionary for storing the execution time of different parts of the detection process.

    Methods:
        process(tp, conf, pred_cls, target_cls): Updates the metric results with the latest batch of predictions.
        keys: Returns a list of keys for accessing the computed detection metrics.
        mean_results: Returns a list of mean values for the computed detection metrics.
        class_result(i): Returns a list of values for the computed detection metrics for a specific class.
        maps: Returns a dictionary of mean average precision (mAP) values for different IoU thresholds.
        fitness: Computes the fitness score based on the computed detection metrics.
        ap_class_index: Returns a list of class indices sorted by their average precision (AP) values.
        results_dict: Returns a dictionary that maps detection metric keys to their computed values.
        curves: Returns a list of curve names available for plotting.
        curves_results: Returns the curve data from the underlying box Metric.
    """

    def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None:
        """Initialize a DetMetrics instance with a save directory, plot flag, callback function, and class names."""
        self.save_dir = save_dir
        self.plot = plot
        self.on_plot = on_plot
        self.names = names
        self.box = Metric()
        self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0}
        self.task = "detect"

    def process(self, tp, conf, pred_cls, target_cls):
        """Process predicted results for object detection and update metrics."""
        # [2:] drops the per-class tp/fp counts from ap_per_class's return; the
        # remaining 10 values match Metric.update's expected tuple.
        results = ap_per_class(
            tp,
            conf,
            pred_cls,
            target_cls,
            plot=self.plot,
            save_dir=self.save_dir,
            names=self.names,
            on_plot=self.on_plot,
        )[2:]
        self.box.nc = len(self.names)
        self.box.update(results)

    @property
    def keys(self):
        """Returns a list of keys for accessing specific metrics."""
        return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]

    def mean_results(self):
        """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95."""
        return self.box.mean_results()

    def class_result(self, i):
        """Return the result of evaluating the performance of an object detection model on a specific class."""
        return self.box.class_result(i)

    @property
    def maps(self):
        """Returns mean Average Precision (mAP) scores per class."""
        return self.box.maps

    @property
    def fitness(self):
        """Returns the fitness of box object."""
        return self.box.fitness()

    @property
    def ap_class_index(self):
        """Returns the average precision index per class."""
        return self.box.ap_class_index

    @property
    def results_dict(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness]))

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return ["Precision-Recall(B)", "F1-Confidence(B)", "Precision-Confidence(B)", "Recall-Confidence(B)"]

    @property
    def curves_results(self):
        """Returns dictionary of computed performance metrics and statistics."""
        return self.box.curves_results


class SegmentMetrics(SimpleClass):
    """
    Calculates and aggregates detection and segmentation metrics over a given set of classes.

    Args:
        save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory.
        plot (bool): Whether to save the detection and segmentation plots. Default is False.
        on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None.
        names (list): List of class names. Default is an empty list.

    Attributes:
        save_dir (Path): Path to the directory where the output plots should be saved.
        plot (bool): Whether to save the detection and segmentation plots.
        on_plot (func): An optional callback to pass plots path and data when they are rendered.
        names (list): List of class names.
        box (Metric): An instance of the Metric class to calculate box detection metrics.
        seg (Metric): An instance of the Metric class to calculate mask segmentation metrics.
        speed (dict): Dictionary to store the time taken in different phases of inference.

    Methods:
        process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions.
        mean_results(): Returns the mean of the detection and segmentation metrics over all the classes.
        class_result(i): Returns the detection and segmentation metrics of class `i`.
maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95. fitness: Returns the fitness scores, which are a single weighted combination of metrics. ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP). results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. """ def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: """Initialize a SegmentMetrics instance with a save directory, plot flag, callback function, and class names.""" self.save_dir = save_dir self.plot = plot self.on_plot = on_plot self.names = names self.box = Metric() self.seg = Metric() self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} self.task = "segment" def process(self, tp, tp_m, conf, pred_cls, target_cls): """ Processes the detection and segmentation metrics over the given set of predictions. Args: tp (list): List of True Positive boxes. tp_m (list): List of True Positive masks. conf (list): List of confidence scores. pred_cls (list): List of predicted classes. target_cls (list): List of target classes. 
""" results_mask = ap_per_class( tp_m, conf, pred_cls, target_cls, plot=self.plot, on_plot=self.on_plot, save_dir=self.save_dir, names=self.names, prefix="Mask", )[2:] self.seg.nc = len(self.names) self.seg.update(results_mask) results_box = ap_per_class( tp, conf, pred_cls, target_cls, plot=self.plot, on_plot=self.on_plot, save_dir=self.save_dir, names=self.names, prefix="Box", )[2:] self.box.nc = len(self.names) self.box.update(results_box) @property def keys(self): """Returns a list of keys for accessing metrics.""" return [ "metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)", "metrics/precision(M)", "metrics/recall(M)", "metrics/mAP50(M)", "metrics/mAP50-95(M)", ] def mean_results(self): """Return the mean metrics for bounding box and segmentation results.""" return self.box.mean_results() + self.seg.mean_results() def class_result(self, i): """Returns classification results for a specified class index.""" return self.box.class_result(i) + self.seg.class_result(i) @property def maps(self): """Returns mAP scores for object detection and semantic segmentation models.""" return self.box.maps + self.seg.maps @property def fitness(self): """Get the fitness score for both segmentation and bounding box models.""" return self.seg.fitness() + self.box.fitness() @property def ap_class_index(self): """Boxes and masks have the same ap_class_index.""" return self.box.ap_class_index @property def results_dict(self): """Returns results of object detection model for evaluation.""" return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness])) @property def curves(self): """Returns a list of curves for accessing specific metrics curves.""" return [ "Precision-Recall(B)", "F1-Confidence(B)", "Precision-Confidence(B)", "Recall-Confidence(B)", "Precision-Recall(M)", "F1-Confidence(M)", "Precision-Confidence(M)", "Recall-Confidence(M)", ] @property def curves_results(self): """Returns dictionary of computed performance metrics and 
statistics.""" return self.box.curves_results + self.seg.curves_results class PoseMetrics(SegmentMetrics): """ Calculates and aggregates detection and pose metrics over a given set of classes. Args: save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory. plot (bool): Whether to save the detection and segmentation plots. Default is False. on_plot (func): An optional callback to pass plots path and data when they are rendered. Defaults to None. names (list): List of class names. Default is an empty list. Attributes: save_dir (Path): Path to the directory where the output plots should be saved. plot (bool): Whether to save the detection and segmentation plots. on_plot (func): An optional callback to pass plots path and data when they are rendered. names (list): List of class names. box (Metric): An instance of the Metric class to calculate box detection metrics. pose (Metric): An instance of the Metric class to calculate mask segmentation metrics. speed (dict): Dictionary to store the time taken in different phases of inference. Methods: process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions. mean_results(): Returns the mean of the detection and segmentation metrics over all the classes. class_result(i): Returns the detection and segmentation metrics of class `i`. maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95. fitness: Returns the fitness scores, which are a single weighted combination of metrics. ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP). results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. 
""" def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: """Initialize the PoseMetrics class with directory path, class names, and plotting options.""" super().__init__(save_dir, plot, names) self.save_dir = save_dir self.plot = plot self.on_plot = on_plot self.names = names self.box = Metric() self.pose = Metric() self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} self.task = "pose" def process(self, tp, tp_p, conf, pred_cls, target_cls): """ Processes the detection and pose metrics over the given set of predictions. Args: tp (list): List of True Positive boxes. tp_p (list): List of True Positive keypoints. conf (list): List of confidence scores. pred_cls (list): List of predicted classes. target_cls (list): List of target classes. """ results_pose = ap_per_class( tp_p, conf, pred_cls, target_cls, plot=self.plot, on_plot=self.on_plot, save_dir=self.save_dir, names=self.names, prefix="Pose", )[2:] self.pose.nc = len(self.names) self.pose.update(results_pose) results_box = ap_per_class( tp, conf, pred_cls, target_cls, plot=self.plot, on_plot=self.on_plot, save_dir=self.save_dir, names=self.names, prefix="Box", )[2:] self.box.nc = len(self.names) self.box.update(results_box) @property def keys(self): """Returns list of evaluation metric keys.""" return [ "metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)", "metrics/precision(P)", "metrics/recall(P)", "metrics/mAP50(P)", "metrics/mAP50-95(P)", ] def mean_results(self): """Return the mean results of box and pose.""" return self.box.mean_results() + self.pose.mean_results() def class_result(self, i): """Return the class-wise detection results for a specific class i.""" return self.box.class_result(i) + self.pose.class_result(i) @property def maps(self): """Returns the mean average precision (mAP) per class for both box and pose detections.""" return self.box.maps + self.pose.maps @property def fitness(self): """Computes 
classification metrics and speed using the `targets` and `pred` inputs.""" return self.pose.fitness() + self.box.fitness() @property def curves(self): """Returns a list of curves for accessing specific metrics curves.""" return [ "Precision-Recall(B)", "F1-Confidence(B)", "Precision-Confidence(B)", "Recall-Confidence(B)", "Precision-Recall(P)", "F1-Confidence(P)", "Precision-Confidence(P)", "Recall-Confidence(P)", ] @property def curves_results(self): """Returns dictionary of computed performance metrics and statistics.""" return self.box.curves_results + self.pose.curves_results class ClassifyMetrics(SimpleClass): """ Class for computing classification metrics including top-1 and top-5 accuracy. Attributes: top1 (float): The top-1 accuracy. top5 (float): The top-5 accuracy. speed (Dict[str, float]): A dictionary containing the time taken for each step in the pipeline. Properties: fitness (float): The fitness of the model, which is equal to top-5 accuracy. results_dict (Dict[str, Union[float, str]]): A dictionary containing the classification metrics and fitness. keys (List[str]): A list of keys for the results_dict. Methods: process(targets, pred): Processes the targets and predictions to compute classification metrics. 
""" def __init__(self) -> None: """Initialize a ClassifyMetrics instance.""" self.top1 = 0 self.top5 = 0 self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} self.task = "classify" def process(self, targets, pred): """Target classes and predicted classes.""" pred, targets = torch.cat(pred), torch.cat(targets) correct = (targets[:, None] == pred).float() acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy self.top1, self.top5 = acc.mean(0).tolist() @property def fitness(self): """Returns mean of top-1 and top-5 accuracies as fitness score.""" return (self.top1 + self.top5) / 2 @property def results_dict(self): """Returns a dictionary with model's performance metrics and fitness score.""" return dict(zip(self.keys + ["fitness"], [self.top1, self.top5, self.fitness])) @property def keys(self): """Returns a list of keys for the results_dict property.""" return ["metrics/accuracy_top1", "metrics/accuracy_top5"] @property def curves(self): """Returns a list of curves for accessing specific metrics curves.""" return [] @property def curves_results(self): """Returns a list of curves for accessing specific metrics curves.""" return [] class OBBMetrics(SimpleClass): def __init__(self, save_dir=Path("."), plot=False, on_plot=None, names=()) -> None: self.save_dir = save_dir self.plot = plot self.on_plot = on_plot self.names = names self.box = Metric() self.speed = {"preprocess": 0.0, "inference": 0.0, "loss": 0.0, "postprocess": 0.0} def process(self, tp, conf, pred_cls, target_cls): """Process predicted results for object detection and update metrics.""" results = ap_per_class( tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir, names=self.names, on_plot=self.on_plot, )[2:] self.box.nc = len(self.names) self.box.update(results) @property def keys(self): """Returns a list of keys for accessing specific metrics.""" return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", 
"metrics/mAP50-95(B)"] def mean_results(self): """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95.""" return self.box.mean_results() def class_result(self, i): """Return the result of evaluating the performance of an object detection model on a specific class.""" return self.box.class_result(i) @property def maps(self): """Returns mean Average Precision (mAP) scores per class.""" return self.box.maps @property def fitness(self): """Returns the fitness of box object.""" return self.box.fitness() @property def ap_class_index(self): """Returns the average precision index per class.""" return self.box.ap_class_index @property def results_dict(self): """Returns dictionary of computed performance metrics and statistics.""" return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness])) @property def curves(self): """Returns a list of curves for accessing specific metrics curves.""" return [] @property def curves_results(self): """Returns a list of curves for accessing specific metrics curves.""" return []
# --- dataset metadata residue (not Python source); preserved as comments ---
# repo: 2201_75373101/TargetSingleAndBinocularRanging
# path: ultralytics/utils/metrics.py
# language: Python
# license: unknown
# size: 53,358
# Ultralytics YOLO 🚀, AGPL-3.0 license import contextlib import math import re import time import cv2 import numpy as np import torch import torch.nn.functional as F import torchvision from ultralytics.utils import LOGGER from ultralytics.utils.metrics import batch_probiou class Profile(contextlib.ContextDecorator): """ YOLOv8 Profile class. Use as a decorator with @Profile() or as a context manager with 'with Profile():'. Example: ```python from ultralytics.utils.ops import Profile with Profile(device=device) as dt: pass # slow operation here print(dt) # prints "Elapsed time is 9.5367431640625e-07 s" ``` """ def __init__(self, t=0.0, device: torch.device = None): """ Initialize the Profile class. Args: t (float): Initial time. Defaults to 0.0. device (torch.device): Devices used for model inference. Defaults to None (cpu). """ self.t = t self.device = device self.cuda = bool(device and str(device).startswith("cuda")) def __enter__(self): """Start timing.""" self.start = self.time() return self def __exit__(self, type, value, traceback): # noqa """Stop timing.""" self.dt = self.time() - self.start # delta-time self.t += self.dt # accumulate dt def __str__(self): """Returns a human-readable string representing the accumulated elapsed time in the profiler.""" return f"Elapsed time is {self.t} s" def time(self): """Get current time.""" if self.cuda: torch.cuda.synchronize(self.device) return time.time() def segment2box(segment, width=640, height=640): """ Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy). Args: segment (torch.Tensor): the segment label width (int): the width of the image. Defaults to 640 height (int): The height of the image. Defaults to 640 Returns: (np.ndarray): the minimum and maximum x and y values of the segment. 
""" x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x = x[inside] y = y[inside] return ( np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) if any(x) else np.zeros(4, dtype=segment.dtype) ) # xyxy def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False): """ Rescales bounding boxes (in the format of xyxy by default) from the shape of the image they were originally specified in (img1_shape) to the shape of a different image (img0_shape). Args: img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width). boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2) img0_shape (tuple): the shape of the target image, in the format of (height, width). ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be calculated based on the size difference between the two images. padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular rescaling. xywh (bool): The box format is xywh or not, default=False. Returns: boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2) """ if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = ( round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1), ) # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] if padding: boxes[..., 0] -= pad[0] # x padding boxes[..., 1] -= pad[1] # y padding if not xywh: boxes[..., 2] -= pad[0] # x padding boxes[..., 3] -= pad[1] # y padding boxes[..., :4] /= gain return clip_boxes(boxes, img0_shape) def make_divisible(x, divisor): """ Returns the nearest number that is divisible by the given divisor. Args: x (int): The number to make divisible. 
divisor (int | torch.Tensor): The divisor. Returns: (int): The nearest number divisible by the divisor. """ if isinstance(divisor, torch.Tensor): divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor def nms_rotated(boxes, scores, threshold=0.45): """ NMS for obbs, powered by probiou and fast-nms. Args: boxes (torch.Tensor): (N, 5), xywhr. scores (torch.Tensor): (N, ). threshold (float): Iou threshold. Returns: """ if len(boxes) == 0: return np.empty((0,), dtype=np.int8) sorted_idx = torch.argsort(scores, descending=True) boxes = boxes[sorted_idx] ious = batch_probiou(boxes, boxes).triu_(diagonal=1) pick = torch.nonzero(ious.max(dim=0)[0] < threshold).squeeze_(-1) return sorted_idx[pick] def non_max_suppression( prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300, nc=0, # number of classes (optional) max_time_img=0.05, max_nms=30000, max_wh=7680, rotated=False, ): """ Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box. Args: prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes) containing the predicted boxes, classes, and masks. The tensor should be in the format output by a model, such as YOLO. conf_thres (float): The confidence threshold below which boxes will be filtered out. Valid values are between 0.0 and 1.0. iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS. Valid values are between 0.0 and 1.0. classes (List[int]): A list of class indices to consider. If None, all classes will be considered. agnostic (bool): If True, the model is agnostic to the number of classes, and all classes will be considered as one. multi_label (bool): If True, each box may have multiple labels. labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner list contains the apriori labels for a given image. 
The list should be in the format output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2). max_det (int): The maximum number of boxes to keep after NMS. nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks. max_time_img (float): The maximum time (seconds) for processing one image. max_nms (int): The maximum number of boxes into torchvision.ops.nms(). max_wh (int): The maximum box width and height in pixels Returns: (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns (x1, y1, x2, y2, confidence, class, mask1, mask2, ...). """ # Checks assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output bs = prediction.shape[0] # batch size nc = nc or (prediction.shape[1] - 4) # number of classes nm = prediction.shape[1] - nc - 4 mi = 4 + nc # mask start index xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates # Settings # min_wh = 2 # (pixels) minimum box width and height time_limit = 2.0 + max_time_img * bs # seconds to quit after multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) prediction = prediction.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84) if not rotated: prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy t = time.time() output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling 
if labels and len(labels[xi]) and not rotated: lb = labels[xi] v = torch.zeros((len(lb), nc + nm + 4), device=x.device) v[:, :4] = xywh2xyxy(lb[:, 1:5]) # box v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image if not x.shape[0]: continue # Detections matrix nx6 (xyxy, conf, cls) box, cls, mask = x.split((4, nc, nm), 1) if multi_label: i, j = torch.where(cls > conf_thres) x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) else: # best class only conf, j = cls.max(1, keepdim=True) x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # Check shape n = x.shape[0] # number of boxes if not n: # no boxes continue if n > max_nms: # excess boxes x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes scores = x[:, 4] # scores if rotated: boxes = torch.cat((x[:, :2] + c, x[:, 2:4], x[:, -1:]), dim=-1) # xywhr i = nms_rotated(boxes, scores, iou_thres) else: boxes = x[:, :4] + c # boxes (offset by class) i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS i = i[:max_det] # limit detections # # Experimental # merge = False # use merge-NMS # if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # # Update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) # from .metrics import box_iou # iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix # weights = iou * scores[None] # box weights # x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes # redundant = True # require redundant detections # if redundant: # i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] if (time.time() - t) > time_limit: LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded") break # time limit exceeded 
return output def clip_boxes(boxes, shape): """ Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape. Args: boxes (torch.Tensor): the bounding boxes to clip shape (tuple): the shape of the image Returns: (torch.Tensor | numpy.ndarray): Clipped boxes """ if isinstance(boxes, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug) boxes[..., 0] = boxes[..., 0].clamp(0, shape[1]) # x1 boxes[..., 1] = boxes[..., 1].clamp(0, shape[0]) # y1 boxes[..., 2] = boxes[..., 2].clamp(0, shape[1]) # x2 boxes[..., 3] = boxes[..., 3].clamp(0, shape[0]) # y2 else: # np.array (faster grouped) boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 return boxes def clip_coords(coords, shape): """ Clip line coordinates to the image boundaries. Args: coords (torch.Tensor | numpy.ndarray): A list of line coordinates. shape (tuple): A tuple of integers representing the size of the image in the format (height, width). Returns: (torch.Tensor | numpy.ndarray): Clipped coordinates """ if isinstance(coords, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug) coords[..., 0] = coords[..., 0].clamp(0, shape[1]) # x coords[..., 1] = coords[..., 1].clamp(0, shape[0]) # y else: # np.array (faster grouped) coords[..., 0] = coords[..., 0].clip(0, shape[1]) # x coords[..., 1] = coords[..., 1].clip(0, shape[0]) # y return coords def scale_image(masks, im0_shape, ratio_pad=None): """ Takes a mask, and resizes it to the original image size. Args: masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3]. im0_shape (tuple): the original image shape ratio_pad (tuple): the ratio of the padding to the original image. Returns: masks (torch.Tensor): The masks that are being returned. 
""" # Rescale coordinates (xyxy) from im1_shape to im0_shape im1_shape = masks.shape if im1_shape[:2] == im0_shape[:2]: return masks if ratio_pad is None: # calculate from im0_shape gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding else: # gain = ratio_pad[0][0] pad = ratio_pad[1] top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) if len(masks.shape) < 2: raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') masks = masks[top:bottom, left:right] masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) if len(masks.shape) == 2: masks = masks[:, :, None] return masks def xyxy2xywh(x): """ Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner. Args: x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format. """ assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center y[..., 2] = x[..., 2] - x[..., 0] # width y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): """ Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the top-left corner and (x2, y2) is the bottom-right corner. Args: x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format. Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format. 
""" assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy dw = x[..., 2] / 2 # half-width dh = x[..., 3] / 2 # half-height y[..., 0] = x[..., 0] - dw # top left x y[..., 1] = x[..., 1] - dh # top left y y[..., 2] = x[..., 0] + dw # bottom right x y[..., 3] = x[..., 1] + dh # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): """ Convert normalized bounding box coordinates to pixel coordinates. Args: x (np.ndarray | torch.Tensor): The bounding box coordinates. w (int): Width of the image. Defaults to 640 h (int): Height of the image. Defaults to 640 padw (int): Padding width. Defaults to 0 padh (int): Padding height. Defaults to 0 Returns: y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box. """ assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): """ Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. x, y, width and height are normalized to image dimensions. Args: x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. w (int): The width of the image. Defaults to 640 h (int): The height of the image. Defaults to 640 clip (bool): If True, the boxes will be clipped to the image boundaries. 
Defaults to False eps (float): The minimum value of the box's width and height. Defaults to 0.0 Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format """ if clip: x = clip_boxes(x, (h - eps, w - eps)) assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center y[..., 2] = (x[..., 2] - x[..., 0]) / w # width y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xywh2ltwh(x): """ Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. Args: x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format """ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y return y def xyxy2ltwh(x): """ Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right. Args: x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format. """ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 2] = x[..., 2] - x[..., 0] # width y[..., 3] = x[..., 3] - x[..., 1] # height return y def ltwh2xywh(x): """ Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center. Args: x (torch.Tensor): the input tensor Returns: y (np.ndarray | torch.Tensor): The bounding box coordinates in the xywh format. 
""" y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 0] = x[..., 0] + x[..., 2] / 2 # center x y[..., 1] = x[..., 1] + x[..., 3] / 2 # center y return y def xyxyxyxy2xywhr(corners): """ Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are expected in degrees from 0 to 90. Args: corners (numpy.ndarray | torch.Tensor): Input corners of shape (n, 8). Returns: (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5). """ is_torch = isinstance(corners, torch.Tensor) points = corners.cpu().numpy() if is_torch else corners points = points.reshape(len(corners), -1, 2) rboxes = [] for pts in points: # NOTE: Use cv2.minAreaRect to get accurate xywhr, # especially some objects are cut off by augmentations in dataloader. (x, y), (w, h), angle = cv2.minAreaRect(pts) rboxes.append([x, y, w, h, angle / 180 * np.pi]) return ( torch.tensor(rboxes, device=corners.device, dtype=corners.dtype) if is_torch else np.asarray(rboxes, dtype=points.dtype) ) # rboxes def xywhr2xyxyxyxy(rboxes): """ Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should be in degrees from 0 to 90. Args: rboxes (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5). Returns: (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2). 
""" is_numpy = isinstance(rboxes, np.ndarray) cos, sin = (np.cos, np.sin) if is_numpy else (torch.cos, torch.sin) ctr = rboxes[..., :2] w, h, angle = (rboxes[..., i : i + 1] for i in range(2, 5)) cos_value, sin_value = cos(angle), sin(angle) vec1 = [w / 2 * cos_value, w / 2 * sin_value] vec2 = [-h / 2 * sin_value, h / 2 * cos_value] vec1 = np.concatenate(vec1, axis=-1) if is_numpy else torch.cat(vec1, dim=-1) vec2 = np.concatenate(vec2, axis=-1) if is_numpy else torch.cat(vec2, dim=-1) pt1 = ctr + vec1 + vec2 pt2 = ctr + vec1 - vec2 pt3 = ctr - vec1 - vec2 pt4 = ctr - vec1 + vec2 return np.stack([pt1, pt2, pt3, pt4], axis=-2) if is_numpy else torch.stack([pt1, pt2, pt3, pt4], dim=-2) def ltwh2xyxy(x): """ It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right. Args: x (np.ndarray | torch.Tensor): the input image Returns: y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes. """ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[..., 2] = x[..., 2] + x[..., 0] # width y[..., 3] = x[..., 3] + x[..., 1] # height return y def segments2boxes(segments): """ It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) Args: segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates Returns: (np.ndarray): the xywh coordinates of the bounding boxes. """ boxes = [] for s in segments: x, y = s.T # segment xy boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy return xyxy2xywh(np.array(boxes)) # cls, xywh def resample_segments(segments, n=1000): """ Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each. Args: segments (list): a list of (n,2) arrays, where n is the number of points in the segment. n (int): number of points to resample the segment to. Defaults to 1000 Returns: segments (list): the resampled segments. 
""" for i, s in enumerate(segments): s = np.concatenate((s, s[0:1, :]), axis=0) x = np.linspace(0, len(s) - 1, n) xp = np.arange(len(s)) segments[i] = ( np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], dtype=np.float32).reshape(2, -1).T ) # segment xy return segments def crop_mask(masks, boxes): """ It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box. Args: masks (torch.Tensor): [n, h, w] tensor of masks boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form Returns: (torch.Tensor): The masks are being cropped to the bounding box. """ n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w) c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1) return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) def process_mask_upsample(protos, masks_in, bboxes, shape): """ Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher quality but is slower. Args: protos (torch.Tensor): [mask_dim, mask_h, mask_w] masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms bboxes (torch.Tensor): [n, 4], n is number of masks after nms shape (tuple): the size of the input image (h,w) Returns: (torch.Tensor): The upsampled masks. """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) def process_mask(protos, masks_in, bboxes, shape, upsample=False): """ Apply masks to bounding boxes using the output of the mask head. Args: protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w]. 
masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS. bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS. shape (tuple): A tuple of integers representing the size of the input image in the format (h, w). upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Default is False. Returns: (torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w are the height and width of the input image. The mask is applied to the bounding boxes. """ c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW downsampled_bboxes = bboxes.clone() downsampled_bboxes[:, 0] *= mw / iw downsampled_bboxes[:, 2] *= mw / iw downsampled_bboxes[:, 3] *= mh / ih downsampled_bboxes[:, 1] *= mh / ih masks = crop_mask(masks, downsampled_bboxes) # CHW if upsample: masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW return masks.gt_(0.5) def process_mask_native(protos, masks_in, bboxes, shape): """ It takes the output of the mask head, and crops it after upsampling to the bounding boxes. Args: protos (torch.Tensor): [mask_dim, mask_h, mask_w] masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms bboxes (torch.Tensor): [n, 4], n is number of masks after nms shape (tuple): the size of the input image (h,w) Returns: masks (torch.Tensor): The returned masks with dimensions [h, w, n] """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = scale_masks(masks[None], shape)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) def scale_masks(masks, shape, padding=True): """ Rescale segment masks to shape. Args: masks (torch.Tensor): (N, C, H, W). shape (tuple): Height and width. 
padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular rescaling. """ mh, mw = masks.shape[2:] gain = min(mh / shape[0], mw / shape[1]) # gain = old / new pad = [mw - shape[1] * gain, mh - shape[0] * gain] # wh padding if padding: pad[0] /= 2 pad[1] /= 2 top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) # y, x bottom, right = (int(mh - pad[1]), int(mw - pad[0])) masks = masks[..., top:bottom, left:right] masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False) # NCHW return masks def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True): """ Rescale segment coordinates (xy) from img1_shape to img0_shape. Args: img1_shape (tuple): The shape of the image that the coords are from. coords (torch.Tensor): the coords to be scaled of shape n,2. img0_shape (tuple): the shape of the image that the segmentation is being applied to. ratio_pad (tuple): the ratio of the image size to the padded image size. normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False. padding (bool): If True, assuming the boxes is based on image augmented by yolo style. If False then do regular rescaling. Returns: coords (torch.Tensor): The scaled coordinates. """ if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding else: gain = ratio_pad[0][0] pad = ratio_pad[1] if padding: coords[..., 0] -= pad[0] # x padding coords[..., 1] -= pad[1] # y padding coords[..., 0] /= gain coords[..., 1] /= gain coords = clip_coords(coords, img0_shape) if normalize: coords[..., 0] /= img0_shape[1] # width coords[..., 1] /= img0_shape[0] # height return coords def regularize_rboxes(rboxes): """ Regularize rotated boxes in range [0, pi/2]. 
Args: rboxes (torch.Tensor): (N, 5), xywhr. Returns: (torch.Tensor): The regularized boxes. """ x, y, w, h, t = rboxes.unbind(dim=-1) # Swap edge and angle if h >= w w_ = torch.where(w > h, w, h) h_ = torch.where(w > h, h, w) t = torch.where(w > h, t, t + math.pi / 2) % math.pi return torch.stack([x, y, w_, h_, t], dim=-1) # regularized boxes def masks2segments(masks, strategy="largest"): """ It takes a list of masks(n,h,w) and returns a list of segments(n,xy) Args: masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160) strategy (str): 'concat' or 'largest'. Defaults to largest Returns: segments (List): list of segment masks """ segments = [] for x in masks.int().cpu().numpy().astype("uint8"): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if c: if strategy == "concat": # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) elif strategy == "largest": # select largest segment c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) else: c = np.zeros((0, 2)) # no segments found segments.append(c.astype("float32")) return segments def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray: """ Convert a batch of FP32 torch tensors (0.0-1.0) to a NumPy uint8 array (0-255), changing from BCHW to BHWC layout. Args: batch (torch.Tensor): Input tensor batch of shape (Batch, Channels, Height, Width) and dtype torch.float32. Returns: (np.ndarray): Output NumPy array batch of shape (Batch, Height, Width, Channels) and dtype uint8. """ return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).to(torch.uint8).cpu().numpy() def clean_str(s): """ Cleans a string by replacing special characters with underscore _ Args: s (str): a string needing special characters replaced Returns: (str): a string with special characters replaced by an underscore _ """ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/ops.py
Python
unknown
32,936
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Monkey patches to update/extend functionality of existing functions."""

import time
from pathlib import Path

import cv2
import numpy as np
import torch

# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------
_imshow = cv2.imshow  # copy to avoid recursion errors


def imread(filename: str, flags: int = cv2.IMREAD_COLOR):
    """
    Read an image from a file; works for non-ASCII paths by decoding from a raw byte buffer.

    Args:
        filename (str): Path to the file to read.
        flags (int, optional): Flag that can take values of cv2.IMREAD_*. Defaults to cv2.IMREAD_COLOR.

    Returns:
        (np.ndarray): The read image.
    """
    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)


def imwrite(filename: str, img: np.ndarray, params=None):
    """
    Write an image to a file; works for non-ASCII paths by encoding to a buffer first.

    Args:
        filename (str): Path to the file to write.
        img (np.ndarray): Image to write.
        params (list of ints, optional): Additional parameters. See OpenCV documentation.

    Returns:
        (bool): True if the file was written, False otherwise.
    """
    try:
        ext = Path(filename).suffix  # encoder is chosen from the file extension
        _, buffer = cv2.imencode(ext, img, params)
        buffer.tofile(filename)
        return True
    except Exception:
        # Best-effort write: any encode/IO failure is reported via the return value
        return False


def imshow(winname: str, mat: np.ndarray):
    """
    Display an image in the specified window; escapes non-ASCII window titles for OpenCV.

    Args:
        winname (str): Name of the window.
        mat (np.ndarray): Image to be shown.
    """
    _imshow(winname.encode("unicode_escape").decode(), mat)


# PyTorch functions ----------------------------------------------------------------------------------------------------
_torch_save = torch.save  # copy to avoid recursion errors


def torch_save(*args, **kwargs):
    """
    Save with dill (if available) so lambda functions serialize where pickle cannot, and retry
    up to 3 times with exponential standoff to ride out transient failures (device flush,
    anti-virus scans).

    Args:
        *args (tuple): Positional arguments to pass to torch.save.
        **kwargs (dict): Keyword arguments to pass to torch.save.
    """
    try:
        import dill as pickle  # noqa
    except ImportError:
        import pickle

    kwargs.setdefault("pickle_module", pickle)  # noqa

    for attempt in range(4):  # 1 try + 3 retries
        try:
            return _torch_save(*args, **kwargs)
        except RuntimeError:  # unable to save, possibly waiting for device to flush or anti-virus scan
            if attempt == 3:
                raise
            time.sleep((2**attempt) / 2)  # exponential standoff 0.5s, 1.0s, 2.0s
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/patches.py
Python
unknown
2,659
# Ultralytics YOLO 🚀, AGPL-3.0 license import contextlib import math import warnings from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np import torch from PIL import Image, ImageDraw, ImageFont from PIL import __version__ as pil_version from ultralytics.utils import LOGGER, TryExcept, ops, plt_settings, threaded from .checks import check_font, check_version, is_ascii from .files import increment_path class Colors: """ Ultralytics default color palette https://ultralytics.com/. This class provides methods to work with the Ultralytics color palette, including converting hex color codes to RGB values. Attributes: palette (list of tuple): List of RGB color values. n (int): The number of colors in the palette. pose_palette (np.array): A specific color palette array with dtype np.uint8. """ def __init__(self): """Initialize colors as hex = matplotlib.colors.TABLEAU_COLORS.values().""" hexs = ( "FF3838", "FF9D97", "FF701F", "FFB21D", "CFD231", "48F90A", "92CC17", "3DDB86", "1A9334", "00D4BB", "2C99A8", "00C2FF", "344593", "6473FF", "0018EC", "8438FF", "520085", "CB38FF", "FF95C8", "FF37C7", ) self.palette = [self.hex2rgb(f"#{c}") for c in hexs] self.n = len(self.palette) self.pose_palette = np.array( [ [255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255], [153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255], [255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102], [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255], ], dtype=np.uint8, ) def __call__(self, i, bgr=False): """Converts hex color codes to RGB values.""" c = self.palette[int(i) % self.n] return (c[2], c[1], c[0]) if bgr else c @staticmethod def hex2rgb(h): """Converts hex color codes to RGB values (i.e. 
default PIL order).""" return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors' class Annotator: """ Ultralytics Annotator for train/val mosaics and JPGs and predictions annotations. Attributes: im (Image.Image or numpy array): The image to annotate. pil (bool): Whether to use PIL or cv2 for drawing annotations. font (ImageFont.truetype or ImageFont.load_default): Font used for text annotations. lw (float): Line width for drawing. skeleton (List[List[int]]): Skeleton structure for keypoints. limb_color (List[int]): Color palette for limbs. kpt_color (List[int]): Color palette for keypoints. """ def __init__(self, im, line_width=None, font_size=None, font="Arial.ttf", pil=False, example="abc"): """Initialize the Annotator class with image and line width along with color palette for keypoints and limbs.""" assert im.data.contiguous, "Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images." non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic self.pil = pil or non_ascii self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) try: font = check_font("Arial.Unicode.ttf" if non_ascii else font) size = font_size or max(round(sum(self.im.size) / 2 * 0.035), 12) self.font = ImageFont.truetype(str(font), size) except Exception: self.font = ImageFont.load_default() # Deprecation fix for w, h = getsize(string) -> _, _, w, h = getbox(string) if check_version(pil_version, "9.2.0"): self.font.getsize = lambda x: self.font.getbbox(x)[2:4] # text width, height else: # use cv2 self.im = im if im.flags.writeable else im.copy() self.tf = max(self.lw - 1, 1) # font thickness self.sf = self.lw / 3 # font scale # Pose self.skeleton = [ [16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7], ] self.limb_color = colors.pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] self.kpt_color = colors.pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] def box_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), rotated=False): """Add one xyxy box to image with label.""" if isinstance(box, torch.Tensor): box = box.tolist() if self.pil or not is_ascii(label): if rotated: p1 = box[0] # NOTE: PIL-version polygon needs tuple type. 
self.draw.polygon([tuple(b) for b in box], width=self.lw, outline=color) else: p1 = (box[0], box[1]) self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height outside = p1[1] - h >= 0 # label fits outside box self.draw.rectangle( (p1[0], p1[1] - h if outside else p1[1], p1[0] + w + 1, p1[1] + 1 if outside else p1[1] + h + 1), fill=color, ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((p1[0], p1[1] - h if outside else p1[1]), label, fill=txt_color, font=self.font) else: # cv2 if rotated: p1 = [int(b) for b in box[0]] # NOTE: cv2-version polylines needs np.asarray type. cv2.polylines(self.im, [np.asarray(box, dtype=int)], True, color, self.lw) else: p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: w, h = cv2.getTextSize(label, 0, fontScale=self.sf, thickness=self.tf)[0] # text width, height outside = p1[1] - h >= 3 p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled cv2.putText( self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.sf, txt_color, thickness=self.tf, lineType=cv2.LINE_AA, ) def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """ Plot masks on image. Args: masks (tensor): Predicted masks on cuda, shape: [n, h, w] colors (List[List[Int]]): Colors for predicted masks, [[r, g, b] * n] im_gpu (tensor): Image is in cuda, shape: [3, h, w], range: [0, 1] alpha (float): Mask transparency: 0.0 fully transparent, 1.0 opaque retina_masks (bool): Whether to use high resolution masks or not. Defaults to False. 
""" if self.pil: # Convert to numpy first self.im = np.asarray(self.im).copy() if len(masks) == 0: self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 if im_gpu.device != masks.device: im_gpu = im_gpu.to(masks.device) colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0 # shape(n,3) colors = colors[:, None, None] # shape(n,1,1,3) masks = masks.unsqueeze(3) # shape(n,h,w,1) masks_color = masks * (colors * alpha) # shape(n,h,w,3) inv_alpha_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) mcs = masks_color.max(dim=0).values # shape(n,h,w,3) im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) im_gpu = im_gpu * inv_alpha_masks[-1] + mcs im_mask = im_gpu * 255 im_mask_np = im_mask.byte().cpu().numpy() self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape) if self.pil: # Convert im back to PIL and update draw self.fromarray(self.im) def kpts(self, kpts, shape=(640, 640), radius=5, kpt_line=True): """ Plot keypoints on the image. Args: kpts (tensor): Predicted keypoints with shape [17, 3]. Each keypoint has (x, y, confidence). shape (tuple): Image shape as a tuple (h, w), where h is the height and w is the width. radius (int, optional): Radius of the drawn keypoints. Default is 5. kpt_line (bool, optional): If True, the function will draw lines connecting keypoints for human pose. Default is True. Note: `kpt_line=True` currently only supports human pose plotting. 
""" if self.pil: # Convert to numpy first self.im = np.asarray(self.im).copy() nkpt, ndim = kpts.shape is_pose = nkpt == 17 and ndim in {2, 3} kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting for i, k in enumerate(kpts): color_k = [int(x) for x in self.kpt_color[i]] if is_pose else colors(i) x_coord, y_coord = k[0], k[1] if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: if len(k) == 3: conf = k[2] if conf < 0.5: continue cv2.circle(self.im, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA) if kpt_line: ndim = kpts.shape[-1] for i, sk in enumerate(self.skeleton): pos1 = (int(kpts[(sk[0] - 1), 0]), int(kpts[(sk[0] - 1), 1])) pos2 = (int(kpts[(sk[1] - 1), 0]), int(kpts[(sk[1] - 1), 1])) if ndim == 3: conf1 = kpts[(sk[0] - 1), 2] conf2 = kpts[(sk[1] - 1), 2] if conf1 < 0.5 or conf2 < 0.5: continue if pos1[0] % shape[1] == 0 or pos1[1] % shape[0] == 0 or pos1[0] < 0 or pos1[1] < 0: continue if pos2[0] % shape[1] == 0 or pos2[1] % shape[0] == 0 or pos2[0] < 0 or pos2[1] < 0: continue cv2.line(self.im, pos1, pos2, [int(x) for x in self.limb_color[i]], thickness=2, lineType=cv2.LINE_AA) if self.pil: # Convert im back to PIL and update draw self.fromarray(self.im) def rectangle(self, xy, fill=None, outline=None, width=1): """Add rectangle to image (PIL-only).""" self.draw.rectangle(xy, fill, outline, width) def text(self, xy, text, txt_color=(255, 255, 255), anchor="top", box_style=False): """Adds text to an image using PIL or cv2.""" if anchor == "bottom": # start y from font bottom w, h = self.font.getsize(text) # text width, height xy[1] += 1 - h if self.pil: if box_style: w, h = self.font.getsize(text) self.draw.rectangle((xy[0], xy[1], xy[0] + w + 1, xy[1] + h + 1), fill=txt_color) # Using `txt_color` for background and draw fg with white color txt_color = (255, 255, 255) if "\n" in text: lines = text.split("\n") _, h = self.font.getsize(text) for line in lines: self.draw.text(xy, line, fill=txt_color, 
font=self.font) xy[1] += h else: self.draw.text(xy, text, fill=txt_color, font=self.font) else: if box_style: w, h = cv2.getTextSize(text, 0, fontScale=self.sf, thickness=self.tf)[0] # text width, height outside = xy[1] - h >= 3 p2 = xy[0] + w, xy[1] - h - 3 if outside else xy[1] + h + 3 cv2.rectangle(self.im, xy, p2, txt_color, -1, cv2.LINE_AA) # filled # Using `txt_color` for background and draw fg with white color txt_color = (255, 255, 255) cv2.putText(self.im, text, xy, 0, self.sf, txt_color, thickness=self.tf, lineType=cv2.LINE_AA) def fromarray(self, im): """Update self.im from a numpy array.""" self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) def result(self): """Return annotated image as array.""" return np.asarray(self.im) def show(self, title=None): """Show the annotated image.""" (self.im if isinstance(self.im, Image.Image) else Image.fromarray(self.im[..., ::-1])).show(title) def save(self, filename="image.jpg"): """Save the annotated image to 'filename'.""" (self.im if isinstance(self.im, Image.Image) else Image.fromarray(self.im[..., ::-1])).save(filename) def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5): """ Draw region line. Args: reg_pts (list): Region Points (for line 2 points, for region 4 points) color (tuple): Region Color value thickness (int): Region area thickness value """ cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness) def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2): """ Draw centroid point and track trails. 
Args: track (list): object tracking points for trails display color (tuple): tracks line color track_thickness (int): track line thickness value """ points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness) cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1) def count_labels(self, counts=0, count_txt_size=2, color=(255, 255, 255), txt_color=(0, 0, 0)): """ Plot counts for object counter. Args: counts (int): objects counts value count_txt_size (int): text size for counts display color (tuple): background color of counts display txt_color (tuple): text color of counts display """ self.tf = count_txt_size tl = self.tf or round(0.002 * (self.im.shape[0] + self.im.shape[1]) / 2) + 1 tf = max(tl - 1, 1) # Get text size for in_count and out_count t_size_in = cv2.getTextSize(str(counts), 0, fontScale=tl / 2, thickness=tf)[0] # Calculate positions for counts label text_width = t_size_in[0] text_x = (self.im.shape[1] - text_width) // 2 # Center x-coordinate text_y = t_size_in[1] # Create a rounded rectangle for in_count cv2.rectangle( self.im, (text_x - 5, text_y - 5), (text_x + text_width + 7, text_y + t_size_in[1] + 7), color, -1 ) cv2.putText( self.im, str(counts), (text_x, text_y + t_size_in[1]), 0, tl / 2, txt_color, self.tf, lineType=cv2.LINE_AA ) @staticmethod def estimate_pose_angle(a, b, c): """ Calculate the pose angle for object. 
Args: a (float) : The value of pose point a b (float): The value of pose point b c (float): The value o pose point c Returns: angle (degree): Degree value of angle between three points """ a, b, c = np.array(a), np.array(b), np.array(c) radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0]) angle = np.abs(radians * 180.0 / np.pi) if angle > 180.0: angle = 360 - angle return angle def draw_specific_points(self, keypoints, indices=[2, 5, 7], shape=(640, 640), radius=2): """ Draw specific keypoints for gym steps counting. Args: keypoints (list): list of keypoints data to be plotted indices (list): keypoints ids list to be plotted shape (tuple): imgsz for model inference radius (int): Keypoint radius value """ nkpts, ndim = keypoints.shape nkpts == 17 and ndim == 3 for i, k in enumerate(keypoints): if i in indices: x_coord, y_coord = k[0], k[1] if x_coord % shape[1] != 0 and y_coord % shape[0] != 0: if len(k) == 3: conf = k[2] if conf < 0.5: continue cv2.circle(self.im, (int(x_coord), int(y_coord)), radius, (0, 255, 0), -1, lineType=cv2.LINE_AA) return self.im def plot_angle_and_count_and_stage(self, angle_text, count_text, stage_text, center_kpt, line_thickness=2): """ Plot the pose angle, count value and step stage. 
Args: angle_text (str): angle value for workout monitoring count_text (str): counts value for workout monitoring stage_text (str): stage decision for workout monitoring center_kpt (int): centroid pose index for workout monitoring line_thickness (int): thickness for text display """ angle_text, count_text, stage_text = (f" {angle_text:.2f}", f"Steps : {count_text}", f" {stage_text}") font_scale = 0.6 + (line_thickness / 10.0) # Draw angle (angle_text_width, angle_text_height), _ = cv2.getTextSize(angle_text, 0, font_scale, line_thickness) angle_text_position = (int(center_kpt[0]), int(center_kpt[1])) angle_background_position = (angle_text_position[0], angle_text_position[1] - angle_text_height - 5) angle_background_size = (angle_text_width + 2 * 5, angle_text_height + 2 * 5 + (line_thickness * 2)) cv2.rectangle( self.im, angle_background_position, ( angle_background_position[0] + angle_background_size[0], angle_background_position[1] + angle_background_size[1], ), (255, 255, 255), -1, ) cv2.putText(self.im, angle_text, angle_text_position, 0, font_scale, (0, 0, 0), line_thickness) # Draw Counts (count_text_width, count_text_height), _ = cv2.getTextSize(count_text, 0, font_scale, line_thickness) count_text_position = (angle_text_position[0], angle_text_position[1] + angle_text_height + 20) count_background_position = ( angle_background_position[0], angle_background_position[1] + angle_background_size[1] + 5, ) count_background_size = (count_text_width + 10, count_text_height + 10 + (line_thickness * 2)) cv2.rectangle( self.im, count_background_position, ( count_background_position[0] + count_background_size[0], count_background_position[1] + count_background_size[1], ), (255, 255, 255), -1, ) cv2.putText(self.im, count_text, count_text_position, 0, font_scale, (0, 0, 0), line_thickness) # Draw Stage (stage_text_width, stage_text_height), _ = cv2.getTextSize(stage_text, 0, font_scale, line_thickness) stage_text_position = (int(center_kpt[0]), int(center_kpt[1]) + 
angle_text_height + count_text_height + 40) stage_background_position = (stage_text_position[0], stage_text_position[1] - stage_text_height - 5) stage_background_size = (stage_text_width + 10, stage_text_height + 10) cv2.rectangle( self.im, stage_background_position, ( stage_background_position[0] + stage_background_size[0], stage_background_position[1] + stage_background_size[1], ), (255, 255, 255), -1, ) cv2.putText(self.im, stage_text, stage_text_position, 0, font_scale, (0, 0, 0), line_thickness) def seg_bbox(self, mask, mask_color=(255, 0, 255), det_label=None, track_label=None): """ Function for drawing segmented object in bounding box shape. Args: mask (list): masks data list for instance segmentation area plotting mask_color (tuple): mask foreground color det_label (str): Detection label text track_label (str): Tracking label text """ cv2.polylines(self.im, [np.int32([mask])], isClosed=True, color=mask_color, thickness=2) label = f"Track ID: {track_label}" if track_label else det_label text_size, _ = cv2.getTextSize(label, 0, 0.7, 1) cv2.rectangle( self.im, (int(mask[0][0]) - text_size[0] // 2 - 10, int(mask[0][1]) - text_size[1] - 10), (int(mask[0][0]) + text_size[0] // 2 + 5, int(mask[0][1] + 5)), mask_color, -1, ) cv2.putText( self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1]) - 5), 0, 0.7, (255, 255, 255), 2 ) def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color): """ Plot the distance and line on frame. Args: distance_m (float): Distance between two bbox centroids in meters. distance_mm (float): Distance between two bbox centroids in millimeters. centroids (list): Bounding box centroids data. line_color (RGB): Distance line color. centroid_color (RGB): Bounding box centroid color. 
""" (text_width_m, text_height_m), _ = cv2.getTextSize( f"Distance M: {distance_m:.2f}m", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 ) cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), (255, 255, 255), -1) cv2.putText( self.im, f"Distance M: {distance_m:.2f}m", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2, cv2.LINE_AA, ) (text_width_mm, text_height_mm), _ = cv2.getTextSize( f"Distance MM: {distance_mm:.2f}mm", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 ) cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), (255, 255, 255), -1) cv2.putText( self.im, f"Distance MM: {distance_mm:.2f}mm", (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2, cv2.LINE_AA, ) cv2.line(self.im, centroids[0], centroids[1], line_color, 3) cv2.circle(self.im, centroids[0], 6, centroid_color, -1) cv2.circle(self.im, centroids[1], 6, centroid_color, -1) def visioneye(self, box, center_point, color=(235, 219, 11), pin_color=(255, 0, 255), thickness=2, pins_radius=10): """ Function for pinpoint human-vision eye mapping and plotting. 
Args: box (list): Bounding box coordinates center_point (tuple): center point for vision eye view color (tuple): object centroid and line color value pin_color (tuple): visioneye point color value thickness (int): int value for line thickness pins_radius (int): visioneye point radius value """ center_bbox = int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2) cv2.circle(self.im, center_point, pins_radius, pin_color, -1) cv2.circle(self.im, center_bbox, pins_radius, color, -1) cv2.line(self.im, center_point, center_bbox, color, thickness) @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 @plt_settings() def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None): """Plot training labels including class histograms and box statistics.""" import pandas as pd import seaborn as sn # Filter matplotlib>=3.7.2 warning and Seaborn use_inf and is_categorical FutureWarnings warnings.filterwarnings("ignore", category=UserWarning, message="The figure layout has changed to tight") warnings.filterwarnings("ignore", category=FutureWarning) # Plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") nc = int(cls.max() + 1) # number of classes boxes = boxes[:1000000] # limit to 1M boxes x = pd.DataFrame(boxes, columns=["x", "y", "width", "height"]) # Seaborn correlogram sn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200) plt.close() # Matplotlib labels ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(cls, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) for i in range(nc): y[2].patches[i].set_color([x / 255 for x in colors(i)]) ax[0].set_ylabel("instances") if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: ax[0].set_xlabel("classes") sn.histplot(x, x="x", y="y", ax=ax[2], bins=50, pmax=0.9) sn.histplot(x, x="width", y="height", ax=ax[3], bins=50, pmax=0.9) # Rectangles boxes[:, 0:2] = 0.5 # center boxes = ops.xywh2xyxy(boxes) * 1000 img = Image.fromarray(np.ones((1000, 1000, 3), dtype=np.uint8) * 255) for cls, box in zip(cls[:500], boxes[:500]): ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) ax[1].axis("off") for a in [0, 1, 2, 3]: for s in ["top", "right", "left", "bottom"]: ax[a].spines[s].set_visible(False) fname = save_dir / "labels.jpg" plt.savefig(fname, dpi=200) plt.close() if on_plot: on_plot(fname) def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True): """ Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop. This function takes a bounding box and an image, and then saves a cropped portion of the image according to the bounding box. Optionally, the crop can be squared, and the function allows for gain and padding adjustments to the bounding box. Args: xyxy (torch.Tensor or list): A tensor or list representing the bounding box in xyxy format. im (numpy.ndarray): The input image. 
file (Path, optional): The path where the cropped image will be saved. Defaults to 'im.jpg'. gain (float, optional): A multiplicative factor to increase the size of the bounding box. Defaults to 1.02. pad (int, optional): The number of pixels to add to the width and height of the bounding box. Defaults to 10. square (bool, optional): If True, the bounding box will be transformed into a square. Defaults to False. BGR (bool, optional): If True, the image will be saved in BGR format, otherwise in RGB. Defaults to False. save (bool, optional): If True, the cropped image will be saved to disk. Defaults to True. Returns: (numpy.ndarray): The cropped image. Example: ```python from ultralytics.utils.plotting import save_one_box xyxy = [50, 50, 150, 150] im = cv2.imread('image.jpg') cropped_im = save_one_box(xyxy, im, file='cropped.jpg', square=True) ``` """ if not isinstance(xyxy, torch.Tensor): # may be list xyxy = torch.stack(xyxy) b = ops.xyxy2xywh(xyxy.view(-1, 4)) # boxes if square: b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = ops.xywh2xyxy(b).long() xyxy = ops.clip_boxes(xyxy, im.shape) crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory f = str(increment_path(file).with_suffix(".jpg")) # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop @threaded def plot_images( images, batch_idx, cls, bboxes=np.zeros(0, dtype=np.float32), confs=None, masks=np.zeros(0, dtype=np.uint8), kpts=np.zeros((0, 51), dtype=np.float32), paths=None, fname="images.jpg", names=None, on_plot=None, max_subplots=16, save=True, ): """Plot image grid with labels.""" if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if 
isinstance(cls, torch.Tensor): cls = cls.cpu().numpy() if isinstance(bboxes, torch.Tensor): bboxes = bboxes.cpu().numpy() if isinstance(masks, torch.Tensor): masks = masks.cpu().numpy().astype(int) if isinstance(kpts, torch.Tensor): kpts = kpts.cpu().numpy() if isinstance(batch_idx, torch.Tensor): batch_idx = batch_idx.cpu().numpy() max_size = 1920 # max image size bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs**0.5) # number of subplots (square) if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin mosaic[y : y + h, x : x + w, :] = images[i].transpose(1, 2, 0) # Resize (optional) scale = max_size / ns / max(h, w) if scale < 1: h = math.ceil(scale * h) w = math.ceil(scale * w) mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(cls) > 0: idx = batch_idx == i classes = cls[idx].astype("int") labels = confs is None if len(bboxes): boxes = bboxes[idx] conf = confs[idx] if confs is not None else None # check for confidence presence (label vs pred) is_obb = boxes.shape[-1] == 5 # xywhr boxes = ops.xywhr2xyxyxyxy(boxes) if is_obb else ops.xywh2xyxy(boxes) if len(boxes): if boxes[:, :4].max() <= 1.1: # if normalized with tolerance 0.1 boxes[..., 0::2] *= w # scale to pixels boxes[..., 1::2] *= h elif scale < 1: # absolute coords need scale if image scales boxes[..., :4] *= scale 
boxes[..., 0::2] += x boxes[..., 1::2] += y for j, box in enumerate(boxes.astype(np.int64).tolist()): c = classes[j] color = colors(c) c = names.get(c, c) if names else c if labels or conf[j] > 0.25: # 0.25 conf thresh label = f"{c}" if labels else f"{c} {conf[j]:.1f}" annotator.box_label(box, label, color=color, rotated=is_obb) elif len(classes): for c in classes: color = colors(c) c = names.get(c, c) if names else c annotator.text((x, y), f"{c}", txt_color=color, box_style=True) # Plot keypoints if len(kpts): kpts_ = kpts[idx].copy() if len(kpts_): if kpts_[..., 0].max() <= 1.01 or kpts_[..., 1].max() <= 1.01: # if normalized with tolerance .01 kpts_[..., 0] *= w # scale to pixels kpts_[..., 1] *= h elif scale < 1: # absolute coords need scale if image scales kpts_ *= scale kpts_[..., 0] += x kpts_[..., 1] += y for j in range(len(kpts_)): if labels or conf[j] > 0.25: # 0.25 conf thresh annotator.kpts(kpts_[j]) # Plot masks if len(masks): if idx.shape[0] == masks.shape[0]: # overlap_masks=False image_masks = masks[idx] else: # overlap_masks=True image_masks = masks[[i]] # (1, 640, 640) nl = idx.sum() index = np.arange(nl).reshape((nl, 1, 1)) + 1 image_masks = np.repeat(image_masks, nl, axis=0) image_masks = np.where(image_masks == index, 1.0, 0.0) im = np.asarray(annotator.im).copy() for j in range(len(image_masks)): if labels or conf[j] > 0.25: # 0.25 conf thresh color = colors(classes[j]) mh, mw = image_masks[j].shape if mh != h or mw != w: mask = image_masks[j].astype(np.uint8) mask = cv2.resize(mask, (w, h)) mask = mask.astype(bool) else: mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): im[y : y + h, x : x + w, :][mask] = ( im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6 ) annotator.fromarray(im) if not save: return np.asarray(annotator.im) annotator.im.save(fname) # save if on_plot: on_plot(fname) @plt_settings() def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False, classify=False, 
on_plot=None): """ Plot training results from a results CSV file. The function supports various types of data including segmentation, pose estimation, and classification. Plots are saved as 'results.png' in the directory where the CSV is located. Args: file (str, optional): Path to the CSV file containing the training results. Defaults to 'path/to/results.csv'. dir (str, optional): Directory where the CSV file is located if 'file' is not provided. Defaults to ''. segment (bool, optional): Flag to indicate if the data is for segmentation. Defaults to False. pose (bool, optional): Flag to indicate if the data is for pose estimation. Defaults to False. classify (bool, optional): Flag to indicate if the data is for classification. Defaults to False. on_plot (callable, optional): Callback function to be executed after plotting. Takes filename as an argument. Defaults to None. Example: ```python from ultralytics.utils.plotting import plot_results plot_results('path/to/results.csv', segment=True) ``` """ import pandas as pd from scipy.ndimage import gaussian_filter1d save_dir = Path(file).parent if file else Path(dir) if classify: fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True) index = [1, 4, 2, 3] elif segment: fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12] elif pose: fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True) index = [1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 18, 8, 9, 12, 13] else: fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7] ax = ax.ravel() files = list(save_dir.glob("results*.csv")) assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
for f in files: try: data = pd.read_csv(f) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate(index): y = data.values[:, j].astype("float") # y[y == 0] = np.nan # don't show zero values ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) # actual results ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2) # smoothing line ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: LOGGER.warning(f"WARNING: Plotting error for {f}: {e}") ax[1].legend() fname = save_dir / "results.png" fig.savefig(fname, dpi=200) plt.close() if on_plot: on_plot(fname) def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none"): """ Plots a scatter plot with points colored based on a 2D histogram. Args: v (array-like): Values for the x-axis. f (array-like): Values for the y-axis. bins (int, optional): Number of bins for the histogram. Defaults to 20. cmap (str, optional): Colormap for the scatter plot. Defaults to 'viridis'. alpha (float, optional): Alpha for the scatter plot. Defaults to 0.8. edgecolors (str, optional): Edge colors for the scatter plot. Defaults to 'none'. Examples: >>> v = np.random.rand(100) >>> f = np.random.rand(100) >>> plt_color_scatter(v, f) """ # Calculate 2D histogram and corresponding colors hist, xedges, yedges = np.histogram2d(v, f, bins=bins) colors = [ hist[ min(np.digitize(v[i], xedges, right=True) - 1, hist.shape[0] - 1), min(np.digitize(f[i], yedges, right=True) - 1, hist.shape[1] - 1), ] for i in range(len(v)) ] # Scatter plot plt.scatter(v, f, c=colors, cmap=cmap, alpha=alpha, edgecolors=edgecolors) def plot_tune_results(csv_file="tune_results.csv"): """ Plot the evolution results stored in an 'tune_results.csv' file. The function generates a scatter plot for each key in the CSV, color-coded based on fitness scores. 
The best-performing configurations are highlighted on the plots. Args: csv_file (str, optional): Path to the CSV file containing the tuning results. Defaults to 'tune_results.csv'. Examples: >>> plot_tune_results('path/to/tune_results.csv') """ import pandas as pd from scipy.ndimage import gaussian_filter1d # Scatter plots for each hyperparameter csv_file = Path(csv_file) data = pd.read_csv(csv_file) num_metrics_columns = 1 keys = [x.strip() for x in data.columns][num_metrics_columns:] x = data.values fitness = x[:, 0] # fitness j = np.argmax(fitness) # max fitness index n = math.ceil(len(keys) ** 0.5) # columns and rows in plot plt.figure(figsize=(10, 10), tight_layout=True) for i, k in enumerate(keys): v = x[:, i + num_metrics_columns] mu = v[j] # best single result plt.subplot(n, n, i + 1) plt_color_scatter(v, fitness, cmap="viridis", alpha=0.8, edgecolors="none") plt.plot(mu, fitness.max(), "k+", markersize=15) plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9}) # limit to 40 characters plt.tick_params(axis="both", labelsize=8) # Set axis label size to 8 if i % n != 0: plt.yticks([]) file = csv_file.with_name("tune_scatter_plots.png") # filename plt.savefig(file, dpi=200) plt.close() LOGGER.info(f"Saved {file}") # Fitness vs iteration x = range(1, len(fitness) + 1) plt.figure(figsize=(10, 6), tight_layout=True) plt.plot(x, fitness, marker="o", linestyle="none", label="fitness") plt.plot(x, gaussian_filter1d(fitness, sigma=3), ":", label="smoothed", linewidth=2) # smoothing line plt.title("Fitness vs Iteration") plt.xlabel("Iteration") plt.ylabel("Fitness") plt.grid(True) plt.legend() file = csv_file.with_name("tune_fitness.png") # filename plt.savefig(file, dpi=200) plt.close() LOGGER.info(f"Saved {file}") def output_to_target(output, max_det=300): """Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting.""" targets = [] for i, o in enumerate(output): box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) j = 
torch.full((conf.shape[0], 1), i) targets.append(torch.cat((j, cls, ops.xyxy2xywh(box), conf), 1)) targets = torch.cat(targets, 0).numpy() return targets[:, 0], targets[:, 1], targets[:, 2:-1], targets[:, -1] def output_to_rotated_target(output, max_det=300): """Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting.""" targets = [] for i, o in enumerate(output): box, conf, cls, angle = o[:max_det].cpu().split((4, 1, 1, 1), 1) j = torch.full((conf.shape[0], 1), i) targets.append(torch.cat((j, cls, box, angle, conf), 1)) targets = torch.cat(targets, 0).numpy() return targets[:, 0], targets[:, 1], targets[:, 2:-1], targets[:, -1] def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")): """ Visualize feature maps of a given model module during inference. Args: x (torch.Tensor): Features to be visualized. module_type (str): Module type. stage (int): Module stage within the model. n (int, optional): Maximum number of feature maps to plot. Defaults to 32. save_dir (Path, optional): Directory to save results. Defaults to Path('runs/detect/exp'). """ for m in ["Detect", "Pose", "Segment"]: if m in module_type: return batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols ax = ax.ravel() plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis("off") LOGGER.info(f"Saving {f}... ({n}/{channels})") plt.savefig(f, dpi=300, bbox_inches="tight") plt.close() np.save(str(f.with_suffix(".npy")), x[0].cpu().numpy()) # npy save
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/plotting.py
Python
unknown
44,819
# Ultralytics YOLO 🚀, AGPL-3.0 license import torch import torch.nn as nn from .checks import check_version from .metrics import bbox_iou, probiou from .ops import xywhr2xyxyxyxy TORCH_1_10 = check_version(torch.__version__, "1.10.0") class TaskAlignedAssigner(nn.Module): """ A task-aligned assigner for object detection. This class assigns ground-truth (gt) objects to anchors based on the task-aligned metric, which combines both classification and localization information. Attributes: topk (int): The number of top candidates to consider. num_classes (int): The number of object classes. alpha (float): The alpha parameter for the classification component of the task-aligned metric. beta (float): The beta parameter for the localization component of the task-aligned metric. eps (float): A small value to prevent division by zero. """ def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): """Initialize a TaskAlignedAssigner object with customizable hyperparameters.""" super().__init__() self.topk = topk self.num_classes = num_classes self.bg_idx = num_classes self.alpha = alpha self.beta = beta self.eps = eps @torch.no_grad() def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): """ Compute the task-aligned assignment. Reference code is available at https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py. 
Args: pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) anc_points (Tensor): shape(num_total_anchors, 2) gt_labels (Tensor): shape(bs, n_max_boxes, 1) gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) mask_gt (Tensor): shape(bs, n_max_boxes, 1) Returns: target_labels (Tensor): shape(bs, num_total_anchors) target_bboxes (Tensor): shape(bs, num_total_anchors, 4) target_scores (Tensor): shape(bs, num_total_anchors, num_classes) fg_mask (Tensor): shape(bs, num_total_anchors) target_gt_idx (Tensor): shape(bs, num_total_anchors) """ self.bs = pd_scores.shape[0] self.n_max_boxes = gt_bboxes.shape[1] if self.n_max_boxes == 0: device = gt_bboxes.device return ( torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), ) mask_pos, align_metric, overlaps = self.get_pos_mask( pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt ) target_gt_idx, fg_mask, mask_pos = self.select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) # Assigned target target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) # Normalize align_metric *= mask_pos pos_align_metrics = align_metric.amax(dim=-1, keepdim=True) # b, max_num_obj pos_overlaps = (overlaps * mask_pos).amax(dim=-1, keepdim=True) # b, max_num_obj norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) target_scores = target_scores * norm_align_metric return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): """Get in_gts mask, (b, max_num_obj, h*w).""" mask_in_gts = self.select_candidates_in_gts(anc_points, gt_bboxes) # Get anchor_align metric, (b, max_num_obj, h*w) 
align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts * mask_gt) # Get topk_metric mask, (b, max_num_obj, h*w) mask_topk = self.select_topk_candidates(align_metric, topk_mask=mask_gt.expand(-1, -1, self.topk).bool()) # Merge all mask to a final mask, (b, max_num_obj, h*w) mask_pos = mask_topk * mask_in_gts * mask_gt return mask_pos, align_metric, overlaps def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_gt): """Compute alignment metric given predicted and ground truth bounding boxes.""" na = pd_bboxes.shape[-2] mask_gt = mask_gt.bool() # b, max_num_obj, h*w overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device) bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device) ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj ind[0] = torch.arange(end=self.bs).view(-1, 1).expand(-1, self.n_max_boxes) # b, max_num_obj ind[1] = gt_labels.squeeze(-1) # b, max_num_obj # Get the scores of each grid for each gt cls bbox_scores[mask_gt] = pd_scores[ind[0], :, ind[1]][mask_gt] # b, max_num_obj, h*w # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_gt] gt_boxes = gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_gt] overlaps[mask_gt] = self.iou_calculation(gt_boxes, pd_boxes) align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) return align_metric, overlaps def iou_calculation(self, gt_bboxes, pd_bboxes): """Iou calculation for horizontal bounding boxes.""" return bbox_iou(gt_bboxes, pd_bboxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0) def select_topk_candidates(self, metrics, largest=True, topk_mask=None): """ Select the top-k candidates based on the given metrics. 
Args: metrics (Tensor): A tensor of shape (b, max_num_obj, h*w), where b is the batch size, max_num_obj is the maximum number of objects, and h*w represents the total number of anchor points. largest (bool): If True, select the largest values; otherwise, select the smallest values. topk_mask (Tensor): An optional boolean tensor of shape (b, max_num_obj, topk), where topk is the number of top candidates to consider. If not provided, the top-k values are automatically computed based on the given metrics. Returns: (Tensor): A tensor of shape (b, max_num_obj, h*w) containing the selected top-k candidates. """ # (b, max_num_obj, topk) topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) if topk_mask is None: topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs) # (b, max_num_obj, topk) topk_idxs.masked_fill_(~topk_mask, 0) # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) count_tensor = torch.zeros(metrics.shape, dtype=torch.int8, device=topk_idxs.device) ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device) for k in range(self.topk): # Expand topk_idxs for each value of k and add 1 at the specified positions count_tensor.scatter_add_(-1, topk_idxs[:, :, k : k + 1], ones) # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device)) # Filter invalid bboxes count_tensor.masked_fill_(count_tensor > 1, 0) return count_tensor.to(metrics.dtype) def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): """ Compute target labels, target bounding boxes, and target scores for the positive anchor points. Args: gt_labels (Tensor): Ground truth labels of shape (b, max_num_obj, 1), where b is the batch size and max_num_obj is the maximum number of objects. gt_bboxes (Tensor): Ground truth bounding boxes of shape (b, max_num_obj, 4). 
target_gt_idx (Tensor): Indices of the assigned ground truth objects for positive anchor points, with shape (b, h*w), where h*w is the total number of anchor points. fg_mask (Tensor): A boolean tensor of shape (b, h*w) indicating the positive (foreground) anchor points. Returns: (Tuple[Tensor, Tensor, Tensor]): A tuple containing the following tensors: - target_labels (Tensor): Shape (b, h*w), containing the target labels for positive anchor points. - target_bboxes (Tensor): Shape (b, h*w, 4), containing the target bounding boxes for positive anchor points. - target_scores (Tensor): Shape (b, h*w, num_classes), containing the target scores for positive anchor points, where num_classes is the number of object classes. """ # Assigned target labels, (b, 1) batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w, 4) target_bboxes = gt_bboxes.view(-1, gt_bboxes.shape[-1])[target_gt_idx] # Assigned target scores target_labels.clamp_(0) # 10x faster than F.one_hot() target_scores = torch.zeros( (target_labels.shape[0], target_labels.shape[1], self.num_classes), dtype=torch.int64, device=target_labels.device, ) # (b, h*w, 80) target_scores.scatter_(2, target_labels.unsqueeze(-1), 1) fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) return target_labels, target_bboxes, target_scores @staticmethod def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): """ Select the positive anchor center in gt. 
Args: xy_centers (Tensor): shape(h*w, 2) gt_bboxes (Tensor): shape(b, n_boxes, 4) Returns: (Tensor): shape(b, n_boxes, h*w) """ n_anchors = xy_centers.shape[0] bs, n_boxes, _ = gt_bboxes.shape lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) return bbox_deltas.amin(3).gt_(eps) @staticmethod def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): """ If an anchor box is assigned to multiple gts, the one with the highest IoI will be selected. Args: mask_pos (Tensor): shape(b, n_max_boxes, h*w) overlaps (Tensor): shape(b, n_max_boxes, h*w) Returns: target_gt_idx (Tensor): shape(b, h*w) fg_mask (Tensor): shape(b, h*w) mask_pos (Tensor): shape(b, n_max_boxes, h*w) """ # (b, n_max_boxes, h*w) -> (b, h*w) fg_mask = mask_pos.sum(-2) if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1) # (b, n_max_boxes, h*w) max_overlaps_idx = overlaps.argmax(1) # (b, h*w) is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device) is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1) mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float() # (b, n_max_boxes, h*w) fg_mask = mask_pos.sum(-2) # Find each grid serve which gt(index) target_gt_idx = mask_pos.argmax(-2) # (b, h*w) return target_gt_idx, fg_mask, mask_pos class RotatedTaskAlignedAssigner(TaskAlignedAssigner): def iou_calculation(self, gt_bboxes, pd_bboxes): """Iou calculation for rotated bounding boxes.""" return probiou(gt_bboxes, pd_bboxes).squeeze(-1).clamp_(0) @staticmethod def select_candidates_in_gts(xy_centers, gt_bboxes): """ Select the positive anchor center in gt for rotated bounding boxes. 
Args: xy_centers (Tensor): shape(h*w, 2) gt_bboxes (Tensor): shape(b, n_boxes, 5) Returns: (Tensor): shape(b, n_boxes, h*w) """ # (b, n_boxes, 5) --> (b, n_boxes, 4, 2) corners = xywhr2xyxyxyxy(gt_bboxes) # (b, n_boxes, 1, 2) a, b, _, d = corners.split(1, dim=-2) ab = b - a ad = d - a # (b, n_boxes, h*w, 2) ap = xy_centers - a norm_ab = (ab * ab).sum(dim=-1) norm_ad = (ad * ad).sum(dim=-1) ap_dot_ab = (ap * ab).sum(dim=-1) ap_dot_ad = (ap * ad).sum(dim=-1) return (ap_dot_ab >= 0) & (ap_dot_ab <= norm_ab) & (ap_dot_ad >= 0) & (ap_dot_ad <= norm_ad) # is_in_box def make_anchors(feats, strides, grid_cell_offset=0.5): """Generate anchors from features.""" anchor_points, stride_tensor = [], [] assert feats is not None dtype, device = feats[0].dtype, feats[0].device for i, stride in enumerate(strides): _, _, h, w = feats[i].shape sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y sy, sx = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx) anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) return torch.cat(anchor_points), torch.cat(stride_tensor) def dist2bbox(distance, anchor_points, xywh=True, dim=-1): """Transform distance(ltrb) to box(xywh or xyxy).""" lt, rb = distance.chunk(2, dim) x1y1 = anchor_points - lt x2y2 = anchor_points + rb if xywh: c_xy = (x1y1 + x2y2) / 2 wh = x2y2 - x1y1 return torch.cat((c_xy, wh), dim) # xywh bbox return torch.cat((x1y1, x2y2), dim) # xyxy bbox def bbox2dist(anchor_points, bbox, reg_max): """Transform bbox(xyxy) to dist(ltrb).""" x1y1, x2y2 = bbox.chunk(2, -1) return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp_(0, reg_max - 0.01) # dist (lt, rb) def dist2rbox(pred_dist, pred_angle, anchor_points, dim=-1): """ Decode predicted object bounding box coordinates from anchor points and 
distribution. Args: pred_dist (torch.Tensor): Predicted rotated distance, (bs, h*w, 4). pred_angle (torch.Tensor): Predicted angle, (bs, h*w, 1). anchor_points (torch.Tensor): Anchor points, (h*w, 2). Returns: (torch.Tensor): Predicted rotated bounding boxes, (bs, h*w, 4). """ lt, rb = pred_dist.split(2, dim=dim) cos, sin = torch.cos(pred_angle), torch.sin(pred_angle) # (bs, h*w, 1) xf, yf = ((rb - lt) / 2).split(1, dim=dim) x, y = xf * cos - yf * sin, xf * sin + yf * cos xy = torch.cat([x, y], dim=dim) + anchor_points return torch.cat([xy, lt + rb], dim=dim)
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/tal.py
Python
unknown
16,017
# Ultralytics YOLO 🚀, AGPL-3.0 license

import math
import os
import platform
import random
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path
from typing import Union

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torchvision

from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__
from ultralytics.utils.checks import check_version

try:
    import thop
except ImportError:
    thop = None  # FLOPs computation becomes a no-op (returns 0.0) without thop

# Version gates used throughout this module to pick newer torch/torchvision APIs
TORCH_1_9 = check_version(torch.__version__, "1.9.0")
TORCH_2_0 = check_version(torch.__version__, "2.0.0")
TORCHVISION_0_10 = check_version(torchvision.__version__, "0.10.0")
TORCHVISION_0_11 = check_version(torchvision.__version__, "0.11.0")
TORCHVISION_0_13 = check_version(torchvision.__version__, "0.13.0")


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """Decorator to make all processes in distributed training wait for each local_master to do something."""
    initialized = torch.distributed.is_available() and torch.distributed.is_initialized()
    if initialized and local_rank not in (-1, 0):
        dist.barrier(device_ids=[local_rank])  # non-master ranks wait here until rank 0 finishes
    yield
    if initialized and local_rank == 0:
        dist.barrier(device_ids=[0])  # rank 0 releases the other ranks


def smart_inference_mode():
    """Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator."""

    def decorate(fn):
        """Applies appropriate torch decorator for inference mode based on torch version."""
        if TORCH_1_9 and torch.is_inference_mode_enabled():
            return fn  # already in inference_mode, act as a pass-through
        else:
            return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)

    return decorate


def get_cpu_info():
    """Return a string with system CPU information, i.e. 'Apple M2'."""
    import cpuinfo  # pip install py-cpuinfo

    k = "brand_raw", "hardware_raw", "arch_string_raw"  # info keys sorted by preference (not all keys always available)
    info = cpuinfo.get_cpu_info()  # info dict
    string = info.get(k[0] if k[0] in info else k[1] if k[1] in info else k[2], "unknown")
    return string.replace("(R)", "").replace("CPU ", "").replace("@ ", "")


def select_device(device="", batch=0, newline=False, verbose=True):
    """
    Selects the appropriate PyTorch device based on the provided arguments.

    The function takes a string specifying the device or a torch.device object and returns a torch.device object
    representing the selected device. The function also validates the number of available devices and raises an
    exception if the requested device(s) are not available.

    Args:
        device (str | torch.device, optional): Device string or torch.device object.
            Options are 'None', 'cpu', or 'cuda', or '0' or '0,1,2,3'. Defaults to an empty string, which auto-selects
            the first available GPU, or CPU if no GPU is available.
        batch (int, optional): Batch size being used in your model. Defaults to 0.
        newline (bool, optional): If True, adds a newline at the end of the log string. Defaults to False.
        verbose (bool, optional): If True, logs the device information. Defaults to True.

    Returns:
        (torch.device): Selected device.

    Raises:
        ValueError: If the specified device is not available or if the batch size is not a multiple of the number of
            devices when using multiple GPUs.

    Examples:
        >>> select_device('cuda:0')
        device(type='cuda', index=0)

        >>> select_device('cpu')
        device(type='cpu')

    Note:
        Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
    """
    if isinstance(device, torch.device):
        return device

    s = f"Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} "
    device = str(device).lower()
    for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
        device = device.replace(remove, "")  # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
    cpu = device == "cpu"
    mps = device in ("mps", "mps:0")  # Apple Metal Performance Shaders (MPS)
    if cpu or mps:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        if device == "cuda":
            device = "0"
        visible = os.environ.get("CUDA_VISIBLE_DEVICES", None)
        os.environ["CUDA_VISIBLE_DEVICES"] = device  # set environment variable - must be before assert is_available()
        if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(",", ""))):
            LOGGER.info(s)
            install = (
                "See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no "
                "CUDA devices are seen by torch.\n"
                if torch.cuda.device_count() == 0
                else ""
            )
            raise ValueError(
                f"Invalid CUDA 'device={device}' requested."
                f" Use 'device=cpu' or pass valid CUDA device(s) if available,"
                f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n"
                f"\ntorch.cuda.is_available(): {torch.cuda.is_available()}"
                f"\ntorch.cuda.device_count(): {torch.cuda.device_count()}"
                f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n"
                f"{install}"
            )

    if not cpu and not mps and torch.cuda.is_available():  # prefer GPU if available
        devices = device.split(",") if device else "0"  # range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch > 0 and batch % n != 0:  # check batch_size is divisible by device_count
            raise ValueError(
                f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
                f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
            )
        space = " " * (len(s) + 1)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n"  # bytes to MB
        arg = "cuda:0"
    elif mps and TORCH_2_0 and torch.backends.mps.is_available():
        # Prefer MPS if available
        s += f"MPS ({get_cpu_info()})\n"
        arg = "mps"
    else:  # revert to CPU
        s += f"CPU ({get_cpu_info()})\n"
        arg = "cpu"

    if verbose:
        LOGGER.info(s if newline else s.rstrip())
    return torch.device(arg)


def time_sync():
    """PyTorch-accurate time."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # CUDA kernels are async; synchronize so wall-clock reflects GPU work
    return time.time()


def fuse_conv_and_bn(conv, bn):
    """Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/."""
    fusedconv = (
        nn.Conv2d(
            conv.in_channels,
            conv.out_channels,
            kernel_size=conv.kernel_size,
            stride=conv.stride,
            padding=conv.padding,
            dilation=conv.dilation,
            groups=conv.groups,
            bias=True,
        )
        .requires_grad_(False)
        .to(conv.weight.device)
    )

    # Prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # Prepare spatial bias
    b_conv = torch.zeros(conv.weight.shape[0], device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv


def fuse_deconv_and_bn(deconv, bn):
    """Fuse ConvTranspose2d() and BatchNorm2d() layers."""
    fuseddconv = (
        nn.ConvTranspose2d(
            deconv.in_channels,
            deconv.out_channels,
            kernel_size=deconv.kernel_size,
            stride=deconv.stride,
            padding=deconv.padding,
            output_padding=deconv.output_padding,
            dilation=deconv.dilation,
            groups=deconv.groups,
            bias=True,
        )
        .requires_grad_(False)
        .to(deconv.weight.device)
    )

    # Prepare filters
    w_deconv = deconv.weight.clone().view(deconv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape))

    # Prepare spatial bias
    b_conv = torch.zeros(deconv.weight.shape[1], device=deconv.weight.device) if deconv.bias is None else deconv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fuseddconv


def model_info(model, detailed=False, verbose=True, imgsz=640):
    """
    Model information.

    imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320].
    """
    if not verbose:
        return
    n_p = get_num_params(model)  # number of parameters
    n_g = get_num_gradients(model)  # number of gradients
    n_l = len(list(model.modules()))  # number of layers
    if detailed:
        LOGGER.info(
            f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}"
        )
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace("module_list.", "")
            LOGGER.info(
                "%5g %40s %9s %12g %20s %10.3g %10.3g %10s"
                % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype)
            )

    flops = get_flops(model, imgsz)
    fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else ""
    fs = f", {flops:.1f} GFLOPs" if flops else ""
    yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "")
    model_name = Path(yaml_file).stem.replace("yolo", "YOLO") or "Model"
    LOGGER.info(f"{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}")
    return n_l, n_p, n_g, flops


def get_num_params(model):
    """Return the total number of parameters in a YOLO model."""
    return sum(x.numel() for x in model.parameters())


def get_num_gradients(model):
    """Return the total number of parameters with gradients in a YOLO model."""
    return sum(x.numel() for x in model.parameters() if x.requires_grad)


def model_info_for_loggers(trainer):
    """
    Return model info dict with useful model information.

    Example:
        YOLOv8n info for loggers
        ```python
        results = {'model/parameters': 3151904,
                   'model/GFLOPs': 8.746,
                   'model/speed_ONNX(ms)': 41.244,
                   'model/speed_TensorRT(ms)': 3.211,
                   'model/speed_PyTorch(ms)': 18.755}
        ```
    """
    if trainer.args.profile:  # profile ONNX and TensorRT times
        from ultralytics.utils.benchmarks import ProfileModels

        results = ProfileModels([trainer.last], device=trainer.device).profile()[0]
        results.pop("model/name")
    else:  # only return PyTorch times from most recent validation
        results = {
            "model/parameters": get_num_params(trainer.model),
            "model/GFLOPs": round(get_flops(trainer.model), 3),
        }
    results["model/speed_PyTorch(ms)"] = round(trainer.validator.speed["inference"], 3)
    return results


def get_flops(model, imgsz=640):
    """Return a YOLO model's FLOPs."""
    if not thop:
        return 0.0  # if not installed return 0.0 GFLOPs
    try:
        model = de_parallel(model)
        p = next(model.parameters())
        if not isinstance(imgsz, list):
            imgsz = [imgsz, imgsz]  # expand if int/float
        try:
            # Use stride size for input tensor
            stride = max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32  # max stride
            im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
            flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2  # stride GFLOPs
            return flops * imgsz[0] / stride * imgsz[1] / stride  # imgsz GFLOPs
        except Exception:
            # Use actual image size for input tensor (i.e. required for RTDETR models)
            im = torch.empty((1, p.shape[1], *imgsz), device=p.device)  # input image in BCHW format
            return thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1e9 * 2  # imgsz GFLOPs
    except Exception:
        return 0.0


def get_flops_with_torch_profiler(model, imgsz=640):
    """Compute model FLOPs (thop alternative)."""
    if TORCH_2_0:
        model = de_parallel(model)
        p = next(model.parameters())
        stride = (max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32) * 2  # max stride
        im = torch.zeros((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
        with torch.profiler.profile(with_flops=True) as prof:
            model(im)
        flops = sum(x.flops for x in prof.key_averages()) / 1e9
        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
        flops = flops * imgsz[0] / stride * imgsz[1] / stride  # 640x640 GFLOPs
        return flops
    return 0


def initialize_weights(model):
    """Initialize model weights to random values."""
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True


def scale_img(img, ratio=1.0, same_shape=False, gs=32):
    """Scales and pads an image tensor of shape img(bs,3,y,x) based on given ratio and grid size gs, optionally
    retaining the original shape.
    """
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    s = (int(h * ratio), int(w * ratio))  # new size
    img = F.interpolate(img, size=s, mode="bilinear", align_corners=False)  # resize
    if not same_shape:  # pad/crop img
        h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean


def make_divisible(x, divisor):
    """Returns nearest x divisible by divisor."""
    if isinstance(divisor, torch.Tensor):
        divisor = int(divisor.max())  # to int
    return math.ceil(x / divisor) * divisor


def copy_attr(a, b, include=(), exclude=()):
    """Copies attributes from object 'b' to object 'a', with options to include/exclude certain attributes."""
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith("_") or k in exclude:
            continue
        else:
            setattr(a, k, v)


def get_latest_opset():
    """Return second-most (for maturity) recently supported ONNX opset by this version of torch."""
    return max(int(k[14:]) for k in vars(torch.onnx) if "symbolic_opset" in k) - 1  # opset


def intersect_dicts(da, db, exclude=()):
    """Returns a dictionary of intersecting keys with matching shapes, excluding 'exclude' keys, using da values."""
    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}


def is_parallel(model):
    """Returns True if model is of type DP or DDP."""
    return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel))


def de_parallel(model):
    """De-parallelize a model: returns single-GPU model if model is of type DP or DDP."""
    return model.module if is_parallel(model) else model


def one_cycle(y1=0.0, y2=1.0, steps=100):
    """Returns a lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf."""
    return lambda x: max((1 - math.cos(x * math.pi / steps)) / 2, 0) * (y2 - y1) + y1


def init_seeds(seed=0, deterministic=False):
    """Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe
    # torch.backends.cudnn.benchmark = True  # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
    if deterministic:
        if TORCH_2_0:
            torch.use_deterministic_algorithms(True, warn_only=True)  # warn if deterministic is not possible
            torch.backends.cudnn.deterministic = True
            os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
            os.environ["PYTHONHASHSEED"] = str(seed)
        else:
            LOGGER.warning("WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.")
    else:
        torch.use_deterministic_algorithms(False)
        torch.backends.cudnn.deterministic = False


class ModelEMA:
    """Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
    Keeps a moving average of everything in the model state_dict (parameters and buffers)
    For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    To disable EMA set the `enabled` attribute to `False`.
    """

    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
        """Create EMA."""
        self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)
        self.enabled = True

    def update(self, model):
        """Update EMA parameters."""
        if self.enabled:
            self.updates += 1
            d = self.decay(self.updates)

            msd = de_parallel(model).state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # true for FP16 and FP32
                    v *= d
                    v += (1 - d) * msd[k].detach()
                    # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype},  model {msd[k].dtype}'

    def update_attr(self, model, include=(), exclude=("process_group", "reducer")):
        """Updates attributes and saves stripped model with optimizer removed."""
        if self.enabled:
            copy_attr(self.ema, model, include, exclude)


def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
    """
    Strip optimizer from 'f' to finalize training, optionally save as 's'.

    Args:
        f (str): file path to model to strip the optimizer from. Default is 'best.pt'.
        s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten.

    Returns:
        None

    Example:
        ```python
        from pathlib import Path
        from ultralytics.utils.torch_utils import strip_optimizer

        for f in Path('path/to/weights').rglob('*.pt'):
            strip_optimizer(f)
        ```
    """
    x = torch.load(f, map_location=torch.device("cpu"))
    if "model" not in x:
        LOGGER.info(f"Skipping {f}, not a valid Ultralytics model.")
        return

    if hasattr(x["model"], "args"):
        x["model"].args = dict(x["model"].args)  # convert from IterableSimpleNamespace to dict
    # BUGFIX: previously `args` was None when 'train_args' was missing, crashing on `args.items()` below
    args = {**DEFAULT_CFG_DICT, **x.get("train_args", {})}  # combine model and default args, preferring model args
    if x.get("ema"):
        x["model"] = x["ema"]  # replace model with ema
    for k in "optimizer", "best_fitness", "ema", "updates":  # keys
        x[k] = None
    x["epoch"] = -1
    x["model"].half()  # to FP16
    for p in x["model"].parameters():
        p.requires_grad = False
    x["train_args"] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS}  # strip non-default keys
    # x['model'].args = x['train_args']

    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1e6  # file size
    LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")


def profile(input, ops, n=10, device=None):
    """
    Ultralytics speed, memory and FLOPs profiler.

    Example:
        ```python
        from ultralytics.utils.torch_utils import profile

        input = torch.randn(16, 3, 640, 640)
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(input, [m1, m2], n=100)  # profile over 100 iterations
        ```
    """
    results = []
    if not isinstance(device, torch.device):
        device = select_device(device)
    LOGGER.info(
        f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
        f"{'input':>24s}{'output':>24s}"
    )

    for x in input if isinstance(input, list) else [input]:
        x = x.to(device)
        x.requires_grad = True
        for m in ops if isinstance(ops, list) else [ops]:
            m = m.to(device) if hasattr(m, "to") else m  # device
            m = m.half() if hasattr(m, "half") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
            try:
                flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs
            except Exception:
                flops = 0

            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception:  # no backward method
                        # print(e)  # for debug
                        t[2] = float("nan")
                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward
                mem = torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0  # (GB)
                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else "list" for x in (x, y))  # shapes
                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
                LOGGER.info(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}")
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                LOGGER.info(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results


class EarlyStopping:
    """Early stopping class that stops training when a specified number of epochs have passed without improvement."""

    def __init__(self, patience=50):
        """
        Initialize early stopping object.

        Args:
            patience (int, optional): Number of epochs to wait after fitness stops improving before stopping.
        """
        self.best_fitness = 0.0  # i.e. mAP
        self.best_epoch = 0
        self.patience = patience or float("inf")  # epochs to wait after fitness stops improving to stop
        self.possible_stop = False  # possible stop may occur next epoch

    def __call__(self, epoch, fitness):
        """
        Check whether to stop training.

        Args:
            epoch (int): Current epoch of training
            fitness (float): Fitness value of current epoch

        Returns:
            (bool): True if training should stop, False otherwise
        """
        if fitness is None:  # check if fitness=None (happens when val=False)
            return False

        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training
            self.best_epoch = epoch
            self.best_fitness = fitness
        delta = epoch - self.best_epoch  # epochs without improvement
        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch
        stop = delta >= self.patience  # stop training if patience exceeded
        if stop:
            LOGGER.info(
                f"Stopping training early as no improvement observed in last {self.patience} epochs. "
                f"Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n"
                f"To update EarlyStopping(patience={self.patience}) pass a new patience value, "
                f"i.e. `patience=300` or use `patience=0` to disable EarlyStopping."
            )
        return stop
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/torch_utils.py
Python
unknown
25,143
class TritonRemoteModel:
    """
    Client for interacting with a remote Triton Inference Server model.

    Attributes:
        endpoint (str): The name of the model on the Triton server.
        url (str): The URL of the Triton server.
        triton_client: The Triton client (either HTTP or gRPC).
        InferInput: The input class for the Triton client.
        InferRequestedOutput: The output request class for the Triton client.
        input_formats (List[str]): The data types of the model inputs.
        np_input_formats (List[type]): The numpy data types of the model inputs.
        input_names (List[str]): The names of the model inputs.
        output_names (List[str]): The names of the model outputs.
    """

    def __init__(self, url: str, endpoint: str = "", scheme: str = ""):
        """
        Initialize the TritonRemoteModel.

        Either supply `endpoint` and `scheme` explicitly, or encode everything in a
        collective `url` of the form <scheme>://<netloc>/<endpoint>/<task_name>.

        Args:
            url (str): The URL of the Triton server.
            endpoint (str): The name of the model on the Triton server.
            scheme (str): The communication scheme ('http' or 'grpc').
        """
        if not (endpoint or scheme):
            # Recover scheme, host and model name from the collective URL form
            parsed = urlsplit(url)
            endpoint = parsed.path.strip("/").split("/")[0]
            scheme = parsed.scheme
            url = parsed.netloc

        self.endpoint = endpoint
        self.url = url

        # Select HTTP or gRPC client flavour; both modules expose the same class names,
        # but gRPC returns a protobuf-backed config that must be requested as JSON
        if scheme == "http":
            import tritonclient.http as client  # noqa

            self.triton_client = client.InferenceServerClient(url=self.url, verbose=False, ssl=False)
            config = self.triton_client.get_model_config(endpoint)
        else:
            import tritonclient.grpc as client  # noqa

            self.triton_client = client.InferenceServerClient(url=self.url, verbose=False, ssl=False)
            config = self.triton_client.get_model_config(endpoint, as_json=True)["config"]

        # Sort output names alphabetically, i.e. 'output0', 'output1', etc.
        config["output"] = sorted(config["output"], key=lambda x: x.get("name"))

        # Cache model metadata needed by __call__
        type_map = {"TYPE_FP32": np.float32, "TYPE_FP16": np.float16, "TYPE_UINT8": np.uint8}
        self.InferRequestedOutput = client.InferRequestedOutput
        self.InferInput = client.InferInput
        self.input_formats = [inp["data_type"] for inp in config["input"]]
        self.np_input_formats = [type_map[fmt] for fmt in self.input_formats]
        self.input_names = [inp["name"] for inp in config["input"]]
        self.output_names = [out["name"] for out in config["output"]]

    def __call__(self, *inputs: np.ndarray) -> List[np.ndarray]:
        """
        Call the model with the given inputs.

        Args:
            *inputs (List[np.ndarray]): Input data to the model.

        Returns:
            (List[np.ndarray]): Model outputs, cast back to the dtype of the first input.
        """
        out_dtype = inputs[0].dtype
        infer_inputs = []
        for i, arr in enumerate(inputs):
            # Coerce each array to the dtype the server's model config expects
            if arr.dtype != self.np_input_formats[i]:
                arr = arr.astype(self.np_input_formats[i])
            placeholder = self.InferInput(self.input_names[i], [*arr.shape], self.input_formats[i].replace("TYPE_", ""))
            placeholder.set_data_from_numpy(arr)
            infer_inputs.append(placeholder)

        requested = [self.InferRequestedOutput(name) for name in self.output_names]
        response = self.triton_client.infer(model_name=self.endpoint, inputs=infer_inputs, outputs=requested)
        return [response.as_numpy(name).astype(out_dtype) for name in self.output_names]
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/triton.py
Python
unknown
3,936
# Ultralytics YOLO 🚀, AGPL-3.0 license

import subprocess

from ultralytics.cfg import TASK2DATA, TASK2METRIC, get_save_dir
from ultralytics.utils import DEFAULT_CFG, DEFAULT_CFG_DICT, LOGGER, NUM_THREADS


def run_ray_tune(
    model, space: dict = None, grace_period: int = 10, gpu_per_trial: int = None, max_samples: int = 10, **train_args
):
    """
    Runs hyperparameter tuning using Ray Tune.

    Args:
        model (YOLO): Model to run the tuner on.
        space (dict, optional): The hyperparameter search space. Defaults to None.
        grace_period (int, optional): The grace period in epochs of the ASHA scheduler. Defaults to 10.
        gpu_per_trial (int, optional): The number of GPUs to allocate per trial. Defaults to None.
        max_samples (int, optional): The maximum number of trials to run. Defaults to 10.
        train_args (dict, optional): Additional arguments to pass to the `train()` method. Defaults to {}.

    Returns:
        (dict): A dictionary containing the results of the hyperparameter search.

    Example:
        ```python
        from ultralytics import YOLO

        # Load a YOLOv8n model
        model = YOLO('yolov8n.pt')

        # Start tuning hyperparameters for YOLOv8n training on the COCO8 dataset
        result_grid = model.tune(data='coco8.yaml', use_ray=True)
        ```
    """
    LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
    # NOTE(review): `train_args` is collected via **kwargs so it is always a dict; this guard is defensive/dead code
    if train_args is None:
        train_args = {}

    # Ray Tune is installed on demand; any import failure is surfaced as a clear install hint
    try:
        subprocess.run("pip install ray[tune]".split(), check=True)

        import ray
        from ray import tune
        from ray.air import RunConfig
        from ray.air.integrations.wandb import WandbLoggerCallback
        from ray.tune.schedulers import ASHAScheduler
    except ImportError:
        raise ModuleNotFoundError('Tuning hyperparameters requires Ray Tune. Install with: pip install "ray[tune]"')

    # wandb logging is optional; fall back silently to no external experiment tracking
    try:
        import wandb

        assert hasattr(wandb, "__version__")
    except (ImportError, AssertionError):
        wandb = False

    # Default search space: augmentation probabilities, loss gains and optimizer schedule ranges
    default_space = {
        # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
        "lr0": tune.uniform(1e-5, 1e-1),
        "lrf": tune.uniform(0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
        "momentum": tune.uniform(0.6, 0.98),  # SGD momentum/Adam beta1
        "weight_decay": tune.uniform(0.0, 0.001),  # optimizer weight decay 5e-4
        "warmup_epochs": tune.uniform(0.0, 5.0),  # warmup epochs (fractions ok)
        "warmup_momentum": tune.uniform(0.0, 0.95),  # warmup initial momentum
        "box": tune.uniform(0.02, 0.2),  # box loss gain
        "cls": tune.uniform(0.2, 4.0),  # cls loss gain (scale with pixels)
        "hsv_h": tune.uniform(0.0, 0.1),  # image HSV-Hue augmentation (fraction)
        "hsv_s": tune.uniform(0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
        "hsv_v": tune.uniform(0.0, 0.9),  # image HSV-Value augmentation (fraction)
        "degrees": tune.uniform(0.0, 45.0),  # image rotation (+/- deg)
        "translate": tune.uniform(0.0, 0.9),  # image translation (+/- fraction)
        "scale": tune.uniform(0.0, 0.9),  # image scale (+/- gain)
        "shear": tune.uniform(0.0, 10.0),  # image shear (+/- deg)
        "perspective": tune.uniform(0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
        "flipud": tune.uniform(0.0, 1.0),  # image flip up-down (probability)
        "fliplr": tune.uniform(0.0, 1.0),  # image flip left-right (probability)
        "mosaic": tune.uniform(0.0, 1.0),  # image mosaic (probability)
        "mixup": tune.uniform(0.0, 1.0),  # image mixup (probability)
        "copy_paste": tune.uniform(0.0, 1.0),  # segment copy-paste (probability)
    }

    # Put the model in ray store so each trial worker fetches it instead of pickling it per-trial
    task = model.task
    model_in_store = ray.put(model)

    def _tune(config):
        """
        Trains the YOLO model with the specified hyperparameters and additional arguments.

        Args:
            config (dict): A dictionary of hyperparameters to use for training.

        Returns:
            None
        """
        model_to_train = ray.get(model_in_store)  # get the model from ray store for tuning
        model_to_train.reset_callbacks()
        config.update(train_args)
        results = model_to_train.train(**config)
        return results.results_dict

    # Get search space
    if not space:
        space = default_space
        LOGGER.warning("WARNING ⚠️ search space not provided, using default search space.")

    # Get dataset
    data = train_args.get("data", TASK2DATA[task])
    space["data"] = data  # NOTE(review): mutates the caller's `space` dict when one was provided
    if "data" not in train_args:
        LOGGER.warning(f'WARNING ⚠️ data not provided, using default "data={data}".')

    # Define the trainable function with allocated resources
    trainable_with_resources = tune.with_resources(_tune, {"cpu": NUM_THREADS, "gpu": gpu_per_trial or 0})

    # Define the ASHA scheduler for hyperparameter search
    asha_scheduler = ASHAScheduler(
        time_attr="epoch",
        metric=TASK2METRIC[task],
        mode="max",
        max_t=train_args.get("epochs") or DEFAULT_CFG_DICT["epochs"] or 100,
        grace_period=grace_period,
        reduction_factor=3,
    )

    # Define the callbacks for the hyperparameter search
    tuner_callbacks = [WandbLoggerCallback(project="YOLOv8-tune")] if wandb else []

    # Create the Ray Tune hyperparameter search tuner
    tune_dir = get_save_dir(DEFAULT_CFG, name="tune").resolve()  # must be absolute dir
    tune_dir.mkdir(parents=True, exist_ok=True)
    tuner = tune.Tuner(
        trainable_with_resources,
        param_space=space,
        tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples),
        run_config=RunConfig(callbacks=tuner_callbacks, storage_path=tune_dir),
    )

    # Run the hyperparameter search
    tuner.fit()

    # Return the results of the hyperparameter search
    return tuner.get_results()
2201_75373101/TargetSingleAndBinocularRanging
ultralytics/utils/tuner.py
Python
unknown
6,003
import sys import numpy as np import threading,time,cv2,json,os from ultralytics import YOLO from threading import Thread from PIL import Image, ImageDraw, ImageFont from PyQt5 import QtGui,QtCore,QtWidgets from PyQt5.QtCore import Qt from PyQt5.QtWidgets import * from PyQt5.QtGui import * from PyQt5.QtCore import * from PyQt5.QtWidgets import QFileDialog, QListWidgetItem, QListView, QMessageBox from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure from test import QMainWindow_new class MplCanvas(FigureCanvas): def __init__(self, parent=None, width=5, height=4, dpi=100): fig = Figure(figsize=(width, height), dpi=dpi) self.axes = fig.add_subplot(111, projection='3d') self.axes.set_xlim(0, 100) self.axes.set_ylim(-50,50) self.axes.set_zlim(-50,50) self.axes.invert_yaxis() super(MplCanvas, self).__init__(fig) class myPredict(QObject): singal = pyqtSignal(int) font1 = QtGui.QFont("Adobe 黑体 Std R") font1.setPixelSize(32) font2 = QtGui.QFont("Adobe 黑体 Std R") font2.setPixelSize(24) font3 = QtGui.QFont("Adobe 黑体 Std R") font3.setPixelSize(16) target_list = [] results_list = [] di = [(640, 360), (1280, 720), (1920, 1080)] size_dic = {0: "小", 1: "中", 2: "大"} dil = 1 Tshow = False N, X, Y, Z = [''], [0], [0], [0] def __init__(self): super(myPredict, self).__init__() with open("runin_predict.json", encoding="utf-8") as file: self.D = json.load(file) self.dil = self.D["show_size"] self.event=threading.Event() self.event.set() self.run=True self.create_window() def show_chinese(self, img, text, pos ,fil): img_pil = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) font = ImageFont.truetype(font='msyh.ttc', size=self.D["font_size"]) draw = ImageDraw.Draw(img_pil) draw.text(pos, text, font=font, fill=fil) # PIL中RGB=(255,0,0)表示红色 img_cv = np.array(img_pil) # PIL图片转换为numpy img = cv2.cvtColor(img_cv, cv2.COLOR_RGB2BGR) # PIL格式转换为OpenCV的BGR格式 return img def mat_to_qimage(self, mat): rgb_image = cv2.cvtColor(mat, 
cv2.COLOR_BGR2RGB) h, w, ch = rgb_image.shape bytes_per_line = ch * w img_qt = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888) return img_qt def create_window(self): # 主界面 self.mainWindow = QMainWindow_new() self.mWw = 590 self.mWh = 880 self.mainWindow.resize(self.mWw, self.mWh) self.mainWindow.move(250, 300) self.mainWindow.setFixedSize(self.mWw, self.mWh) # self.window.setWindowIcon(QIcon(r'ui\image\title.ico')) label_backdrop = QLabel(self.mainWindow) label_backdrop.resize(1050, self.mWh) label_backdrop.setStyleSheet("background-color: rgb(240, 250, 250);") self.label_main_title = QLabel(self.mainWindow) self.label_main_title.resize(self.mWw, 50) self.label_main_title.setText(" 检测目标") self.label_main_title.setFont(QtGui.QFont("Adobe 黑体 Std R", 16)) self.label_main_title.setStyleSheet("background-color: rgb(50, 200, 200);") label_T = QLabel(self.mainWindow) label_T.resize(40, 40) label_T.move(5, 5) label_T.setPixmap(QPixmap(r"ui\label.jpg").scaled(label_T.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)) self.button_main_close = QPushButton(self.mainWindow) self.button_main_close.setStyleSheet("QPushButton {border: none;}") self.button_main_close.setIcon(QIcon(r"ui\close.png")) self.button_main_close.resize(50, 50) self.button_main_close.move(self.mWw - 50, 0) self.button_main_close.clicked.connect(self.mainWindow.close) self.button_main_min = QPushButton(self.mainWindow) self.button_main_min.setStyleSheet("QPushButton {border: none;}") self.button_main_min.setIcon(QIcon(r"ui\min.png")) self.button_main_min.resize(50, 50) self.button_main_min.move(self.mWw - 100, 0) self.button_main_min.clicked.connect(self.mainWindow.showMinimized) # ———————————————————————————————————————————————————————————— self.showWindow = QMainWindow_new() self.showWindow.setFixedSize(self.di[self.dil][0]+self.di[self.dil][1]+80, self.di[self.dil][1]+500) self.showWindow.move(850, 300) self.label_backdrop = QLabel(self.showWindow) 
self.label_backdrop.resize(self.di[self.dil][0]+self.di[self.dil][1]+80, self.di[self.dil][1]+500) self.label_backdrop.setStyleSheet("background-color: rgb(240, 250, 250);") self.label_show_title = QLabel(self.showWindow) self.label_show_title.resize(self.di[self.dil][0]+self.di[self.dil][1]+80, 50) self.label_show_title.setText(" 效果展示") self.label_show_title.setFont(QtGui.QFont("Adobe 黑体 Std R", 16)) self.label_show_title.setStyleSheet("background-color: rgb(50, 200, 200);") self.label_show_T = QLabel(self.showWindow) self.label_show_T.resize(40, 40) self.label_show_T.move(5, 5) self.label_show_T.setPixmap(QPixmap(r"ui\label.jpg").scaled(self.label_show_T.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)) self.button_show_close = QPushButton(self.showWindow) self.button_show_close.setStyleSheet("QPushButton {border: none;}") self.button_show_close.setIcon(QIcon(r"ui\close.png")) self.button_show_close.resize(50, 50) self.button_show_close.move(self.di[self.dil][0]+self.di[self.dil][1]+30, 0) self.button_show_close.clicked.connect(self.showWindow.close) self.button_show_min = QPushButton(self.showWindow) self.button_show_min.setStyleSheet("QPushButton {border: none;}") self.button_show_min.setIcon(QIcon(r"ui\min.png")) self.button_show_min.resize(50, 50) self.button_show_min.move(self.di[self.dil][0]+self.di[self.dil][1]-20, 0) self.button_show_min.clicked.connect(self.showWindow.showMinimized) # ———————————————————————————————————————————————————————————— self.show_label = QLabel(self.showWindow) self.show_label.setStyleSheet("background-color: rgb(0, 0, 0);") self.show_label.setAlignment(Qt.AlignCenter) self.show_label.setObjectName("图片显示") self.show_label.setCursor(Qt.CrossCursor) self.show_label.resize(self.di[self.dil][0], self.di[self.dil][1]) self.show_label.move(40, 90) # ———————————————————————————————————————————————————————————— self.A3D_label = QWidget(self.showWindow) self.layout1 = QVBoxLayout(self.A3D_label) self.canvas = MplCanvas() 
self.layout1.addWidget(self.canvas) self.A3D_label.resize(self.di[self.dil][1]+20, self.di[self.dil][1]+20) self.A3D_label.move(self.di[self.dil][0]+30, 80) # ———————————————————————————————————————————————————————————— self.target_label1 = QLabel(self.showWindow) self.target_label1.move(self.di[self.dil][0]+45, self.di[self.dil][1]+90) self.target_label1.resize(self.di[self.dil][1]//3, 370) self.target_label1.setFont(self.font1) self.target_label1.setText("名字(N):{}\n\n坐标(X):{:.2f}\n\n坐标(Y):{:.2f}\n\n坐标(Z):{:.2f}\n\n距离(L):{:.2f}\n\n单位(U): cm".format(None,0,0,0,0)) # ———————————————————————————————————————————————————————————— self.target_label2 = QLabel(self.showWindow) self.target_label2.move(self.di[self.dil][0]+self.di[self.dil][1]//3+40, self.di[self.dil][1]+90) self.target_label2.resize((self.di[self.dil][1]//3)*2, 370) self.target_label2.setStyleSheet("background-color: rgb(0, 0, 0);") # ———————————————————————————————————————————————————————————— self.target_listWidget = QListWidget(self.showWindow) self.target_listWidget.setLayoutDirection(Qt.LeftToRight) self.target_listWidget.resize(self.di[self.dil][0], 150) self.target_listWidget.move(40, self.di[self.dil][1] + 90) self.target_listWidget.setViewMode(QListWidget.IconMode) self.target_listWidget.setResizeMode(QListWidget.Adjust) self.target_listWidget.setMovement(QListWidget.Static) self.target_listWidget.setWrapping(True) self.target_listWidget.setWordWrap(True) self.target_listWidget.setIconSize(QSize(20, 20)) self.target_listWidget.setIconSize(QSize(20, 20)) self.target_listWidget.setFlow(QListView.Flow(1)) self.target_listWidget.setIconSize(QSize(150, 100)) self.target_listWidget.itemSelectionChanged.connect(self.change_target) # ———————————————————————————————————————————————————————————— self.batch_listWidget = QListWidget(self.showWindow) self.batch_listWidget.setLayoutDirection(Qt.LeftToRight) self.batch_listWidget.resize(self.di[self.dil][0], 150) self.batch_listWidget.move(40, 
self.di[self.dil][1] + 255) self.batch_listWidget.setViewMode(QListWidget.IconMode) self.batch_listWidget.setResizeMode(QListWidget.Adjust) self.batch_listWidget.setMovement(QListWidget.Static) self.batch_listWidget.setWrapping(True) self.batch_listWidget.setWordWrap(True) self.batch_listWidget.setIconSize(QSize(20, 20)) self.batch_listWidget.setFlow(QListView.Flow(1)) self.batch_listWidget.setIconSize(QSize(150, 100)) self.batch_listWidget.itemSelectionChanged.connect(self.change_pictrue) # ———————————————————————————————————————————————————————————— self.pause_button = QPushButton(self.showWindow) self.pause_button.setText("暂停") self.pause_button.resize(80, 40) self.pause_button.move(self.di[self.dil][0] - 140, self.di[self.dil][1] + 420) self.pause_button.clicked.connect(self.pause_predict) # ———————————————————————————————————————————————————————————— self.end_button = QPushButton(self.showWindow) self.end_button.setText("终止") self.end_button.resize(80, 40) self.end_button.move(self.di[self.dil][0] - 40, self.di[self.dil][1] + 420) self.end_button.clicked.connect(self.end_predict) # ———————————————————————————————————————————————————————————— self.progressbar = QProgressBar(self.showWindow) self.progressbar.resize(self.di[self.dil][0] - 180, 40) self.progressbar.move(40, self.di[self.dil][1] + 420) # ———————————————————————————————————————————————————————————— basic_setting = QLabel(self.mainWindow) basic_setting.setText("基础设置") basic_setting.setFont(self.font1) basic_setting.resize(120, 30) basic_setting.move(30, 80) # ———————————————————————————————————————————————————————————— model_label = QLabel(self.mainWindow) model_label.setText("选取模型:") model_label.setFont(self.font2) model_label.resize(120,30) model_label.move(40,150) self.model_lineEdit = QLineEdit(self.mainWindow) self.model_lineEdit.setText(self.D["model"]) self.model_lineEdit.resize(300,30) self.model_lineEdit.move(160,150) model_button = QPushButton(self.mainWindow) model_button.setText("file") 
model_button.resize(70,30) model_button.move(470,150) model_button.clicked.connect(lambda : self.select_path(item="model")) # ———————————————————————————————————————————————————————————— monocular_label = QLabel(self.mainWindow) monocular_label.setText("单目检测:") monocular_label.setFont(self.font2) monocular_label.resize(120, 30) monocular_label.move(40, 220) self.monocular_radioButton = QRadioButton(self.mainWindow) self.monocular_radioButton.setChecked(self.D["Monocular"]) self.monocular_radioButton.move(180,223) self.monocular_radioButton.toggled.connect(self.set_source) # ———————————————————————————————————————————————————————————— high_label = QLabel(self.mainWindow) high_label.setText("高: cm") high_label.setFont(self.font2) high_label.resize(200, 30) high_label.move(260,220) self.high_lineEdit = QLineEdit(self.mainWindow) self.high_lineEdit.setText(str(self.D["high"])) self.high_lineEdit.resize(60,30) self.high_lineEdit.move(300,220) self.high_lineEdit.setEnabled(self.monocular_radioButton.isChecked()) wide_label = QLabel(self.mainWindow) wide_label.setText("宽: cm") wide_label.setFont(self.font2) wide_label.resize(200,30) wide_label.move(410,220) self.wide_lineEdit = QLineEdit(self.mainWindow) self.wide_lineEdit.setText(str(self.D["wide"])) self.wide_lineEdit.resize(60, 30) self.wide_lineEdit.move(450, 220) self.wide_lineEdit.setEnabled(self.monocular_radioButton.isChecked()) # ———————————————————————————————————————————————————————————— source_label = QLabel(self.mainWindow) source_label.setText("主 视 角:") source_label.setFont(self.font2) source_label.resize(120, 30) source_label.move(40, 270) self.source_lineEdit = QLineEdit(self.mainWindow) self.source_lineEdit.setText(self.D["source"]) self.source_lineEdit.resize(300, 30) self.source_lineEdit.move(160, 270) self.source_lineEdit.setEnabled(self.monocular_radioButton.isChecked()) self.source_batton1 = QPushButton(self.mainWindow) self.source_batton1.setText("f") self.source_batton1.resize(30, 30) 
self.source_batton1.move(470, 270) self.source_batton1.clicked.connect(lambda: self.select_path(item="source_f")) self.source_batton1.setEnabled(self.monocular_radioButton.isChecked()) self.source_batton2 = QPushButton(self.mainWindow) self.source_batton2.setText("d") self.source_batton2.resize(30, 30) self.source_batton2.move(510, 270) self.source_batton2.clicked.connect(lambda: self.select_path(item="source_d")) self.source_batton2.setEnabled(self.monocular_radioButton.isChecked()) #———————————————————————————————————————————————————————————— focalLengthl_Label = QLabel(self.mainWindow) focalLengthl_Label.setText("焦距l: lfx: px lfy: px") focalLengthl_Label.setFont(self.font2) focalLengthl_Label.resize(500, 30) focalLengthl_Label.move(40, 340) self.fxl_LineEdit = QLineEdit(self.mainWindow) self.fxl_LineEdit.setText(str(self.D["lfx"])) self.fxl_LineEdit.resize(80, 30) self.fxl_LineEdit.move(190, 340) self.fyl_LineEdit = QLineEdit(self.mainWindow) self.fyl_LineEdit.setText(str(self.D["lfy"])) self.fyl_LineEdit.resize(80, 30) self.fyl_LineEdit.move(410, 340) # ———————————————————————————————————————————————————————————— principalPointl_Label = QLabel(self.mainWindow) principalPointl_Label.setText("主点l: lu0: px lv0: px") principalPointl_Label.setFont(self.font2) principalPointl_Label.resize(500, 30) principalPointl_Label.move(40, 390) self.u0l_LineEdit = QLineEdit(self.mainWindow) self.u0l_LineEdit.setText(str(self.D["lu0"])) self.u0l_LineEdit.resize(80, 30) self.u0l_LineEdit.move(190, 390) self.v0l_LineEdit = QLineEdit(self.mainWindow) self.v0l_LineEdit.setText(str(self.D["lv0"])) self.v0l_LineEdit.resize(80, 30) self.v0l_LineEdit.move(410, 390) # ———————————————————————————————————————————————————————————— focalLengthr_Label = QLabel(self.mainWindow) focalLengthr_Label.setText("焦距r: rfx: px rfy: px") focalLengthr_Label.setFont(self.font2) focalLengthr_Label.resize(500, 30) focalLengthr_Label.move(40, 440) self.fxr_LineEdit = QLineEdit(self.mainWindow) 
self.fxr_LineEdit.setText(str(self.D["rfx"])) self.fxr_LineEdit.resize(80, 30) self.fxr_LineEdit.move(190, 440) self.fxr_LineEdit.setEnabled(not self.monocular_radioButton.isChecked()) self.fyr_LineEdit = QLineEdit(self.mainWindow) self.fyr_LineEdit.setText(str(self.D["rfy"])) self.fyr_LineEdit.resize(80, 30) self.fyr_LineEdit.move(410, 440) self.fyr_LineEdit.setEnabled(not self.monocular_radioButton.isChecked()) # ———————————————————————————————————————————————————————————— principalPointr_Label = QLabel(self.mainWindow) principalPointr_Label.setText("主点r: ru0: px rv0: px") principalPointr_Label.setFont(self.font2) principalPointr_Label.resize(500, 30) principalPointr_Label.move(40, 490) self.u0r_LineEdit = QLineEdit(self.mainWindow) self.u0r_LineEdit.setText(str(self.D["ru0"])) self.u0r_LineEdit.resize(80, 30) self.u0r_LineEdit.move(190, 490) self.u0r_LineEdit.setEnabled(not self.monocular_radioButton.isChecked()) self.v0r_LineEdit = QLineEdit(self.mainWindow) self.v0r_LineEdit.setText(str(self.D["rv0"])) self.v0r_LineEdit.resize(80, 30) self.v0r_LineEdit.move(410, 490) self.v0r_LineEdit.setEnabled(not self.monocular_radioButton.isChecked()) # ———————————————————————————————————————————————————————————— binocular_label = QLabel(self.mainWindow) binocular_label.setText("双目检测:") binocular_label.setFont(self.font2) binocular_label.resize(150, 30) binocular_label.move(40, 560) self.binocular_radiobutton = QRadioButton(self.mainWindow) self.binocular_radiobutton.setChecked(not self.D["Monocular"]) self.binocular_radiobutton.move(180, 563) self.binocular_radiobutton.toggled.connect(self.set_source) # ———————————————————————————————————————————————————————————— # focalLength_label = QLabel(self.mainWindow) # focalLength_label.setText("焦距:") # focalLength_label.setFont(self.font2) # focalLength_label.resize(150,30) # focalLength_label.move(210,510) # # self.focalLength_lineEdit = QLineEdit(self.mainWindow) # self.focalLength_lineEdit.setText(str(self.D["focalLength"])) # 
self.focalLength_lineEdit.resize(50,30) # self.focalLength_lineEdit.move(280,510) # self.focalLength_lineEdit.setEnabled(self.binocular_radiobutton.isChecked()) # # focalLength_label1 = QLabel(self.mainWindow) # focalLength_label1.setText("pt") # focalLength_label1.setFont(self.font3) # focalLength_label1.resize(50, 30) # focalLength_label1.move(337, 510) # ———————————————————————————————————————————————————————————— spaceBetween_label = QLabel(self.mainWindow) spaceBetween_label.setText("两摄像头的间距: cm") spaceBetween_label.setFont(self.font2) spaceBetween_label.resize(300,30) spaceBetween_label.move(260,560) self.spaceBetween_lineEdit = QLineEdit(self.mainWindow) self.spaceBetween_lineEdit.setText(str(self.D["spaceBetween"])) self.spaceBetween_lineEdit.resize(50, 30) self.spaceBetween_lineEdit.move(450, 560) self.spaceBetween_lineEdit.setEnabled(self.binocular_radiobutton.isChecked()) # ———————————————————————————————————————————————————————————— left_label = QLabel(self.mainWindow) left_label.setText("左 视 角:") left_label.setFont(self.font2) left_label.resize(120, 30) left_label.move(40, 610) self.left_lineEdit = QLineEdit(self.mainWindow) self.left_lineEdit.setText(self.D["leftView"]) self.left_lineEdit.resize(300, 30) self.left_lineEdit.move(160, 610) self.left_lineEdit.setEnabled(self.binocular_radiobutton.isChecked()) self.left_batton1 = QPushButton(self.mainWindow) self.left_batton1.setText("f") self.left_batton1.resize(30, 30) self.left_batton1.move(470, 610) self.left_batton1.setEnabled(self.binocular_radiobutton.isChecked()) self.left_batton1.clicked.connect(lambda :self.select_path(item="left_f")) self.left_batton2 = QPushButton(self.mainWindow) self.left_batton2.setText("d") self.left_batton2.resize(30, 30) self.left_batton2.move(510, 610) self.left_batton2.setEnabled(self.binocular_radiobutton.isChecked()) self.left_batton2.clicked.connect(lambda :self.select_path(item="left_d")) # ———————————————————————————————————————————————————————————— right_label = 
QLabel(self.mainWindow) right_label.setText("右 视 角:") right_label.setFont(self.font2) right_label.resize(120, 30) right_label.move(40, 660) self.right_lineEdit = QLineEdit(self.mainWindow) self.right_lineEdit.setText(self.D["rightView"]) self.right_lineEdit.resize(300, 30) self.right_lineEdit.move(160, 660) self.right_lineEdit.setEnabled(self.binocular_radiobutton.isChecked()) self.right_batton1 = QPushButton(self.mainWindow) self.right_batton1.setText("f") self.right_batton1.resize(30, 30) self.right_batton1.move(470, 660) self.right_batton1.setEnabled(self.binocular_radiobutton.isChecked()) self.right_batton1.clicked.connect(lambda :self.select_path(item="right_f")) self.right_batton2 = QPushButton(self.mainWindow) self.right_batton2.setText("d") self.right_batton2.resize(30, 30) self.right_batton2.move(510, 660) self.right_batton2.setEnabled(self.binocular_radiobutton.isChecked()) self.right_batton2.clicked.connect(lambda : self.select_path(item="right_d")) # ———————————————————————————————————————————————————————————— self.save_checkBox = QCheckBox(self.mainWindow) self.save_checkBox.setFont(self.font3) self.save_checkBox.setChecked(self.D["save"]) self.save_checkBox.setStyleSheet("QCheckBox::indicator {width: 24px; height: 24px;}") self.save_checkBox.move(515, 730) self.save_checkBox.stateChanged.connect(self.set_save) save_path_label = QLabel(self.mainWindow) save_path_label.setText("保存路径:") save_path_label.setFont(self.font2) save_path_label.resize(120, 30) save_path_label.move(40, 730) self.save_path_lineEdit = QLineEdit(self.mainWindow) self.save_path_lineEdit.setText(self.D["save_path"]) self.save_path_lineEdit.resize(300, 30) self.save_path_lineEdit.move(160, 730) self.save_path_lineEdit.setEnabled(self.save_checkBox.isChecked()) self.save_path_button = QPushButton(self.mainWindow) self.save_path_button.setText("d") self.save_path_button.resize(30, 30) self.save_path_button.move(470, 730) self.save_path_button.clicked.connect(lambda 
:self.select_path(item="save")) self.save_path_button.setEnabled(self.save_checkBox.isChecked()) Camera_detection=QButtonGroup(self.mainWindow) Camera_detection.addButton(self.binocular_radiobutton) Camera_detection.addButton(self.monocular_radioButton) # ———————————————————————————————————————————————————————————— saveSetting = QPushButton(self.mainWindow) saveSetting.setText("保存设置") saveSetting.setFont(self.font3) saveSetting.resize(120,40) saveSetting.move(40,self.mWh-80) saveSetting.clicked.connect(self.save_setting) # ———————————————————————————————————————————————————————————— startPredict = QPushButton(self.mainWindow) startPredict.setText("开始检测") startPredict.setFont(self.font3) startPredict.resize(120,40) startPredict.move(180,self.mWh-80) startPredict.clicked.connect(self.start_predict) # ———————————————————————————————————————————————————————————— self.detail_set_button = QPushButton(self.mainWindow) self.detail_set_button.setText("详细设置>") self.detail_set_button.setFont(self.font3) self.detail_set_button.resize(120, 40) self.detail_set_button.move(430, self.mWh - 80) self.detail_set_button.clicked.connect(self.detail_set_window) # ———————————————————————————————————————————————————————————— detail_setting = QLabel(self.mainWindow) detail_setting.setFont(self.font1) detail_setting.setText("详细设置") detail_setting.resize(120,30) detail_setting.move(600,80) # ———————————————————————————————————————————————————————————— augment_label = QLabel(self.mainWindow) augment_label.setText("图像增强:") augment_label.setFont(self.font2) augment_label.resize(120, 30) augment_label.move(610, 150) self.augment_button = QCheckBox(self.mainWindow) self.augment_button.setText("开启") self.augment_button.setFont(self.font3) self.augment_button.setChecked(self.D["augment"]) self.augment_button.move(745, 150) # ———————————————————————————————————————————————————————————— half_label = QLabel(self.mainWindow) half_label.setText("半精度:") half_label.setFont(self.font2) 
half_label.resize(120, 30) half_label.move(840, 150) self.half_button = QCheckBox(self.mainWindow) self.half_button.setText("开启") self.half_button.setFont(self.font3) self.half_button.setChecked(self.D["half"]) self.half_button.move(945, 150) # ———————————————————————————————————————————————————————————— retina_masks_label = QLabel(self.mainWindow) retina_masks_label.setText("分割掩膜:") retina_masks_label.setFont(self.font2) retina_masks_label.resize(120, 30) retina_masks_label.move(610, 210) self.retina_masks_button = QCheckBox(self.mainWindow) self.retina_masks_button.setText("开启") self.retina_masks_button.setFont(self.font3) self.retina_masks_button.setChecked(self.D["retina_masks"]) self.retina_masks_button.move(745, 210) # ———————————————————————————————————————————————————————————— agnostic_nms_label = QLabel(self.mainWindow) agnostic_nms_label.setText("N_M_S:") agnostic_nms_label.setFont(self.font2) agnostic_nms_label.resize(100, 30) agnostic_nms_label.move(840, 210) self.agnostic_nms_button = QCheckBox(self.mainWindow) self.agnostic_nms_button.setText("开启") self.agnostic_nms_button.setFont(self.font3) self.agnostic_nms_button.setChecked(self.D["agnostic_nms"]) self.agnostic_nms_button.move(945, 210) # ———————————————————————————————————————————————————————————— draw_probs_label = QLabel(self.mainWindow) draw_probs_label.setText("绘制分类:") draw_probs_label.setFont(self.font2) draw_probs_label.resize(120, 30) draw_probs_label.move(610, 270) self.draw_probs_checkBox = QCheckBox(self.mainWindow) self.draw_probs_checkBox.setChecked(self.D["draw_probs"]) self.draw_probs_checkBox.setText("开启") self.draw_probs_checkBox.setFont(self.font3) self.draw_probs_checkBox.resize(70, 30) self.draw_probs_checkBox.move(745, 270) # ———————————————————————————————————————————————————————————— use_pil_label = QLabel(self.mainWindow) use_pil_label.setText("使用pil:") use_pil_label.setFont(self.font2) use_pil_label.resize(120, 30) use_pil_label.move(840, 270) self.use_pil_checkBox = 
QCheckBox(self.mainWindow) self.use_pil_checkBox.setChecked(self.D["use_pil"]) self.use_pil_checkBox.setText("开启") self.use_pil_checkBox.setFont(self.font3) self.use_pil_checkBox.resize(70, 30) self.use_pil_checkBox.move(945, 270) # ———————————————————————————————————————————————————————————— show_label = QLabel(self.mainWindow) show_label.setText("显示设置:") show_label.setFont(self.font2) show_label.resize(120, 30) show_label.move(610, 340) self.show_labels_checkBox = QCheckBox(self.mainWindow) self.show_labels_checkBox.setChecked(self.D["show_labels"]) self.show_labels_checkBox.setText("显示标签") self.show_labels_checkBox.setFont(self.font3) self.show_labels_checkBox.resize(170,30) self.show_labels_checkBox.move(630,390) self.show_conf_checkBox = QCheckBox(self.mainWindow) self.show_conf_checkBox.setChecked(self.D["show_conf"]) self.show_conf_checkBox.setText("显示分数") self.show_conf_checkBox.setFont(self.font3) self.show_conf_checkBox.resize(170, 30) self.show_conf_checkBox.move(830, 390) self.show_coordinate_checkBox = QCheckBox(self.mainWindow) self.show_coordinate_checkBox.setChecked(self.D["show_coordinate"]) self.show_coordinate_checkBox.setText("显示坐标") self.show_coordinate_checkBox.setFont(self.font3) self.show_coordinate_checkBox.resize(170, 30) self.show_coordinate_checkBox.move(630, 440) self.show_fps_checkBox = QCheckBox(self.mainWindow) self.show_fps_checkBox.setChecked(self.D["show_fps"]) self.show_fps_checkBox.setText("显示fps") self.show_fps_checkBox.setFont(self.font3) self.show_fps_checkBox.resize(170, 30) self.show_fps_checkBox.move(830, 440) self.show_boxes_checkBox = QCheckBox(self.mainWindow) self.show_boxes_checkBox.setChecked(self.D["show_boxes"]) self.show_boxes_checkBox.setText("显示边框") self.show_boxes_checkBox.setFont(self.font3) self.show_boxes_checkBox.resize(170, 30) self.show_boxes_checkBox.move(630, 490) self.show_masks_checkBox = QCheckBox(self.mainWindow) self.show_masks_checkBox.setChecked(self.D["show_masks"]) 
self.show_masks_checkBox.setText("显示掩膜") self.show_masks_checkBox.setFont(self.font3) self.show_masks_checkBox.resize(170, 30) self.show_masks_checkBox.move(830, 490) # ———————————————————————————————————————————————————————————— line_width_label = QLabel(self.mainWindow) line_width_label.setText("边框线宽:") line_width_label.setFont(self.font2) line_width_label.resize(120, 30) line_width_label.move(610, 560) self.line_width_lineEidt = QLineEdit(self.mainWindow) self.line_width_lineEidt.setText(str(self.D["line_width"])) self.line_width_lineEidt.resize(50, 30) self.line_width_lineEidt.move(755, 560) # ———————————————————————————————————————————————————————————— font_size_label = QLabel(self.mainWindow) font_size_label.setText("字体大小:") font_size_label.setFont(self.font2) font_size_label.resize(120, 30) font_size_label.move(840, 560) self.font_size_lineEidt = QLineEdit(self.mainWindow) self.font_size_lineEidt.setText(str(self.D["font_size"])) self.font_size_lineEidt.resize(50, 30) self.font_size_lineEidt.move(960, 560) # ———————————————————————————————————————————————————————————— conf_label = QLabel(self.mainWindow) conf_label.setText("信任度阈值:") conf_label.setFont(self.font2) conf_label.resize(150, 30) conf_label.move(610, 620) self.conf_lineEidt = QLineEdit(self.mainWindow) self.conf_lineEidt.setText(str(self.D["conf"])) self.conf_lineEidt.resize(50, 30) self.conf_lineEidt.move(755, 620) # ———————————————————————————————————————————————————————————— iou_label = QLabel(self.mainWindow) iou_label.setText("iou阈值:") iou_label.setFont(self.font2) iou_label.resize(120, 30) iou_label.move(850, 620) self.iou_lineEidt = QLineEdit(self.mainWindow) self.iou_lineEidt.setText(str(self.D["iou"])) self.iou_lineEidt.resize(50, 30) self.iou_lineEidt.move(960, 620) # ———————————————————————————————————————————————————————————— max_det_label = QLabel(self.mainWindow) max_det_label.setText("最大检测数:") max_det_label.setFont(self.font2) max_det_label.resize(170, 30) max_det_label.move(610, 
680) self.max_det_lineEdit = QLineEdit(self.mainWindow) self.max_det_lineEdit.setText(str(self.D["max_det"])) self.max_det_lineEdit.resize(50, 30) self.max_det_lineEdit.move(755, 680) # ———————————————————————————————————————————————————————————— device_label = QLabel(self.mainWindow) device_label.setText("运行设备:") device_label.setFont(self.font2) device_label.resize(170, 30) device_label.move(840, 680) self.device_lineEdit = QLineEdit(self.mainWindow) self.device_lineEdit.setText(str(self.D["device"])) self.device_lineEdit.resize(50, 30) self.device_lineEdit.move(960, 680) # ———————————————————————————————————————————————————————————— device_label = QLabel(self.mainWindow) device_label.setText("运行设备:") device_label.setFont(self.font2) device_label.resize(170, 30) device_label.move(840, 680) self.device_lineEdit = QLineEdit(self.mainWindow) self.device_lineEdit.setText(str(self.D["device"])) self.device_lineEdit.resize(50, 30) self.device_lineEdit.move(960, 680) # ———————————————————————————————————————————————————————————— Binocular_show_label = QLabel(self.mainWindow) Binocular_show_label.setText("双目检测显示图:") Binocular_show_label.setFont(self.font2) Binocular_show_label.resize(200, 30) Binocular_show_label.move(610, 750) Binocular_show_left = QRadioButton(self.mainWindow) Binocular_show_left.setText("左") Binocular_show_left.setFont(self.font3) Binocular_show_left.resize(50, 30) Binocular_show_left.move(810, 750) Binocular_show_right = QRadioButton(self.mainWindow) Binocular_show_right.setText("右") Binocular_show_right.setFont(self.font3) Binocular_show_right.resize(50, 30) Binocular_show_right.move(885, 750) Binocular_show_combine = QRadioButton(self.mainWindow) Binocular_show_combine.setText("合") Binocular_show_combine.setFont(self.font3) Binocular_show_combine.resize(50, 30) Binocular_show_combine.move(960, 750) Binocular_show_combine.setChecked(True) self.layout = QButtonGroup(self.mainWindow) self.layout.addButton(Binocular_show_left) 
        # NOTE(review): original file reached us with newlines collapsed; the
        # indentation below is reconstructed from the syntax — confirm against
        # the original layout before relying on block boundaries.
        # --- tail of the settings-window setup method (its beginning is above this chunk) ---
        self.layout.addButton(Binocular_show_right)
        self.layout.addButton(Binocular_show_combine)
        # ————————————————————————————————————————————————————————————
        # "Result window size" controls: caption, current size readout, -, + and preview buttons.
        show_size_label = QLabel(self.mainWindow)
        show_size_label.setText("效果界面尺寸:")
        show_size_label.setFont(self.font2)
        show_size_label.resize(200, 30)
        show_size_label.move(610, 810)
        self.show_size = QLabel(self.mainWindow)
        # Readout of the persisted size preset (index stored in the settings dict D).
        self.show_size.setText(self.size_dic[self.D["show_size"]])
        self.show_size.setFont(self.font2)
        self.show_size.resize(50, 30)
        self.show_size.move(770, 810)
        self.show_size_reduce = QPushButton(self.mainWindow)
        self.show_size_reduce.setText("-")
        self.show_size_reduce.resize(30, 30)
        self.show_size_reduce.move(820, 810)
        # 0 = shrink, 1 = enlarge, 2 = preview (see set_show_window).
        self.show_size_reduce.clicked.connect(lambda :self.set_show_window(0))
        self.show_size_big = QPushButton(self.mainWindow)
        self.show_size_big.setText("+")
        self.show_size_big.resize(30, 30)
        self.show_size_big.move(860, 810)
        self.show_size_big.clicked.connect(lambda: self.set_show_window(1))
        self.preview = QPushButton(self.mainWindow)
        self.preview.setText("预览")
        self.preview.resize(100, 30)
        self.preview.move(910, 810)
        self.preview.clicked.connect(lambda: self.set_show_window(2))
        # ————————————————————————————————————————————————————————————

    def detail_set_window(self):
        """Toggle the main window between compact (590 px) and expanded (1050 px) layouts.

        The current state is tracked through the toggle button's own label text.
        """
        if self.detail_set_button.text()=="详细设置>":
            # Expand: widen the window and shift the title bar / close / minimize widgets.
            self.mainWindow.setFixedSize(1050, self.mWh)
            self.label_main_title.resize(1050, 50)
            self.button_main_close.move(1000, 0)
            self.button_main_min.move(950, 0)
            self.detail_set_button.setText("详细设置<")
        else:
            # Collapse back to the compact layout.
            self.mainWindow.setFixedSize(590, self.mWh)
            self.label_main_title.resize(590, 50)
            self.button_main_close.move(540, 0)
            self.button_main_min.move(490, 0)
            self.detail_set_button.setText("详细设置>")

    def set_show_window(self, i):
        """Resize the result window to the next size preset, or show it.

        i == 0: step down one preset (if not already at the smallest, index 0).
        i == 1: step up one preset (if not already at the largest, index 2).
        i == 2: just show the result window.

        self.di[self.dil] holds (width, height) of the current preset; every
        child widget is re-laid-out from those two values.
        NOTE(review): the two resize branches are identical except for the
        direction of self.dil — candidate for extraction into one helper.
        """
        if i==0 and self.dil!=0:
            self.dil -= 1
            self.show_size.setText(self.size_dic[self.dil])
            self.label_backdrop.resize(self.di[self.dil][0] + self.di[self.dil][1] + 80, self.di[self.dil][1] + 500)
            self.label_show_title.resize(self.di[self.dil][0] + self.di[self.dil][1] + 80, 50)
            self.button_show_close.move(self.di[self.dil][0] + self.di[self.dil][1] + 30, 0)
            self.button_show_min.move(self.di[self.dil][0] + self.di[self.dil][1] - 20, 0)
            self.show_label.resize(self.di[self.dil][0], self.di[self.dil][1])
            self.showWindow.setFixedSize(self.di[self.dil][0]+self.di[self.dil][1]+80, self.di[self.dil][1]+500)
            self.pause_button.move(self.di[self.dil][0] - 140, self.di[self.dil][1] + 420)
            self.end_button.move(self.di[self.dil][0] - 40, self.di[self.dil][1] + 420)
            self.progressbar.resize(self.di[self.dil][0] - 180, 40)
            self.progressbar.move(40, self.di[self.dil][1] + 420)
            self.A3D_label.resize(self.di[self.dil][1] + 20, self.di[self.dil][1] + 20)
            self.A3D_label.move(self.di[self.dil][0] + 30, 80)
            self.batch_listWidget.resize(self.di[self.dil][0], 150)
            self.batch_listWidget.move(40, self.di[self.dil][1] + 255)
            self.target_listWidget.resize(self.di[self.dil][0], 150)
            self.target_listWidget.move(40, self.di[self.dil][1] + 90)
            self.target_label1.move(self.di[self.dil][0] + 45, self.di[self.dil][1] + 90)
            self.target_label1.resize(self.di[self.dil][1] // 3, 370)
            self.target_label2.move(self.di[self.dil][0] + self.di[self.dil][1] // 3 + 40, self.di[self.dil][1] + 90)
            self.target_label2.resize((self.di[self.dil][1] // 3) * 2, 370)
        elif i==1 and self.dil!=2:
            self.dil+=1
            self.show_size.setText(self.size_dic[self.dil])
            self.label_backdrop.resize(self.di[self.dil][0] + self.di[self.dil][1] + 80, self.di[self.dil][1] + 500)
            self.label_show_title.resize(self.di[self.dil][0] + self.di[self.dil][1] + 80, 50)
            self.button_show_close.move(self.di[self.dil][0] + self.di[self.dil][1] + 30, 0)
            self.button_show_min.move(self.di[self.dil][0] + self.di[self.dil][1] - 20, 0)
            self.show_label.resize(self.di[self.dil][0], self.di[self.dil][1])
            self.showWindow.setFixedSize(self.di[self.dil][0] + self.di[self.dil][1] + 80, self.di[self.dil][1] + 500)
            self.pause_button.move(self.di[self.dil][0] - 140, self.di[self.dil][1] + 420)
            self.end_button.move(self.di[self.dil][0] - 40, self.di[self.dil][1] + 420)
            self.progressbar.resize(self.di[self.dil][0] - 180, 40)
            self.progressbar.move(40, self.di[self.dil][1] + 420)
            self.A3D_label.resize(self.di[self.dil][1] + 20, self.di[self.dil][1] + 20)
            self.A3D_label.move(self.di[self.dil][0] + 30, 80)
            self.batch_listWidget.resize(self.di[self.dil][0], 150)
            self.batch_listWidget.move(40, self.di[self.dil][1] + 255)
            self.target_listWidget.resize(self.di[self.dil][0], 150)
            self.target_listWidget.move(40, self.di[self.dil][1] + 90)
            self.target_label1.move(self.di[self.dil][0] + 45, self.di[self.dil][1] + 90)
            self.target_label1.resize(self.di[self.dil][1] // 3, 370)
            self.target_label2.move(self.di[self.dil][0] + self.di[self.dil][1] // 3 + 40, self.di[self.dil][1] + 90)
            self.target_label2.resize((self.di[self.dil][1] // 3) * 2, 370)
        elif i==2:
            self.showWindow.show()

    # Select a path (model / source / left-right views / save directory)
    def select_path(self,item):
        """Open a file/directory dialog and write the chosen path into the matching line edit.

        item selects the target: "model", "source_f"/"source_d",
        "left_f"/"left_d", "right_f"/"right_d", "save".  A cancelled dialog
        returns "" and leaves the line edit untouched.
        """
        if item=="model":
            path, _ = QFileDialog.getOpenFileName(filter="*.pt;*.ckpt;")
            if path != "":
                self.model_lineEdit.setText(path)
        elif item=="source_f":  # detection source (single file)
            path, _ = QFileDialog.getOpenFileName()
            if path != "":
                self.source_lineEdit.setText(path)
        elif item=="source_d":  # detection source (directory)
            path = QFileDialog.getExistingDirectory()
            if path != "":
                self.source_lineEdit.setText(path)
        elif item=="left_f":
            path, _ = QFileDialog.getOpenFileName()
            if path != "":
                self.left_lineEdit.setText(path)
        elif item=="left_d":
            path = QFileDialog.getExistingDirectory()
            if path != "":
                self.left_lineEdit.setText(path)
        elif item=="right_f":
            path, _ = QFileDialog.getOpenFileName()
            if path != "":
                self.right_lineEdit.setText(path)
        elif item=="right_d":
            path = QFileDialog.getExistingDirectory()
            if path != "":
                self.right_lineEdit.setText(path)
        elif item=="save":  # save directory
            path = QFileDialog.getExistingDirectory()
            if path != "":
                self.save_path_lineEdit.setText(path)

    # Enable/disable the save-path widgets to follow the "save" checkbox
    def set_save(self):
        """Sync the save-path line edit and browse button with the save checkbox."""
        self.save_path_lineEdit.setEnabled(self.save_checkBox.isChecked())
        self.save_path_button.setEnabled(self.save_checkBox.isChecked())

    # Enable/disable source widgets depending on mono/binocular radio selection
    def set_source(self):
        """Enable monocular inputs when the monocular radio is checked, binocular ones otherwise."""
        self.source_lineEdit.setEnabled(self.monocular_radioButton.isChecked())
        self.source_batton1.setEnabled(self.monocular_radioButton.isChecked())
        self.source_batton2.setEnabled(self.monocular_radioButton.isChecked())
        self.wide_lineEdit.setEnabled(self.monocular_radioButton.isChecked())
        self.high_lineEdit.setEnabled(self.monocular_radioButton.isChecked())
        self.left_lineEdit.setEnabled(self.binocular_radiobutton.isChecked())
        self.left_batton1.setEnabled(self.binocular_radiobutton.isChecked())
        self.left_batton2.setEnabled(self.binocular_radiobutton.isChecked())
        self.right_lineEdit.setEnabled(self.binocular_radiobutton.isChecked())
        self.right_batton1.setEnabled(self.binocular_radiobutton.isChecked())
        self.right_batton2.setEnabled(self.binocular_radiobutton.isChecked())
        self.spaceBetween_lineEdit.setEnabled(self.binocular_radiobutton.isChecked())
        # Right-camera intrinsics only make sense in binocular mode.
        self.fxr_LineEdit.setEnabled(not self.monocular_radioButton.isChecked())
        self.fyr_LineEdit.setEnabled(not self.monocular_radioButton.isChecked())
        self.u0r_LineEdit.setEnabled(not self.monocular_radioButton.isChecked())
        self.v0r_LineEdit.setEnabled(not self.monocular_radioButton.isChecked())

    # Persist the settings
    def save_setting(self):
        """Collect every widget value into self.D and dump it to runin_predict.json.

        NOTE(review): the float()/int() conversions raise ValueError on
        non-numeric input — there is no validation here; confirm whether the
        UI restricts these line edits elsewhere.
        """
        self.D["model"] = self.model_lineEdit.text()
        self.D["Binocular"] = self.binocular_radiobutton.isChecked()
        self.D["high"] = float(self.high_lineEdit.text())
        self.D["wide"] = float(self.wide_lineEdit.text())
        self.D["Monocular"] = self.monocular_radioButton.isChecked()
        self.D["spaceBetween"] = float(self.spaceBetween_lineEdit.text())
        # Left camera intrinsics (focal lengths, principal point).
        self.D["lfx"] = float(self.fxl_LineEdit.text())
        self.D["lfy"] = float(self.fyl_LineEdit.text())
        self.D["lu0"] = float(self.u0l_LineEdit.text())
        self.D["lv0"] = float(self.v0l_LineEdit.text())
        # Right camera intrinsics.
        self.D["rfx"] = float(self.fxr_LineEdit.text())
        self.D["rfy"] = float(self.fyr_LineEdit.text())
        self.D["ru0"] = float(self.u0r_LineEdit.text())
        self.D["rv0"] = float(self.v0r_LineEdit.text())
        left = self.left_lineEdit.text()
        right = self.right_lineEdit.text()
        # Both views must be the same kind of source (both directories or both files).
        if os.path.isdir(left) == os.path.isdir(right):
            self.D["leftView"] = left
            self.D["rightView"] = right
        else:
            QMessageBox.warning(self.mainWindow, "警告", "请保持左右视图格式一致")
        self.D["source"] = self.source_lineEdit.text()
        # NOTE(review): "high"/"wide" are re-read here a second time — redundant
        # with the assignments above.
        self.D["high"] = float(self.high_lineEdit.text())
        self.D["wide"] = float(self.wide_lineEdit.text())
        self.D["save"] = self.save_checkBox.isChecked()
        self.D["save_path"] = self.save_path_lineEdit.text()
        self.D["half"] = self.half_button.isChecked()
        self.D["augment"] = self.augment_button.isChecked()
        self.D["retina_masks"] = self.retina_masks_button.isChecked()
        self.D["agnostic_nms"] = self.agnostic_nms_button.isChecked()
        self.D["draw_probs"] = self.draw_probs_checkBox.isChecked()
        self.D["use_pil"] = self.use_pil_checkBox.isChecked()
        self.D["show_labels"] = self.show_labels_checkBox.isChecked()
        self.D["show_conf"] = self.show_conf_checkBox.isChecked()
        self.D["show_boxes"] = self.show_boxes_checkBox.isChecked()
        self.D["show_masks"] = self.show_masks_checkBox.isChecked()
        self.D["show_coordinate"] = self.show_coordinate_checkBox.isChecked()
        self.D["show_fps"] = self.show_fps_checkBox.isChecked()
        line_width = self.line_width_lineEidt.text()
        # Non-numeric text means "auto" (None) line width for YOLO plotting.
        if line_width.isdigit():
            self.D["line_width"] = int(line_width)
        else:
            self.D["line_width"] = None
        self.D["font_size"] = int(self.font_size_lineEidt.text())
        self.D["conf"] = float(self.conf_lineEidt.text())
        self.D["iou"] = float(self.iou_lineEidt.text())
        self.D["max_det"] = int(self.max_det_lineEdit.text())
        device = self.device_lineEdit.text()
        # The literal string "None" maps to the None device (let YOLO choose).
        if device=="None":
            self.D["device"] = None
        else:
            self.D["device"] = device
        self.D["show_size"] = self.dil
        with open("runin_predict.json", "w") as file:
            json.dump(self.D,file,indent=4)
        QMessageBox.information(self.mainWindow, "操作成功", "设置完成")

    # Start detection
    def start_predict(self):
        """Load the model and dispatch to the right prediction routine.

        Single images are processed synchronously here; batch directories,
        .mp4 videos, and realtime camera sources run in a background Thread.
        Returns True in all paths.
        """
        self.model = YOLO(self.D["model"])
        self.batch_listWidget.clear()
        self.progressbar.setValue(0)
        self.run=True
        self.showWindow.show()
        if self.D["Binocular"] == True:
            if self.D["leftView"].endswith((".jpg",".png",".bmp")) and self.D["rightView"].endswith((".jpg",".png",".bmp")):
                # Identical left/right paths mean one side-by-side stereo image:
                # split it down the middle into left and right halves.
                if self.D["leftView"] == self.D["rightView"]:
                    image = cv2.imread(self.D["leftView"])
                    h, w, _ = image.shape
                    wf = w // 2
                    image_l = image[0:h, 0:wf]
                    image_r = image[0:h, wf:w]
                else:
                    image_l = cv2.imread(self.D["leftView"])
                    image_r = cv2.imread(self.D["rightView"])
                frame, re1, re2, items = self.binocularPredict(image_l=image_l,image_r=image_r)
                if self.D["save"]:
                    cv2.imwrite(self.D["save_path"] + "/" + os.path.basename(self.D["leftView"]), frame)
                return True
            elif os.path.isdir(self.D["leftView"]) and os.path.isdir(self.D["rightView"]):
                target = self.binocularBatch
            elif self.D["leftView"].endswith(".mp4") and self.D["rightView"].endswith(".mp4"):
                target = self.binocularVidoe
            else:
                # Anything else is treated as a camera index (realtime).
                target = self.binocularRealtime
        else:
            if self.D["source"].endswith((".jpg",".png",".bmp")):
                image = cv2.imread(self.D["source"], 1)
                frame, re1, re2, items = self.monocularPredict(image=image)
                if self.D["save"]:
                    cv2.imwrite(self.D["save_path"]+"/"+os.path.basename(self.D["source"]), frame)
                return True
            elif os.path.isdir(self.D["source"]):
                target = self.monocularBatch
            elif self.D["source"].endswith(".mp4"):
                target = self.monocularVidoe
            else:
                target = self.monocularRealtime
        # Run the long-lived routine off the UI thread.
        Thread(target=target).start()
        return True

    # Pause/resume detection
    def pause_predict(self):
        """Toggle the worker pause Event; the button label doubles as the state flag."""
        if self.pause_button.text()=="暂停":
            self.event.clear()
            self.pause_button.setText("继续")
        else:
            self.event.set()
            self.pause_button.setText("暂停")

    # Stop detection
    def end_predict(self):
        """Ask the worker loops to exit (they poll self.run)."""
        self.run = False

    # Progress-bar slot
    def update_progressbar(self,num):
        """Slot connected to self.singal; sets the progress bar value."""
        self.progressbar.setValue(num)

    # Run inference
    def detect(self,source):
        """Run YOLO on `source` with the saved options.

        Returns (results, images): the raw ultralytics results and the
        corresponding annotated frames from Result.plot().
        """
        results = self.model(source=source, augment=self.D["augment"], half=self.D["half"],
                             agnostic_nms=self.D["agnostic_nms"], retina_masks=self.D["retina_masks"],
                             conf=self.D["conf"], iou=self.D["iou"], max_det=self.D["max_det"],
                             device=self.D["device"])
        # Convert box data to a plain list so binocularPredict can .remove() matched boxes.
        results[0].boxes.data = list(results[0].boxes.data)
        images=list()
        for i in results:
            images.append(i.plot(probs=self.D["draw_probs"], pil=self.D["use_pil"],
                                 labels=self.D["show_labels"], conf=self.D["show_conf"],
                                 boxes=self.D["show_boxes"], masks=self.D["show_masks"],
                                 line_width=self.D["line_width"]))
        return results, images

    def show_result(self, frame):
        """Scale `frame` (BGR ndarray) to the current preset, preserving aspect, and display it."""
        img_height, img_width, channels = frame.shape
        if img_height > img_width:
            # Portrait: fit to preset height.
            frame = cv2.resize(frame, ((self.di[self.dil][1] * img_width) // img_height, self.di[self.dil][1]))
        else:
            # Landscape: fit to preset width.
            frame = cv2.resize(frame, (self.di[self.dil][0], (self.di[self.dil][0] * img_height) // img_width))
        img_height, img_width, channels = frame.shape
        frame_Q = QImage(frame.data, img_width, img_height, channels * img_width,QImage.Format_BGR888)
        self.show_label.setPixmap(QPixmap(frame_Q))

    def plot_3D(self, N, X, Y, Z):
        """Redraw the 3-D scatter of labelled targets on the embedded matplotlib canvas.

        The axes are deliberately relabelled (xlabel='Z', ylabel='X',
        zlabel='Y'): callers pass (names, depth, lateral, vertical) so depth
        runs along the plot's x direction.
        """
        self.canvas.axes.cla()
        # Reference axes through the origin.
        self.canvas.axes.plot([0, 100], [0, 0], [0, 0], color='red', linewidth=1, linestyle='-')
        self.canvas.axes.plot([0, 0], [0, 0], [-50, 50], color='blue', linewidth=1, linestyle='--')
        self.canvas.axes.plot([0, 0], [-50, 50], [0, 0], color='blue', linewidth=1, linestyle='--')
        self.canvas.axes.plot(X, Y, Z, 'o')
        for i in range(len(N)):
            self.canvas.axes.text(X[i], Y[i], Z[i], N[i])
        self.canvas.axes.set_xlabel('Z')
        self.canvas.axes.set_ylabel('X')
        self.canvas.axes.set_zlabel('Y')
        self.canvas.draw()

    def change_target(self):
        """Show details (name, X/Y/Z, distance, crop) for the target selected in the list."""
        Idx = self.target_listWidget.currentIndex().row()
        # target_list rows are [crop, name, Xc, Yc, Zc, Lc] (see *Predict methods).
        frame = self.target_list[Idx][0]
        Nc = self.target_list[Idx][1]
        Xc = self.target_list[Idx][2]
        Yc = self.target_list[Idx][3]
        Zc = self.target_list[Idx][4]
        Lc = self.target_list[Idx][5]
        self.target_label1.setText("名字(N):{}\n\n坐标(X):{:.2f}\n\n坐标(Y):{:.2f}\n\n坐标(Z):{:.2f}\n\n距离(L):{:.2f}\n\n单位(U): cm".format(Nc,Xc,Yc,Zc,Lc))
        img_height, img_width, channels = frame.shape
        if img_height > img_width:
            frame = cv2.resize(frame, ((370 * img_width) // img_height, 370))
        else:
            frame = cv2.resize(frame, ((self.di[self.dil][1]//3)*2, ((self.di[self.dil][1]//3)*2*img_height)//img_width))
        img_height, img_width, channels = frame.shape
        frame_Q = QImage(frame.data, img_width, img_height, channels * img_width, QImage.Format_BGR888)
        self.target_label2.setPixmap(QPixmap(frame_Q))

    def change_pictrue(self):
        """When a batch-result row is selected, redisplay its frame, 3-D plot, and targets."""
        Idx = self.batch_listWidget.currentIndex().row()
        # results_list rows are [annotated frame, (N, X, Y, Z, L), target crops].
        self.show_result(self.results_list[Idx][0])
        self.plot_3D(self.results_list[Idx][1][0], self.results_list[Idx][1][3],
                     self.results_list[Idx][1][1], self.results_list[Idx][1][2])
        self.target_listWidget.clear()
        self.target_list = []
        for i in range(len(self.results_list[Idx][2])):
            target_image = self.results_list[Idx][2][i]
            N = self.results_list[Idx][1][0][i]
            X = self.results_list[Idx][1][1][i]
            Y = self.results_list[Idx][1][2][i]
            Z = self.results_list[Idx][1][3][i]
            L = self.results_list[Idx][1][4][i]
            qt_image = QPixmap.fromImage(self.mat_to_qimage(target_image))
            item = QListWidgetItem(QIcon(qt_image), N)
            self.target_listWidget.addItem(item)
            self.target_list.append([target_image, N, X, Y, Z, L])

    # Monocular single-image detection
    def monocularPredict(self, image=None):
        """Detect objects in one frame and estimate per-target camera coordinates.

        Depth Zc is estimated from the known object size (D["high"]/D["wide"],
        cm) and the detected box size via the pinhole model; Xc/Yc follow from
        the left-camera intrinsics.  Updates the target list, 3-D plot and
        result display as side effects.
        Returns (annotated frame, (N, X, Y, Z, L) lists, coordinate tuples, crops).
        NOTE(review): the bare `except` falls back to showing the raw image on
        ANY failure, silently hiding real errors — narrow it if possible.
        """
        t1 = time.time()
        fx, fy = self.D["lfx"], self.D["lfy"]
        u0, v0 = self.D["lu0"], self.D["lv0"]
        coordinates, items = list(), list()
        N, X, Y, Z, L = [], [], [], [], []
        self.target_listWidget.clear()
        self.target_list = []
        try:
            results, images = self.detect(image)
            result, frame = results[0], images[0]
            img_height, img_width, channels = frame.shape
            for i in range(len(result.boxes.data)):
                # Box row layout: [x1, y1, x2, y2, conf, class].
                class_name = result.names[int(result.boxes.data[i][5])]
                x = float(result.boxes.data[i][0])
                y = float(result.boxes.data[i][1])
                w = float(result.boxes.data[i][2])
                h = float(result.boxes.data[i][3])
                # Average the depth implied by box height and box width.
                Zc = ((self.D["high"]*fy/abs(h-y))+(self.D["wide"]*fx/abs(w-x)))/2
                x_w, y_h = (x + w) / 2, (y + h) / 2
                Xc = -((Zc*(x_w-u0))/fx)
                Yc = -((Zc*(y_h-v0))/fy)
                Lc = (Xc**2 + Yc**2 + Zc**2)**0.5
                if self.D["show_coordinate"]:
                    frame = self.show_chinese(frame, "x:{:.2f}cm\ny:{:.2f}cm\nz:{:.2f}cm".format(Xc, Yc, Zc), (w+5, y-10), (255,250,0))
                target_image = image[int(y):int(h), int(x):int(w)]
                qt_image = QPixmap.fromImage(self.mat_to_qimage(target_image))
                item = QListWidgetItem(QIcon(qt_image), class_name)
                self.target_listWidget.addItem(item)
                self.target_list.append([target_image, class_name, Xc, Yc, Zc, Lc])
                items.append(target_image)
                N.append(class_name)
                X.append(Xc)
                Y.append(Yc)
                Z.append(Zc)
                L.append(Lc)
                coordinates.append((class_name, Xc, Yc, Zc))
        except:
            # Fall back to the raw input frame on any detection/plot failure.
            frame = image
            img_height, img_width, channels = frame.shape
        # Depth (Z) is passed as the plot's x-axis — see plot_3D.
        self.plot_3D(N, Z, X, Y)
        self.N, self.X, self.Y, self.Z = N, X, Y, Z
        if self.D["show_fps"]:
            # NOTE(review): 1/(time.time()-t1) can divide by zero on a very fast frame.
            frame = self.show_chinese(frame,"fps: {:.2f}".format(1/(time.time()-t1)),(img_width-150,30),(255,0,0))
        self.show_result(frame)
        return frame, (N, X, Y, Z, L), coordinates, items

    # Monocular batch (directory) detection
    def monocularBatch(self):
        """Run monocularPredict over every image file in D["source"], with pause/stop support."""
        files = os.listdir(self.D["source"])
        self.progressbar.setRange(0, len(files))
        num_value = 0
        self.singal.connect(self.update_progressbar)
        self.results_list = []
        for file in files:
            path = self.D["source"]
            self.event.wait()           # block while paused
            if self.run == False:       # stop requested
                break
            if file.endswith((".jpg",".png",".bmp")):
                path = path + "/" + file
                image = cv2.imread(path, 1)
                qt_image = QPixmap.fromImage(self.mat_to_qimage(image))
                item = QListWidgetItem(QIcon(qt_image), file)
                self.batch_listWidget.addItem(item)
                frame, re1, re2, items = self.monocularPredict(image)
                self.results_list.append([frame, re1, items])
                num_value += 1
                self.singal.emit(num_value)
                if self.D["save"]:
                    cv2.imwrite(self.D["save_path"]+"/"+file, frame)

    # Monocular video detection
    def monocularVidoe(self):
        """Run monocularPredict frame-by-frame over the D["source"] video file."""
        video_path = self.D["source"]
        out = None
        if self.D["save"]:
            # Saved output is fixed at 1280x720 @ 20 fps.
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(self.D["save_path"]+"/"+os.path.basename(self.D["source"]), fourcc, 20.0, (1280, 720))
        video = cv2.VideoCapture(video_path)
        framse = video.get(cv2.CAP_PROP_FRAME_COUNT)
        self.progressbar.setRange(0, int(framse))
        num_value = 0
        self.singal.connect(self.update_progressbar)
        while video.isOpened() and self.run:
            self.event.wait()
            success, frame = video.read()
            if success:
                frame, re1, re2, items = self.monocularPredict(frame)
                if self.D["save"]:
                    out.write(cv2.resize(frame,(1280, 720)))
                num_value += 1
                self.singal.emit(num_value)
        video.release()
        if self.D["save"]:
            out.release()

    # Monocular realtime (camera) detection
    def monocularRealtime(self):
        """Run monocularPredict on live camera frames until end_predict() clears self.run."""
        capture = cv2.VideoCapture(int(self.D["source"]))  # 0 is the built-in camera
        out = None
        if self.D["save"]:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(self.D["save_path"] + "/" + "output.mp4", fourcc, 20.0, (1280, 720))
        while (True):
            self.event.wait()
            if self.run == False:
                break
            # ret reports whether a frame was grabbed; frame is the BGR image.
            ret, frame = capture.read()
            # Mirror horizontally so the preview behaves like a mirror.
            frame = cv2.flip(frame, 1)
            frame, re1, re2, items = self.monocularPredict(frame)
            if self.D["save"]:
                out.write(cv2.resize(frame,(1280, 720)))
        capture.release()
        if self.D["save"]:
            out.release()

    # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    # Binocular single-pair detection
    def binocularPredict(self, image_l=None, image_r=None):
        """Detect in both views, match boxes across views, and triangulate coordinates.

        Left/right detections are matched by class id, confidence gap <= 1 and
        box geometry within 10 px; depth comes from the disparity d and the
        baseline D["spaceBetween"].  Displays left, right, or a side-by-side
        combination depending on the checked radio button.
        Returns (display frame, (N, X, Y, Z, L) lists, coordinate tuples, crops).
        NOTE(review): removing a matched right box by value while iterating
        `range(right_length)` shifts the remaining indices — later r values can
        skip or re-read entries.  The bare `except` also hides all errors.
        """
        t1 = time.time()
        fxl, fyl = self.D["lfx"], self.D["lfy"]
        u0l, v0l = self.D["lu0"], self.D["lv0"]
        fxr, fyr = self.D["rfx"], self.D["rfy"]
        u0r, v0r = self.D["ru0"], self.D["rv0"]
        coordinates, items = list(), list()
        N, X, Y, Z, L = [], [], [], [], []
        self.target_listWidget.clear()
        self.target_list = []
        try:
            results_left, images_left = self.detect(image_l)
            results_right, images_right = self.detect(image_r)
            image_left, image_right = images_left[0], images_right[0]
            result_left, result_right = results_left[0], results_right[0]
            left_length, right_length = len(result_left.boxes.data), len(result_right.boxes.data)
            for l in range(left_length):
                # Box row layout: [x1, y1, x2, y2, conf, class].
                l_id = int(result_left.boxes.data[l][5])
                lx = float(result_left.boxes.data[l][0])
                ly = float(result_left.boxes.data[l][1])
                lw = float(result_left.boxes.data[l][2])
                lh = float(result_left.boxes.data[l][3])
                lhight, lwidth = lh - ly , lw - lx
                for r in range(right_length):
                    r_id = int(result_right.boxes.data[r][5])
                    rx = float(result_right.boxes.data[r][0])
                    ry = float(result_right.boxes.data[r][1])
                    rw = float(result_right.boxes.data[r][2])
                    rh = float(result_right.boxes.data[r][3])
                    rhight, rwidth = rh - ry, rw - rx
                    # Reject mismatched class or confidence.
                    if l_id != r_id or abs(result_left.boxes.data[l][4]-result_right.boxes.data[r][4])>1:
                        continue
                    # Reject boxes whose size/vertical position differ by more than 10 px.
                    if abs(lhight-rhight)>10 or abs(lwidth-rwidth)>10 or abs(ly-ry)>10 or abs(lh-rh)>10:
                        continue
                    Nc = result_left.names[l_id]
                    # Box centers in each view; their separation is the disparity.
                    x1, x2 = (lx + lw) / 2, (rx + rw) / 2
                    y1, y2 = (ly + lh) / 2, (ry + rh) / 2
                    d = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** (0.5)
                    # Stereo depth: Z = f * baseline / disparity.
                    Zc = (fxl * self.D["spaceBetween"]) / d
                    Xc = -(((Zc * (x1 - u0l)) / fxl) + ((Zc * (x2 - u0r)) / fxr))
                    Yc = -(((Zc * (y1 - v0l)) / fyl) + ((Zc * (y2 - v0r)) / fyr))
                    Lc = (Xc ** 2 + Yc ** 2 + Zc ** 2) ** 0.5
                    if self.D["show_coordinate"]:
                        image_left = self.show_chinese(image_left, "x:{:.2f}cm\ny:{:.2f}cm\nz:{:.2f}cm".format(Xc,Yc,Zc),(lw+5, ly-10),(255, 250, 0))
                        image_right = self.show_chinese(image_right,"x:{:.2f}cm\ny:{:.2f}cm\nz:{:.2f}cm".format(Xc,Yc,Zc),(rw+5, ry-10),(255, 250, 0))
                    target_image = image_l[int(ly):int(lh), int(lx):int(lw)]
                    qt_image = QPixmap.fromImage(self.mat_to_qimage(target_image))
                    item = QListWidgetItem(QIcon(qt_image), Nc+"_l")
                    self.target_listWidget.addItem(item)
                    self.target_list.append([target_image, Nc, Xc, Yc, Zc, Lc])
                    items.append(target_image)
                    N.append(Nc)
                    X.append(Xc)
                    Y.append(Yc)
                    Z.append(Zc)
                    L.append(Lc)
                    coordinates.append((Nc, Xc, Yc, Zc))
                    # Consume the matched right-view box so it can't match again.
                    result_right.boxes.data.remove(result_right.boxes.data[r])
                    break
        except:
            # Fall back to the raw input frames on any failure.
            image_left, image_right = image_l, image_r
        # Depth (Z) is passed as the plot's x-axis — see plot_3D.
        self.plot_3D(N, Z, X, Y)
        self.N, self.X, self.Y, self.Z = N, X, Y, Z
        # Display selection: "左" = left view, "右" = right view, else side-by-side.
        ck = self.layout.checkedButton().text()
        if ck == "左":
            newImage = image_left
        elif ck == "右":
            newImage = image_right
        else:
            height, width, channels = image_left.shape
            image_right = cv2.resize(image_right, (width, height))
            newImage = np.zeros((height, width * 2, channels), dtype=np.uint8)
            newImage[:height, :width] = image_left
            newImage[:height, width:] = image_right
        height, width, channels = newImage.shape
        if self.D["show_fps"]:
            newImage = self.show_chinese(newImage, "fps: {:.2f}".format(1/(time.time()-t1)), (width-200, 30),(255,0,0))
        self.show_result(newImage)
        return newImage, (N, X, Y, Z, L), coordinates, items

    # Binocular batch (directory) detection
    def binocularBatch(self):
        """Run binocularPredict over paired image files from the left/right directories.

        Identical left/right paths mean each file is a side-by-side stereo
        image that is split down the middle.  Files are paired positionally
        via zip of the two listings.
        """
        if self.D["leftView"] == self.D["rightView"]:
            same = True
        else:
            same = False
        num_value = 0
        files_left,files_right = os.listdir(self.D["leftView"]),os.listdir(self.D["rightView"])
        self.progressbar.setRange(0, len(files_left))
        self.singal.connect(self.update_progressbar)
        t = dict(zip(files_left, files_right))
        self.results_list = []
        for file_left,file_right in t.items():
            self.event.wait()       # block while paused
            if self.run==False:     # stop requested
                break
            if file_left.endswith((".jpg",".png",".bmp")) and file_right.endswith((".jpg",".png",".bmp")):
                if same:
                    # One combined stereo image: split into halves.
                    newImage = cv2.imread(self.D["leftView"]+"/"+file_left)
                    h, w, _ = newImage.shape
                    wf = w // 2
                    image_l = newImage[0:h, 0:wf]
                    image_r = newImage[0:h, wf:w]
                else:
                    image_l = cv2.imread(self.D["leftView"]+"/"+file_left, 1)
                    image_r = cv2.imread(self.D["rightView"]+"/"+file_right, 1)
                qt_image_l = QPixmap.fromImage(self.mat_to_qimage(image_l))
                qt_image_r = QPixmap.fromImage(self.mat_to_qimage(image_r))
                item_l = QListWidgetItem(QIcon(qt_image_l), file_left)
                item_r = QListWidgetItem(QIcon(qt_image_r), file_right)
                self.batch_listWidget.addItem(item_l)
                self.batch_listWidget.addItem(item_r)
                frame, re1, re2, items = self.binocularPredict(image_l, image_r)
                # Appended twice so the result indices line up with the two list rows.
                self.results_list.append([frame, re1, items])
                self.results_list.append([frame, re1, items])
                num_value += 1
                self.singal.emit(num_value)
                if self.D["save"]:
                    cv2.imwrite(self.D["save_path"]+"/"+file_left, frame)
        self.singal.emit(len(files_left))

    # Binocular video detection
    def binocularVidoe(self):
        """Run binocularPredict frame-by-frame over the left/right video files.

        Identical paths mean one side-by-side stereo video split per frame.
        """
        if self.D["leftView"]==self.D["rightView"]:
            same = True
        else:
            same = False
        out = None
        if self.D["save"]:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(self.D["save_path"] + "/" + os.path.basename(self.D["leftView"]), fourcc, 20.0, (1280, 720))
        video_path_left, video_path_right = self.D["leftView"], self.D["rightView"]
        videol, videor = cv2.VideoCapture(video_path_left), cv2.VideoCapture(video_path_right)
        framse = videol.get(cv2.CAP_PROP_FRAME_COUNT)
        self.progressbar.setRange(0, int(framse))
        num_value = 0
        self.singal.connect(self.update_progressbar)
        while videol.isOpened() and videor.isOpened() and self.run:
            self.event.wait()
            successl, framel = videol.read()
            successr, framer = videor.read()
            if successl and successr:
                if same:
                    image = framel
                    h, w, _ = image.shape
                    wf = w // 2
                    image_l = image[0:h, 0:wf]
                    image_r = image[0:h, wf:w]
                else:
                    image_l, image_r = framel, framer
                frame, re1, re2, items = self.binocularPredict(image_l, image_r)
                num_value += 1
                self.singal.emit(num_value)
                if self.D["save"]:
                    out.write(cv2.resize(frame, (1280, 720)))
        videol.release()
        videor.release()
        if self.D["save"]:
            out.release()

    # Binocular realtime (camera) detection
    def binocularRealtime(self):
        """Run binocularPredict on live camera frames.

        Identical left/right sources mean one 2560x720 stereo camera split in
        half per frame; otherwise two separate camera indices are opened.
        """
        out = None
        if self.D["save"]:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(self.D["save_path"] + "/" + "output.mp4", fourcc, 20.0, (1280, 720))
        if self.D["rightView"] == self.D["leftView"]:
            cap = cv2.VideoCapture(int(self.D["leftView"]))
            # Ask the stereo camera for the combined 2560x720 frame.
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 2560)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
            while self.run:
                ret, frame = cap.read()
                self.event.wait()       # block while paused
                left_frame = frame[0:720, 0:1280]
                right_frame = frame[0:720, 1280:2560]
                frame, re1, re2, items = self.binocularPredict(image_l=left_frame, image_r=right_frame)
                if self.D["save"]:
                    out.write(cv2.resize(frame, (1280, 720)))
            cap.release()
        else:
            cap_l = cv2.VideoCapture(int(self.D["leftView"]))
            cap_r = cv2.VideoCapture(int(self.D["rightView"]))
            while self.run:
                self.event.wait()
                ret, image_l = cap_l.read()
                ret, image_r = cap_r.read()
                frame, re1, re2, items = self.binocularPredict(image_l, image_r)
                if self.D["save"]:
                    out.write(cv2.resize(frame, (1280, 720)))
            cap_l.release()
            cap_r.release()
        if self.D["save"]:
            out.release()


if __name__=='__main__':
    # Entry point: enable Hi-DPI scaling, build the UI and run the Qt event loop.
    QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    app = QApplication([])
    mypredict = myPredict()
    mypredict.mainWindow.show()
    app.exec_()
    sys.exit()
2201_75373101/TargetSingleAndBinocularRanging
yolo_predict.py
Python
unknown
74,604
import os,yaml import sys import json from pathlib import Path from PyQt5 import QtGui,QtCore,QtWidgets from PyQt5.QtGui import QIcon, QPixmap from PyQt5.QtCore import Qt from ultralytics import YOLO from threading import Thread from PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5.QtWidgets import QFileDialog,QMessageBox import photoshower,Tensorboradshow from test import QMainWindow_new class Ui_MainWindow(object): font1 = QtGui.QFont("Adobe 黑体 Std R") font1.setPixelSize(32) font2 = QtGui.QFont("Adobe 黑体 Std R") font2.setPixelSize(24) font3 = QtGui.QFont("Adobe 黑体 Std R") font3.setPixelSize(16) singal = pyqtSignal(int) singal_2 = pyqtSignal(int) singal_3 = pyqtSignal(int) singal_4= pyqtSignal(dict) # singals = [None,singal,singal_2,singal_3,singal_4] device_dict = {"cpu":0, 0:1, "tpu":2} def __init__(self): with open('runin_train.json',encoding='utf-8') as file: self.D=json.load(file) def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(1200, 650) label_backdrop = QLabel(MainWindow) label_backdrop.resize(1200, 650) label_backdrop.setStyleSheet("background-color: rgb(240, 250, 250);") label_title = QLabel(MainWindow) label_title.resize(1200, 50) label_title.setText(" 训练模型") label_title.setFont(QtGui.QFont("Adobe 黑体 Std R", 16)) label_title.setStyleSheet("background-color: rgb(50, 200, 200);") label_T = QLabel(MainWindow) label_T.resize(40, 40) label_T.move(5, 5) label_T.setPixmap(QPixmap(r"ui\label.jpg").scaled(label_T.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)) button_close = QPushButton(MainWindow) button_close.setStyleSheet("QPushButton {border: none;}") button_close.setIcon(QIcon(r"ui\close.png")) button_close.resize(50, 50) button_close.move(1150, 0) button_close.clicked.connect(MainWindow.close) button_min = QPushButton(MainWindow) button_min.setStyleSheet("QPushButton {border: none;}") button_min.setIcon(QIcon(r"ui\min.png")) button_min.resize(50, 50) button_min.move(1100, 0) 
button_min.clicked.connect(MainWindow.showMinimized) # self.centralwidget = QtWidgets.QWidget(MainWindow) # self.centralwidget.setObjectName("centralwidget") # self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget) # self.horizontalLayout.setObjectName("horizontalLayout") # ————————————————————————————————————————————————————————————————————— self.label_set = QtWidgets.QLabel(MainWindow) self.label_set.setGeometry(QtCore.QRect(20, 60, 200, 30)) self.label_set.setObjectName("label_15") # ———————————————————————————————————————————————————————————————————————————— self.label_model = QtWidgets.QLabel(MainWindow) self.label_model.setGeometry(QtCore.QRect(30, 120, 120, 30)) self.label_model.setObjectName("label_7") self.lineEdit_model = QtWidgets.QLineEdit(MainWindow) self.lineEdit_model.setText(self.D['model']) self.lineEdit_model.setGeometry(QtCore.QRect(150, 120, 300, 30)) self.lineEdit_model.setObjectName("lineEdit_4") self.toolButton_model = QtWidgets.QToolButton(MainWindow) self.toolButton_model.setGeometry(QtCore.QRect(470, 120, 60, 30)) self.toolButton_model.setObjectName("toolButton") # ———————————————————————————————————————————————————————————————————————————— self.label_yaml = QtWidgets.QLabel(MainWindow) self.label_yaml.setGeometry(QtCore.QRect(30, 170, 170, 30)) self.label_yaml.setObjectName("label_8") self.lineEdit_yaml = QtWidgets.QLineEdit(MainWindow) self.lineEdit_yaml.setText(self.D['yaml']) self.lineEdit_yaml.setGeometry(QtCore.QRect(150, 170, 300, 30)) self.lineEdit_yaml.setObjectName("lineEdit_5") self.toolButton_yaml = QtWidgets.QToolButton(MainWindow) self.toolButton_yaml.setGeometry(QtCore.QRect(470, 170, 60, 30)) self.toolButton_yaml.setObjectName("toolButton_2") # ———————————————————————————————————————————————————————————————————————————— self.label_path = QtWidgets.QLabel(MainWindow) self.label_path.setGeometry(QtCore.QRect(30, 220, 170, 30)) self.label_path.setObjectName("label_11") self.lineEdit_path = 
QtWidgets.QLineEdit(MainWindow) self.lineEdit_path.setText(self.D['Path']) self.lineEdit_path.setGeometry(QtCore.QRect(150, 220, 300, 30)) self.lineEdit_path.setObjectName("lineEdit_8") self.toolButton_path = QtWidgets.QToolButton(MainWindow) self.toolButton_path.setGeometry(QtCore.QRect(470, 220, 60, 30)) self.toolButton_path.setObjectName("toolButton_5") # ———————————————————————————————————————————————————————————————————————————— self.label_epochs = QtWidgets.QLabel(MainWindow) self.label_epochs.setGeometry(QtCore.QRect(30, 300, 170, 30)) self.label_epochs.setObjectName("label_5") self.lineEdit_epochs = QtWidgets.QLineEdit(MainWindow) self.lineEdit_epochs.setText(str(self.D['epochs'])) self.lineEdit_epochs.setGeometry(QtCore.QRect(170, 300, 80, 30)) self.lineEdit_epochs.setObjectName("lineEdit_2") # ———————————————————————————————————————————————————————————————————————————— self.label_wait = QtWidgets.QLabel(MainWindow) self.label_wait.setGeometry(QtCore.QRect(310, 300, 120, 30)) self.label_wait.setObjectName("label_10") self.lineEdit_wait = QtWidgets.QLineEdit(MainWindow) self.lineEdit_wait.setText(str(self.D['wait'])) self.lineEdit_wait.setGeometry(QtCore.QRect(450, 300, 80, 30)) self.lineEdit_wait.setObjectName("lineEdit_6") # ———————————————————————————————————————————————————————————————————————————— self.label_imgsz = QtWidgets.QLabel(MainWindow) self.label_imgsz.setGeometry(QtCore.QRect(30, 350, 120, 30)) self.label_imgsz.setObjectName("label_14") self.lineEdit_imgsz = QtWidgets.QLineEdit(MainWindow) self.lineEdit_imgsz.setText(str(self.D['imgsz'])) self.lineEdit_imgsz.setGeometry(QtCore.QRect(170, 350, 80, 30)) self.lineEdit_imgsz.setObjectName("lineEdit_9") # ———————————————————————————————————————————————————————————————————————————— self.label_batch = QtWidgets.QLabel(MainWindow) self.label_batch.setGeometry(QtCore.QRect(310, 350, 120, 30)) self.label_batch.setObjectName("label_13") self.lineEdit_batch = QtWidgets.QLineEdit(MainWindow) 
self.lineEdit_batch.setText(str(self.D['batch'])) self.lineEdit_batch.setGeometry(QtCore.QRect(450, 350, 80, 30)) self.lineEdit_batch.setObjectName("lineEdit_7") # ———————————————————————————————————————————————————————————————————————————— self.label_name = QtWidgets.QLabel(MainWindow) self.label_name.setGeometry(QtCore.QRect(30, 400, 120, 30)) self.label_name.setObjectName("label_6") self.lineEdit_name = QtWidgets.QLineEdit(MainWindow) self.lineEdit_name.setText(self.D['name']) self.lineEdit_name.setGeometry(QtCore.QRect(170, 400, 80, 30)) self.lineEdit_name.setObjectName("lineEdit_3") # ———————————————————————————————————————————————————————————————————————————— self.label_device = QtWidgets.QLabel(MainWindow) self.label_device.setGeometry(QtCore.QRect(310, 400, 120, 30)) self.label_device.setObjectName("label_12") self.comboBox_device = QtWidgets.QComboBox(MainWindow) self.comboBox_device.setGeometry(QtCore.QRect(450, 400, 80, 30)) font = QtGui.QFont() font.setPointSize(10) self.comboBox_device.setFont(font) self.comboBox_device.setObjectName("comboBox") self.comboBox_device.addItems(["cpu", "gpu", "tpu"]) self.comboBox_device.setCurrentIndex(self.device_dict[self.D["device"]]) # ———————————————————————————————————————————————————————————————————————————— self.label_box = QtWidgets.QLabel(MainWindow) self.label_box.setGeometry(QtCore.QRect(30, 450, 120, 30)) self.lineEdit_box = QtWidgets.QLineEdit(MainWindow) self.lineEdit_box.setText(str(self.D['box'])) self.lineEdit_box.setGeometry(QtCore.QRect(170, 450, 80, 30)) # ———————————————————————————————————————————————————————————————————————————— self.label_cls = QtWidgets.QLabel(MainWindow) self.label_cls.setGeometry(QtCore.QRect(310, 450, 120, 30)) self.lineEdit_cls = QtWidgets.QLineEdit(MainWindow) self.lineEdit_cls.setText(str(self.D['cls'])) self.lineEdit_cls.setGeometry(QtCore.QRect(450, 450, 80, 30)) # ———————————————————————————————————————————————————————————————————————————— self.label_new = 
QtWidgets.QLabel(MainWindow) self.label_new.setGeometry(QtCore.QRect(30, 520, 200, 30)) self.label_new.setObjectName("label_9") self.checkBox_new = QtWidgets.QCheckBox(MainWindow) self.checkBox_new.setGeometry(QtCore.QRect(220, 520, 80, 30)) self.checkBox_new.setObjectName("checkBox") self.checkBox_new.setChecked(self.D['exist_ok']) # ———————————————————————————————————————————————————————————————————————————————— self.pushButton_look_img = QtWidgets.QPushButton(MainWindow) self.pushButton_look_img.setGeometry(QtCore.QRect(350, 520, 180, 35)) self.pushButton_look_img.setFont(self.font2) self.pushButton_look_img.setObjectName("pushButton_3") # ———————————————————————————————————————————————————————————————————————————————— self.pushButton_set = QtWidgets.QPushButton(MainWindow) self.pushButton_set.setGeometry(QtCore.QRect(30, 570, 120, 35)) self.pushButton_set.setFont(self.font2) self.pushButton_set.setObjectName("pushButton_2") # ———————————————————————————————————————————————————————————————————————————————— self.pushButton_train = QtWidgets.QPushButton(MainWindow) self.pushButton_train.setGeometry(QtCore.QRect(180, 570, 120, 35)) self.pushButton_train.setFont(self.font2) self.pushButton_train.setObjectName("pushButton") # ———————————————————————————————————————————————————————————————————————————————— self.pushButton_look_train = QtWidgets.QPushButton(MainWindow) self.pushButton_look_train.setGeometry(QtCore.QRect(350, 570, 180, 35)) self.pushButton_look_train.setFont(self.font2) self.pushButton_look_train.setObjectName("pushButton_4") # self.horizontalLayout.addWidget(self.frame) # ———————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————— self.label_book = QtWidgets.QLabel(MainWindow) self.label_book.setGeometry(QtCore.QRect(600, 60, 200, 30)) self.label_book.setMaximumSize(QtCore.QSize(150, 30)) self.label_book.setObjectName("label") self.textBrowser_book = QtWidgets.QTextBrowser(MainWindow) 
self.textBrowser_book.setGeometry(QtCore.QRect(620,120, 530, 370)) self.textBrowser_book.setObjectName("textBrowser") # ————————————————————————————————————————————————————————————————————— self.label_now_train = QtWidgets.QLabel(MainWindow) self.label_now_train.setGeometry(QtCore.QRect(620, 525, 200, 30)) self.label_now_train.setObjectName("label_2") self.progressBar_now_train = QtWidgets.QProgressBar(MainWindow) self.progressBar_now_train.setGeometry(QtCore.QRect(820, 525, 350, 30)) self.progressBar_now_train.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor)) self.progressBar_now_train.setProperty("value", 0) self.progressBar_now_train.setObjectName("progressBar") # ————————————————————————————————————————————————————————————————————— self.label_all_train = QtWidgets.QLabel(MainWindow) self.label_all_train.setGeometry(QtCore.QRect(620, 575, 200, 30)) self.label_all_train.setObjectName("label_3") self.progressBar_all_train = QtWidgets.QProgressBar(MainWindow) self.progressBar_all_train.setGeometry(QtCore.QRect(820, 575, 350, 30)) self.progressBar_all_train.setProperty("value", 0) self.progressBar_all_train.setObjectName("progressBar_2") # ————————————————————————————————————————————————————————————————————— # self.horizontalLayout.addWidget(self.frame_2) # MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 947, 30)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "训练模型")) self.label_set.setText(_translate("MainWindow", "<html><head/><body><p>基础设置:</span></p></body></html>")) self.toolButton_model.setText(_translate("MainWindow", "file")) self.toolButton_yaml.setText(_translate("MainWindow", "file")) 
self.toolButton_path.setText(_translate("MainWindow", "dir")) self.label_epochs.setText(_translate("MainWindow", "<html><head/><body><p>训练轮数:</span></p></body></html>")) self.label_name.setText(_translate("MainWindow", "<html><head/><body><p>模型名称:</span></p></body></html>")) self.label_model.setText(_translate("MainWindow", "<html><head/><body><p>模型文件:</span></p></body></html>")) self.label_yaml.setText(_translate("MainWindow", "<html><head/><body><p>数据文件:</span></p></body></html>")) self.label_path.setText(_translate("MainWindow", "<html><head/><body><p>保存路径:</span></p></body></html>")) self.label_device.setText(_translate("MainWindow", "<html><head/><body><p>训练设备:</span></p></body></html>")) self.label_box.setText(_translate("MainWindow","<html><head/><body><p>边框损失:</span></p></body></html>")) self.label_cls.setText(_translate("MainWindow","<html><head/><body><p>类别损失:</span></p></body></html>")) self.checkBox_new.setText(_translate("MainWindow", "开启")) self.label_new.setText(_translate("MainWindow", "<html><head/><body><p>覆盖同名实验:</span></p></body></html>")) self.pushButton_set.setText(_translate("MainWindow", "保存设置")) self.pushButton_train.setText(_translate("MainWindow", "开始训练")) self.label_wait.setText(_translate("MainWindow", "<html><head/><body><p>早停轮数:</span></p></body></html>")) self.label_batch.setText(_translate("MainWindow", "<html><head/><body><p>批量大小:</span></p></body></html>")) self.label_imgsz.setText(_translate("MainWindow", "<html><head/><body><p>图片大小:</span></p></body></html>")) self.pushButton_look_img.setText(_translate("MainWindow", "查看数据图片")) self.pushButton_look_train.setText(_translate("MainWindow", "查看训练结果")) self.label_book.setText(_translate("MainWindow", "<html><head/><body><p>日志记录</span></p></body></html>")) self.label_now_train.setText(_translate("MainWindow", "<html><head/><body><p>现批次训练进度:</span></p></body></html>")) self.label_all_train.setText(_translate("MainWindow", "<html><head/><body><p>总批次训练进度:</span></p></body></html>")) 
self.label_set.setFont(self.font1) self.label_book.setFont(self.font1) self.label_epochs.setFont(self.font2) self.label_name.setFont(self.font2) self.label_model.setFont(self.font2) self.label_yaml.setFont(self.font2) self.label_path.setFont(self.font2) self.label_device.setFont(self.font2) self.label_box.setFont(self.font2) self.label_cls.setFont(self.font2) self.label_new.setFont(self.font2) self.label_wait.setFont(self.font2) self.label_batch.setFont(self.font2) self.label_imgsz.setFont(self.font2) self.label_now_train.setFont(self.font2) self.label_all_train.setFont(self.font2) class MyWindow(QMainWindow_new, Ui_MainWindow): def __init__(self): super(MyWindow, self).__init__() self.setupUi(self) self.toolButton_model.clicked.connect(self.select_model) self.toolButton_yaml.clicked.connect(self.select_yaml) self.toolButton_path.clicked.connect(self.select_savePath) self.pushButton_set.clicked.connect(self.save_all) self.pushButton_train.clicked.connect(self.start_train) self.pushButton_look_img.clicked.connect(self.ImageViewer) self.pushButton_look_train.clicked.connect(self.ProcessView) def select_model(self): fname,_=QFileDialog.getOpenFileName(self,'Open file','./','Image files(*.pt)') self.lineEdit_model.setText(fname) def select_yaml(self): fname , _=QFileDialog.getOpenFileName(self,'Open file','./','Image files(*.yaml)') self.lineEdit_yaml.setText(fname) def select_savePath(self): directory = QtWidgets.QFileDialog.getExistingDirectory(None, "选择文件夹", "D:/") self.lineEdit_path.setText(directory) def save_all(self): self.D['model']=self.lineEdit_model.text() self.D['yaml']=self.lineEdit_yaml.text() self.D['Path']=self.lineEdit_path.text() self.D['epochs']=int(self.lineEdit_epochs.text()) self.D['name']=self.lineEdit_name.text() self.D['wait']=int(self.lineEdit_wait.text()) self.D['batch']=int(self.lineEdit_batch.text()) self.D['imgsz']=int(self.lineEdit_imgsz.text()) self.D['box']=float(self.lineEdit_box.text()) self.D['cls'] = float(self.lineEdit_cls.text()) 
self.D['exist_ok']=self.checkBox_new.isChecked() if self.comboBox_device.currentText()== 'cpu': self.D['device']=self.comboBox_device.currentText() elif self.comboBox_device.currentText()== 'gpu': self.D['device'] = 0 elif self.comboBox_device.currentText()== 'tpu': self.D['device']= 'tpu' else: self.D['device']=-1 sub_dir = 'Ultralytics' path = Path.home() / 'AppData' / 'Roaming' / sub_dir / 'settings.yaml' try: with open(path,'r') as file: confign = yaml.safe_load(file) confign['runs_dir'] = self.D['Path'] confign['weights_dir'] = self.D['Path'] with open(path,'w') as file: yaml.safe_dump(confign, file) except: pass with open('runin_train.json', 'w') as file: json.dump(self.D, file, indent=4) QMessageBox.information(self,"操作成功","保存成功") # 当前批次训练进度传递 def temp_progressBar(self, value): self.progressBar_now_train.setValue(value) # 总批次训练进度传递 def temp_progressBar_2(self, value): self.progressBar_all_train.setValue(value) if value==self.D["epochs"]: QMessageBox.information(self, "训练完成", "保存到" + self.D["Path"] + "/runs/detect/" + self.D["name"]) def getpBar(self,value): self.V = value self.progressBar_now_train.setRange(0, value) def getmetrics(self, value): self.textBrowser_book.append('第' + str(self.i) + '轮\n') self.textBrowser_book.append(('metrics/precision(B):' +'\t'+ str(value['metrics/precision(B)'])+'\n')) self.textBrowser_book.append(('metrics/recall(B):' +'\t\t'+ str(value['metrics/recall(B)'])+'\n')) self.textBrowser_book.append(('metrics/mAP50(B)' +'\t\t'+ str(value['metrics/mAP50(B)'])+'\n')) self.textBrowser_book.append(('metrics/mAP50-95(B)' +'\t\t'+ str(value['metrics/mAP50-95(B)'])+'\n')) self.textBrowser_book.append(('val/box_loss' +'\t\t'+ str(value['val/box_loss'])+'\n')) self.textBrowser_book.append(('val/cls_loss' +'\t\t'+ str(value['val/cls_loss'])+'\n')) self.textBrowser_book.append(('val/dfl_loss' +'\t\t'+ str(value['val/dfl_loss'])+'\n')) self.i += 1 def ImageViewer(self): self.photo_page = photoshower.Ui_photoshower() try: with 
open(self.D["yaml"]) as file: config = yaml.safe_load(file) lpath = config["path"] if config["train"] != None: self.photo_page.datasets_train = lpath+"/"+config["train"] if config["val"] != None: self.photo_page.datasets_val = lpath+"/"+config["val"] if config["test"] != None: self.photo_page.datasets_test = lpath + "/" + config["test"] self.photo_page.show() except: QMessageBox.warning(self,"错误", "文件不存在") def ProcessView(self): self.ten=Tensorboradshow.Ui_tensorboradshow() self.ten.show() def start_train(self): self.i = 1 self.textBrowser_book.clear() self.progressBar_all_train.setRange(0, self.D['epochs']) self.singal.emit(0) try: target = self.begin_train Thread(target=target).start() except: QMessageBox.warning(self,"运行失败","请检查输入") def begin_train(self): self.singal.connect(self.temp_progressBar_2) self.singal_2.connect(self.temp_progressBar) self.singal_3.connect(self.getpBar) self.singal_4.connect(self.getmetrics) model = YOLO(self.D['model']) model.train(data=self.D['yaml'], save_dir=self.D['Path'], epochs=self.D['epochs'], imgsz=self.D['imgsz'], patience=self.D['wait'], batch=self.D['batch'], name=self.D['name'], device=self.D['device'], box=self.D["box"], cls=self.D["cls"], exist_ok=self.D['exist_ok'], workers=0, singal=self.singal, singal_2=self.singal_2, singal_3=self.singal_3, singal_4=self.singal_4) self.singal_2.emit(self.V) self.singal.emit(self.D['epochs']) os.system("tensorboard --logdir="+self.D["Path"]+"/detect/"+self.D["name"]) if __name__ == "__main__": QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) app = QApplication([]) stats = MyWindow() stats.show() app.exec_() sys.exit()
2201_75373101/TargetSingleAndBinocularRanging
yolo_train.py
Python
unknown
26,268
<script> export default { onLaunch: function() { console.log('App Launch') }, onShow: function() { console.log('App Show') }, onHide: function() { console.log('App Hide') } } </script> <style> /*每个页面公共css */ </style>
2401_82882503/uniDemo01
App.vue
Vue
unknown
254
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <script> var coverSupport = 'CSS' in window && typeof CSS.supports === 'function' && (CSS.supports('top: env(a)') || CSS.supports('top: constant(a)')) document.write( '<meta name="viewport" content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0' + (coverSupport ? ', viewport-fit=cover' : '') + '" />') </script> <title></title> <!--preload-links--> <!--app-context--> </head> <body> <div id="app"><!--app-html--></div> <script type="module" src="/main.js"></script> </body> </html>
2401_82882503/uniDemo01
index.html
HTML
unknown
672
import App from './App' // #ifndef VUE3 import Vue from 'vue' import './uni.promisify.adaptor' Vue.config.productionTip = false App.mpType = 'app' const app = new Vue({ ...App }) app.$mount() // #endif // #ifdef VUE3 import { createSSRApp } from 'vue' export function createApp() { const app = createSSRApp(App) return { app } } // #endif
2401_82882503/uniDemo01
main.js
JavaScript
unknown
352
<template> <view class="content"> <image class="logo" src="/static/logo.png"></image> <view class="text-area"> <text class="title">{{title}}</text> </view> </view> </template> <script> export default { data() { return { title: 'Hello unioi ' } }, onLoad() { }, methods: { } } </script> <style> .content { display: flex; flex-direction: column; align-items: center; justify-content: center; } .logo { height: 200rpx; width: 200rpx; margin-top: 200rpx; margin-left: auto; margin-right: auto; margin-bottom: 50rpx; } .text-area { display: flex; justify-content: center; } .title { font-size: 36rpx; color: #8f8f94; } </style>
2401_82882503/uniDemo01
pages/index/index.vue
Vue
unknown
701
uni.addInterceptor({ returnValue (res) { if (!(!!res && (typeof res === "object" || typeof res === "function") && typeof res.then === "function")) { return res; } return new Promise((resolve, reject) => { res.then((res) => { if (!res) return resolve(res) return res[0] ? reject(res[0]) : resolve(res[1]) }); }); }, });
2401_82882503/uniDemo01
uni.promisify.adaptor.js
JavaScript
unknown
373
/** * 这里是uni-app内置的常用样式变量 * * uni-app 官方扩展插件及插件市场(https://ext.dcloud.net.cn)上很多三方插件均使用了这些样式变量 * 如果你是插件开发者,建议你使用scss预处理,并在插件代码中直接使用这些变量(无需 import 这个文件),方便用户通过搭积木的方式开发整体风格一致的App * */ /** * 如果你是App开发者(插件使用者),你可以通过修改这些变量来定制自己的插件主题,实现自定义主题功能 * * 如果你的项目同样使用了scss预处理,你也可以直接在你的 scss 代码中使用如下变量,同时无需 import 这个文件 */ /* 颜色变量 */ /* 行为相关颜色 */ $uni-color-primary: #007aff; $uni-color-success: #4cd964; $uni-color-warning: #f0ad4e; $uni-color-error: #dd524d; /* 文字基本颜色 */ $uni-text-color:#333;//基本色 $uni-text-color-inverse:#fff;//反色 $uni-text-color-grey:#999;//辅助灰色,如加载更多的提示信息 $uni-text-color-placeholder: #808080; $uni-text-color-disable:#c0c0c0; /* 背景颜色 */ $uni-bg-color:#ffffff; $uni-bg-color-grey:#f8f8f8; $uni-bg-color-hover:#f1f1f1;//点击状态颜色 $uni-bg-color-mask:rgba(0, 0, 0, 0.4);//遮罩颜色 /* 边框颜色 */ $uni-border-color:#c8c7cc; /* 尺寸变量 */ /* 文字尺寸 */ $uni-font-size-sm:12px; $uni-font-size-base:14px; $uni-font-size-lg:16px; /* 图片尺寸 */ $uni-img-size-sm:20px; $uni-img-size-base:26px; $uni-img-size-lg:40px; /* Border Radius */ $uni-border-radius-sm: 2px; $uni-border-radius-base: 3px; $uni-border-radius-lg: 6px; $uni-border-radius-circle: 50%; /* 水平间距 */ $uni-spacing-row-sm: 5px; $uni-spacing-row-base: 10px; $uni-spacing-row-lg: 15px; /* 垂直间距 */ $uni-spacing-col-sm: 4px; $uni-spacing-col-base: 8px; $uni-spacing-col-lg: 12px; /* 透明度 */ $uni-opacity-disabled: 0.3; // 组件禁用态的透明度 /* 文章场景相关 */ $uni-color-title: #2C405A; // 文章标题颜色 $uni-font-size-title:20px; $uni-color-subtitle: #555555; // 二级标题颜色 $uni-font-size-subtitle:26px; $uni-color-paragraph: #3F536E; // 文章段落颜色 $uni-font-size-paragraph:15px;
2401_82882503/uniDemo01
uni.scss
SCSS
unknown
2,217
<script> export default { onLaunch: function() { console.log('App Launch') }, onShow: function() { console.log('App Show') }, onHide: function() { console.log('App Hide') } } </script> <style> /*每个页面公共css */ </style>
2401_82882503/uniDemo2
App.vue
Vue
unknown
254
<!DOCTYPE html> <html lang="zh-CN"> <head> <meta charset="UTF-8" /> <script> var coverSupport = 'CSS' in window && typeof CSS.supports === 'function' && (CSS.supports('top: env(a)') || CSS.supports('top: constant(a)')) document.write( '<meta name="viewport" content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0' + (coverSupport ? ', viewport-fit=cover' : '') + '" />') </script> <title></title> <!--preload-links--> <!--app-context--> </head> <body> <div id="app"><!--app-html--></div> <script type="module" src="/main.js"></script> </body> </html>
2401_82882503/uniDemo2
index.html
HTML
unknown
675
import App from './App' // #ifndef VUE3 import Vue from 'vue' import './uni.promisify.adaptor' Vue.config.productionTip = false App.mpType = 'app' const app = new Vue({ ...App }) app.$mount() // #endif // #ifdef VUE3 import { createSSRApp } from 'vue' export function createApp() { const app = createSSRApp(App) return { app } } // #endif
2401_82882503/uniDemo2
main.js
JavaScript
unknown
352
<template> <view class="content"> <image class="logo" src="/static/logo.png"></image> <view class="text-area"> <text class="title">{{title}}</text> </view> </view> </template> <script> export default { data() { return { title: 'Hello zlx' } }, onLoad() { }, methods: { } } </script> <style> .content { display: flex; flex-direction: column; align-items: center; justify-content: center; } .logo { height: 200rpx; width: 200rpx; margin-top: 200rpx; margin-left: auto; margin-right: auto; margin-bottom: 50rpx; } .text-area { display: flex; justify-content: center; } .title { font-size: 36rpx; color: #8f8f94; } </style>
2401_82882503/uniDemo2
pages/index/index.vue
Vue
unknown
698
uni.addInterceptor({ returnValue (res) { if (!(!!res && (typeof res === "object" || typeof res === "function") && typeof res.then === "function")) { return res; } return new Promise((resolve, reject) => { res.then((res) => { if (!res) return resolve(res) return res[0] ? reject(res[0]) : resolve(res[1]) }); }); }, });
2401_82882503/uniDemo2
uni.promisify.adaptor.js
JavaScript
unknown
373
/** * 这里是uni-app内置的常用样式变量 * * uni-app 官方扩展插件及插件市场(https://ext.dcloud.net.cn)上很多三方插件均使用了这些样式变量 * 如果你是插件开发者,建议你使用scss预处理,并在插件代码中直接使用这些变量(无需 import 这个文件),方便用户通过搭积木的方式开发整体风格一致的App * */ /** * 如果你是App开发者(插件使用者),你可以通过修改这些变量来定制自己的插件主题,实现自定义主题功能 * * 如果你的项目同样使用了scss预处理,你也可以直接在你的 scss 代码中使用如下变量,同时无需 import 这个文件 */ /* 颜色变量 */ /* 行为相关颜色 */ $uni-color-primary: #007aff; $uni-color-success: #4cd964; $uni-color-warning: #f0ad4e; $uni-color-error: #dd524d; /* 文字基本颜色 */ $uni-text-color:#333;//基本色 $uni-text-color-inverse:#fff;//反色 $uni-text-color-grey:#999;//辅助灰色,如加载更多的提示信息 $uni-text-color-placeholder: #808080; $uni-text-color-disable:#c0c0c0; /* 背景颜色 */ $uni-bg-color:#ffffff; $uni-bg-color-grey:#f8f8f8; $uni-bg-color-hover:#f1f1f1;//点击状态颜色 $uni-bg-color-mask:rgba(0, 0, 0, 0.4);//遮罩颜色 /* 边框颜色 */ $uni-border-color:#c8c7cc; /* 尺寸变量 */ /* 文字尺寸 */ $uni-font-size-sm:12px; $uni-font-size-base:14px; $uni-font-size-lg:16px; /* 图片尺寸 */ $uni-img-size-sm:20px; $uni-img-size-base:26px; $uni-img-size-lg:40px; /* Border Radius */ $uni-border-radius-sm: 2px; $uni-border-radius-base: 3px; $uni-border-radius-lg: 6px; $uni-border-radius-circle: 50%; /* 水平间距 */ $uni-spacing-row-sm: 5px; $uni-spacing-row-base: 10px; $uni-spacing-row-lg: 15px; /* 垂直间距 */ $uni-spacing-col-sm: 4px; $uni-spacing-col-base: 8px; $uni-spacing-col-lg: 12px; /* 透明度 */ $uni-opacity-disabled: 0.3; // 组件禁用态的透明度 /* 文章场景相关 */ $uni-color-title: #2C405A; // 文章标题颜色 $uni-font-size-title:20px; $uni-color-subtitle: #555555; // 二级标题颜色 $uni-font-size-subtitle:26px; $uni-color-paragraph: #3F536E; // 文章段落颜色 $uni-font-size-paragraph:15px;
2401_82882503/uniDemo2
uni.scss
SCSS
unknown
2,217
package com.fanchen; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @SpringBootApplication public class SystemAdminJavaApplication { public static void main(String[] args) { SpringApplication.run(SystemAdminJavaApplication.class, args); } }
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/SystemAdminJavaApplication.java
Java
apache-2.0
334
package com.fanchen.annotation; import java.lang.annotation.*; @Target({ ElementType.PARAMETER, ElementType.METHOD }) @Retention(RetentionPolicy.RUNTIME) @Documented public @interface Log { /** * 模块 */ String title() default ""; /** * 功能 */ String businessType() default ""; /** * 是否保存请求的参数 */ boolean isSaveRequestData() default true; /** * 是否保存响应的参数 */ boolean isSaveResponseData() default true; }
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/annotation/Log.java
Java
apache-2.0
521
package com.fanchen.aspectj; import cn.hutool.core.util.StrUtil; import cn.hutool.json.JSON; import cn.hutool.json.JSONUtil; import com.fanchen.annotation.Log; import com.fanchen.entity.SysOperateLog; import com.fanchen.utils.AsyncTaskUtil; import com.fanchen.utils.IpUtils; import com.fanchen.utils.SecurityUtil; import com.fanchen.utils.ServletUtil; import lombok.extern.slf4j.Slf4j; import org.aspectj.lang.JoinPoint; import org.aspectj.lang.annotation.AfterReturning; import org.aspectj.lang.annotation.Aspect; import org.springframework.http.HttpMethod; import org.springframework.stereotype.Component; import org.springframework.validation.BindingResult; import org.springframework.web.multipart.MultipartFile; import javax.annotation.Resource; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.io.IOException; import java.util.Collection; import java.util.Map; @Aspect @Component @Slf4j public class LogAspectj { @Resource private AsyncTaskUtil asyncTaskUtil; @AfterReturning(pointcut = "@annotation(controllerLog)", returning = "jsonResult") public void doAfterReturn(JoinPoint joinPoint, Log controllerLog, Object jsonResult) { handleLog(joinPoint, controllerLog, null, jsonResult); } protected void handleLog(final JoinPoint joinPoint, Log controllerLog, final Exception e, Object jsonResult) { try { String username = SecurityUtil.getLoginUser(); HttpServletRequest request = ServletUtil.getRequest(); // *========数据库日志=========*// SysOperateLog operateLog = new SysOperateLog(); operateLog.setStatus(e != null ? 0 : 1); operateLog.setErrorMsg(e != null ? e.getMessage() : null); operateLog.setOperIp(IpUtils.getIpAddr(request)); operateLog.setOperUrl(ServletUtil.getRequest().getRequestURI()); operateLog.setOperName(StrUtil.isNotBlank(username) ? username : "未知"); String className = joinPoint.getTarget().getClass().getName(); String methodName = joinPoint.getSignature().getName(); operateLog.setMethod(className + "." 
+ methodName + "()"); operateLog.setRequestMethod(request.getMethod()); getControllerMethodDescription(joinPoint, controllerLog, operateLog, jsonResult, request); asyncTaskUtil.recordOperateInfo(operateLog); } catch (Exception exception) { log.error("==========切面通知异常========="); log.error("异常信息:{}", exception.getMessage()); } } public void getControllerMethodDescription(JoinPoint joinPoint, Log controllerLog, SysOperateLog sysOperateLog, Object jsonResult, HttpServletRequest request) throws IOException { sysOperateLog.setBusinessType(controllerLog.businessType()); sysOperateLog.setTitle(controllerLog.title()); if (controllerLog.isSaveRequestData()){ setRequestValue(joinPoint, sysOperateLog, request); } if (controllerLog.isSaveResponseData() && jsonResult != null){ sysOperateLog.setJsonResult(JSONUtil.toJsonStr(jsonResult)); } } private void setRequestValue(JoinPoint joinPoint, SysOperateLog sysOperateLog, HttpServletRequest request) throws IOException { String requestMethod = sysOperateLog.getRequestMethod(); if (HttpMethod.PUT.name().equals(requestMethod) || HttpMethod.POST.name().equals(requestMethod)){ //使用不了 Stream closed 是因为这个InputStream已经被用过了 //String body = StreamUtils.copyToString(request.getInputStream(), StandardCharsets.UTF_8); sysOperateLog.setOperParam(argsArrayToString(joinPoint.getArgs())); }else { Map<String, String[]> parameterMap = request.getParameterMap(); sysOperateLog.setOperParam(StrUtil.toString(parameterMap)); } } /** * 参数拼装 */ private String argsArrayToString(Object[] paramsArray) { StringBuilder params = new StringBuilder(); if (paramsArray != null && paramsArray.length > 0) { for (Object o : paramsArray) { if (o != null && !isFilterObject(o)) { try { JSON jsonObj = JSONUtil.parse(o); params.append(jsonObj.toString()).append(" "); } catch (Exception e) { e.printStackTrace(); } } } } return params.toString().trim(); } /** * 判断是否需要过滤的对象。 * * @param o 对象信息。 * @return 如果是需要过滤的对象,则返回true;否则返回false。 */ @SuppressWarnings("rawtypes") public 
boolean isFilterObject(final Object o) { Class<?> clazz = o.getClass(); if (clazz.isArray()) { return clazz.getComponentType().isAssignableFrom(MultipartFile.class); } else if (Collection.class.isAssignableFrom(clazz)) { Collection collection = (Collection) o; for (Object value : collection) { return value instanceof MultipartFile; } } else if (Map.class.isAssignableFrom(clazz)) { Map map = (Map) o; for (Object value : map.entrySet()) { Map.Entry entry = (Map.Entry) value; return entry.getValue() instanceof MultipartFile; } } return o instanceof MultipartFile || o instanceof HttpServletRequest || o instanceof HttpServletResponse || o instanceof BindingResult; } }
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/aspectj/LogAspectj.java
Java
apache-2.0
5,903
package com.fanchen.common.dto;

import lombok.Data;

/**
 * Data-transfer object for a goods/material item (getters, setters, equals,
 * hashCode and toString generated by Lombok's {@code @Data}).
 *
 * @author fanchen
 * @date 2021/12/11
 * @time 18:07
 */
@Data
public class GoodDto {
    // record identifier
    private Long id;
    // name of the goods item
    private String goodName;
    // presumably the quantity — confirm against caller usage
    private Integer goodNum;
    // presumably a size/specification label — confirm against caller usage
    private String goodSize;
}
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/common/dto/GoodDto.java
Java
apache-2.0
255
package com.fanchen.common.dto;

import lombok.Data;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

/**
 * Navigation-menu tree node DTO (accessors generated by Lombok's
 * {@code @Data}; {@link Serializable} for caching/session transfer).
 */
@Data
public class NavMenu implements Serializable {
    // menu identifier
    private Long id;
    // internal menu name
    private String name;
    // display title
    private String title;
    // icon identifier/class
    private String icon;
    // route path
    private String path;
    // front-end component reference
    private String component;
    // sort order within the same level
    private Integer orderNum;
    // child menus; initialized empty so tree builders can append directly
    private List<NavMenu> children = new ArrayList<>();
}
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/common/dto/NavMenu.java
Java
apache-2.0
428
package com.fanchen.common.exception;

import org.springframework.security.core.AuthenticationException;

/**
 * Authentication-time exception for captcha errors. Extends Spring Security's
 * {@link AuthenticationException} so it flows through the standard
 * authentication-failure handling. (Name suggests it is thrown on captcha
 * verification failure — confirm at throw sites.)
 */
public class CaptchaException extends AuthenticationException {

    /**
     * @param msg detail message describing the captcha failure
     */
    public CaptchaException(String msg) {
        super(msg);
    }
}
2301_78526554/Campus-Epidemic-Management
system-admin-java-main/src/main/java/com/fanchen/common/exception/CaptchaException.java
Java
apache-2.0
240