from __future__ import annotations

import warnings
from typing import TYPE_CHECKING, cast

import numpy as np

if TYPE_CHECKING:
    import numpy.typing as npt

import torch

from monai.utils import Average, look_up_option

from .metric import CumulativeIterationMetric


class ROCAUCMetric(CumulativeIterationMetric):
    """
    Computes the Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to:
    `sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/
    sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_.
    The input `y_pred` and `y` can be a list of channel-first Tensors or a batch-first Tensor.

    An example of the typical execution steps of this metric class follows
    :py:class:`monai.metrics.metric.Cumulative`.

    Args:
        average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``}
            Type of averaging performed if not binary classification.
            Defaults to ``"macro"``.

            - ``"macro"``: calculate metrics for each label, and find their unweighted mean.
              This does not take label imbalance into account.
            - ``"weighted"``: calculate metrics for each label, and find their average,
              weighted by support (the number of true instances for each label).
            - ``"micro"``: calculate metrics globally by considering each element of the label
              indicator matrix as a label.
            - ``"none"``: the scores for each class are returned.

    """

    def __init__(self, average: Average | str = Average.MACRO) -> None:
        super().__init__()
        self.average = average

    def _compute_tensor(self, y_pred: torch.Tensor, y: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        return y_pred, y

    def aggregate(self, average: Average | str | None = None) -> np.ndarray | float | npt.ArrayLike:
        """
        Typically `y_pred` and `y` are stored in the cumulative buffers at each iteration.
        This function reads the buffers and computes the area under the ROC curve.

        Args:
            average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``}
                Type of averaging performed if not binary classification. Defaults to `self.average`.
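
        Example (illustrative; assumes `metric` is a populated ``ROCAUCMetric`` instance
        and overrides the constructor's `average` for this call only)::

            per_class_auc = metric.aggregate(average="none")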

        """
        y_pred, y = self.get_buffer()
        if not isinstance(y_pred, torch.Tensor) or not isinstance(y, torch.Tensor):
            raise ValueError("y_pred and y must be PyTorch Tensors.")
        return compute_roc_auc(y_pred=y_pred, y=y, average=average or self.average)


def _calculate(y_pred: torch.Tensor, y: torch.Tensor) -> float:
    if not (y.ndimension() == y_pred.ndimension() == 1 and len(y) == len(y_pred)):
        raise AssertionError("y and y_pred must be one-dimensional and of the same length.")
    y_unique = y.unique()
    if len(y_unique) == 1:
        warnings.warn(f"y values cannot all be {y_unique.item()}, skipping AUC computation and returning `NaN`.")
        return float("nan")
    if not y_unique.equal(torch.tensor([0, 1], dtype=y.dtype, device=y.device)):
        warnings.warn(f"y values must be 0 or 1, but got {y_unique.tolist()}, skipping AUC computation and returning `NaN`.")
        return float("nan")

    # sort the predictions in ascending order and reorder the labels to match
    n = len(y)
    indices = y_pred.argsort()
    y = y[indices].cpu().numpy()
    y_pred = y_pred[indices].cpu().numpy()
    nneg = auc = tmp_pos = tmp_neg = 0.0

    for i in range(n):
        y_i = cast(float, y[i])
        if i + 1 < n and y_pred[i] == y_pred[i + 1]:
            # accumulate a run of tied predictions; the group is resolved below
            tmp_pos += y_i
            tmp_neg += 1 - y_i
            continue
        if tmp_pos + tmp_neg > 0:
            # close a tie group: each positive in it counts every lower-ranked negative
            # fully, and each negative tied with it as half a pair
            tmp_pos += y_i
            tmp_neg += 1 - y_i
            nneg += tmp_neg
            auc += tmp_pos * (nneg - tmp_neg / 2)
            tmp_pos = tmp_neg = 0
            continue
        if y_i == 1:
            # an untied positive counts every negative ranked below it
            auc += nneg
        else:
            nneg += 1
    # normalize the pair count by (#negatives * #positives)
    return auc / (nneg * (n - nneg))
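
# A hand-checked illustration of `_calculate` (the values are for illustration only):
# with y_pred = [0.1, 0.4, 0.35, 0.8] and y = [0, 0, 1, 1], the positive scored 0.35
# outranks one negative and the positive scored 0.8 outranks two, so the result is
# (1 + 2) / (2 positives * 2 negatives) = 0.75.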


def compute_roc_auc(
    y_pred: torch.Tensor, y: torch.Tensor, average: Average | str = Average.MACRO
) -> np.ndarray | float | npt.ArrayLike:
| """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC). Referring to: |
| `sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/ |
| sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score>`_. |
| |
| Args: |
| y_pred: input data to compute, typical classification model output. |
| the first dim must be batch, if multi-classes, it must be in One-Hot format. |
| for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data. |
| y: ground truth to compute ROC AUC metric, the first dim must be batch. |
| if multi-classes, it must be in One-Hot format. |
| for example: shape `[16]` or `[16, 1]` for a binary data, shape `[16, 2]` for 2 classes data. |
| average: {``"macro"``, ``"weighted"``, ``"micro"``, ``"none"``} |
| Type of averaging performed if not binary classification. |
| Defaults to ``"macro"``. |
| |
| - ``"macro"``: calculate metrics for each label, and find their unweighted mean. |
| This does not take label imbalance into account. |
| - ``"weighted"``: calculate metrics for each label, and find their average, |
| weighted by support (the number of true instances for each label). |
| - ``"micro"``: calculate metrics globally by considering each element of the label |
| indicator matrix as a label. |
| - ``"none"``: the scores for each class are returned. |
| |
| Raises: |
| ValueError: When ``y_pred`` dimension is not one of [1, 2]. |
| ValueError: When ``y`` dimension is not one of [1, 2]. |
| ValueError: When ``average`` is not one of ["macro", "weighted", "micro", "none"]. |
| |
| Note: |
| ROCAUC expects y to be comprised of 0's and 1's. `y_pred` must be either prob. estimates or confidence values. |
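
    Example (a minimal sketch for the two-class one-hot case; the tensor values are
    illustrative only)::

        y_pred = torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.15], [0.1, 0.5]])
        y = torch.tensor([[0, 1], [0, 1], [1, 0], [1, 0]])
        macro_auc = compute_roc_auc(y_pred, y, average="macro")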

    """
    y_pred_ndim = y_pred.ndimension()
    y_ndim = y.ndimension()
    if y_pred_ndim not in (1, 2):
        raise ValueError(
            f"Predictions should be of shape (batch_size, num_classes) or (batch_size, ), got {y_pred.shape}."
        )
    if y_ndim not in (1, 2):
        raise ValueError(f"Targets should be of shape (batch_size, num_classes) or (batch_size, ), got {y.shape}.")
    # squeeze a trailing singleton class dimension so `[N, 1]` behaves like `[N]`
    if y_pred_ndim == 2 and y_pred.shape[1] == 1:
        y_pred = y_pred.squeeze(dim=-1)
        y_pred_ndim = 1
    if y_ndim == 2 and y.shape[1] == 1:
        y = y.squeeze(dim=-1)

    if y_pred_ndim == 1:
        # binary classification: a single score per sample
        return _calculate(y_pred, y)

    if y.shape != y_pred.shape:
        raise ValueError(f"data shapes of y_pred and y do not match, got {y_pred.shape} and {y.shape}.")

    average = look_up_option(average, Average)
    if average == Average.MICRO:
        return _calculate(y_pred.flatten(), y.flatten())
    # compute one AUC per class by iterating over the class-first (transposed) tensors
    y, y_pred = y.transpose(0, 1), y_pred.transpose(0, 1)
    auc_values = [_calculate(y_pred_, y_) for y_pred_, y_ in zip(y_pred, y)]
    if average == Average.NONE:
        return auc_values
    if average == Average.MACRO:
        return np.mean(auc_values)
    if average == Average.WEIGHTED:
        # weight each class's AUC by its support (number of positive instances)
        weights = [sum(y_) for y_ in y]
        return np.average(auc_values, weights=weights)
    raise ValueError(f'Unsupported average: {average}, available options are ["macro", "weighted", "micro", "none"].')