| | import argparse |
| | import glob |
| | import json |
| | import os |
| | import sys |
| |
|
| | import numpy as np |
| | import SimpleITK as sitk |
| | import torch |
| | from medpy import metric |
| |
|
| | |
# Import-order guard: make `import monai` resolve the *installed* monai package
# rather than any local `monai/` directory shadowing it. We temporarily drop the
# CWD entry ("") and the repo root from sys.path, import monai, then re-insert
# the repo root at the front so project-local packages (e.g. light_training,
# imported below) still resolve.
# NOTE(review): MONAI_SKIP_SUBMODULES presumably trims monai's submodule
# auto-import; it must be set before importing monai — confirm against the
# monai version in use.
os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1")
_repo_root = os.path.abspath(os.path.dirname(__file__))
if "" in sys.path:
    sys.path.remove("")
if _repo_root in sys.path:
    sys.path.remove(_repo_root)
import monai
sys.path.insert(0, _repo_root)
| |
|
| | from monai.utils import set_determinism |
| | from tqdm import tqdm |
| |
|
| | from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train |
| |
|
# Fix all random seeds (python/numpy/torch) so the train/val/test split below
# is reproducible across runs.
set_determinism(123)
| |
|
def cal_metric(gt, pred, voxel_spacing):
    """Compute [dice, hd95] for one binary ground-truth/prediction mask pair.

    Both masks must be non-empty for a real measurement; otherwise the
    BraTS-style worst-case sentinel [0.0, 50] is returned (HD95 is undefined
    on an empty surface).
    """
    if pred.sum() > 0 and gt.sum() > 0:
        dice_score = metric.binary.dc(pred, gt)
        hd95_score = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing)
        return np.array([dice_score, hd95_score])
    # Empty prediction or empty GT: penalize with the conventional sentinel.
    return np.array([0.0, 50])
| |
|
def each_cases_metric(gt, pred, voxel_spacing):
    """Per-class [dice, hd95] for a single case.

    gt, pred: channel-first binary arrays of shape (C, ...) — one channel per
    evaluated region (TC/WT/ET for BraTS, so C == 3 in this script).
    Returns a (C, 2) array; also prints it for per-case progress logging.
    """
    # Derive the class count from the data instead of hard-coding 3, so the
    # function generalizes to other channel layouts without behavior change
    # for the 3-channel BraTS case.
    classes_num = gt.shape[0]
    class_wise_metric = np.zeros((classes_num, 2))
    for cls in range(classes_num):
        # Pass (gt, pred) to match cal_metric's signature. The previous call
        # passed them swapped; results are unchanged because medpy's dc and
        # hd95 are symmetric in their two mask arguments.
        class_wise_metric[cls, ...] = cal_metric(gt[cls], pred[cls], voxel_spacing)
    print(class_wise_metric)
    return class_wise_metric
| |
|
def convert_labels(labels):
    """Convert a BraTS integer label volume into 3 stacked binary channels.

    Channel 0: tumor core  (labels 1 and 3)
    Channel 1: whole tumor (labels 1, 2 and 3)
    Channel 2: enhancing   (label 3)

    Returns a float tensor of shape (3, *labels.shape).
    """
    labels = labels.unsqueeze(dim=0)

    tumor_core = (labels == 1) | (labels == 3)
    whole_tumor = tumor_core | (labels == 2)
    enhancing = labels == 3

    return torch.cat([tumor_core, whole_tumor, enhancing], dim=0).float()
| |
|
| |
|
if __name__ == "__main__":
    # CLI: evaluate previously-saved NIfTI predictions against BraTS2023 ground
    # truth, producing per-case/per-class Dice and HD95 plus a JSON summary.
    parser = argparse.ArgumentParser(description="Compute Dice/HD95 for BraTS2023 (TC/WT/ET) from saved predictions.")
    parser.add_argument("--pred_name", required=True, type=str, help="Prediction folder name under results_root.")
    parser.add_argument("--results_root", type=str, default="prediction_results")
    parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).")
    parser.add_argument(
        "--gt_source",
        type=str,
        default="processed",
        choices=["processed", "raw"],
        help="GT source. 'processed' uses *_seg.npy from preprocessed dataset (recommended for /data/yty/brats23_processed). "
        "'raw' uses seg.nii.gz from --raw_data_dir.",
    )
    parser.add_argument(
        "--raw_data_dir",
        type=str,
        default="./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/",
        help="Raw BraTS2023 training data directory that contains case folders with seg.nii.gz.",
    )
    parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
    parser.add_argument("--train_rate", type=float, default=0.7)
    parser.add_argument("--val_rate", type=float, default=0.1)
    parser.add_argument("--test_rate", type=float, default=0.2)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--voxel_spacing", type=str, default="1,1,1", help="Voxel spacing for HD95, e.g. '1,1,1'.")
    args = parser.parse_args()

    # Parse the comma-separated spacing string into floats for medpy's hd95.
    voxel_spacing = [float(x) for x in args.voxel_spacing.split(",")]

    # Build the case list: either every preprocessed case ("all"), or one split
    # of the same train/val/test partition used at training time (same rates
    # and seed must be passed to reproduce that partition).
    if args.split == "all":
        all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
        ds = MedicalDataset(all_paths, test=False)
    else:
        train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(
            args.data_dir,
            train_rate=args.train_rate,
            val_rate=args.val_rate,
            test_rate=args.test_rate,
            seed=args.seed,
        )
        ds = {"train": train_ds, "val": val_ds, "test": test_ds}[args.split]

    print(f"Evaluating {len(ds)} cases from split={args.split}")

    # Result tensor: (num_cases, 3 classes TC/WT/ET, 2 metrics [dice, hd95]).
    all_results = np.zeros((len(ds), 3, 2), dtype=np.float32)

    for ind, batch in enumerate(tqdm(ds, total=len(ds))):
        properties = batch["properties"]
        case_name = properties["name"]
        # Predictions are expected at <results_root>/<pred_name>/<case>.nii.gz.
        pred_path = os.path.join(args.results_root, args.pred_name, f"{case_name}.nii.gz")
        if not os.path.isfile(pred_path):
            raise FileNotFoundError(f"Prediction not found: {pred_path}")

        if args.gt_source == "raw":
            # Load the original BraTS integer label volume from disk and
            # convert it to the 3-channel (TC/WT/ET) binary representation.
            gt_path = os.path.join(args.raw_data_dir, case_name, "seg.nii.gz")
            if not os.path.isfile(gt_path):
                raise FileNotFoundError(f"GT not found: {gt_path}")
            gt_itk = sitk.ReadImage(gt_path)
            gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32)
            gt_array = torch.from_numpy(gt_array)
            gt_array = convert_labels(gt_array).numpy()
        else:
            # Ground truth comes from the preprocessed dataset sample itself.
            if "seg" not in batch:
                raise KeyError("gt_source=processed requires 'seg' in dataset samples, but it's missing.")
            seg = batch["seg"]
            if isinstance(seg, np.ndarray):
                seg_t = torch.from_numpy(seg)
            else:
                # Fall back to a numpy round-trip for non-ndarray containers.
                seg_t = torch.from_numpy(np.asarray(seg))
            # Drop a leading singleton channel dim so convert_labels sees (D, H, W).
            if seg_t.ndim == 4 and seg_t.shape[0] == 1:
                seg_t = seg_t[0]
            gt_array = convert_labels(seg_t).numpy()

        # NOTE(review): the prediction volume is compared channel-wise against
        # the 3-channel GT, so the saved .nii.gz is presumably already in the
        # stacked TC/WT/ET binary layout — confirm against the prediction writer.
        pred_itk = sitk.ReadImage(pred_path)
        pred_array = sitk.GetArrayFromImage(pred_itk)

        m = each_cases_metric(gt_array, pred_array, voxel_spacing)
        all_results[ind, ...] = m

    # Persist the raw per-case metrics as a .npy alongside other runs.
    out_dir = os.path.join(args.results_root, "result_metrics")
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, f"{args.pred_name}.npy")
    np.save(out_path, all_results)

    # Re-load the saved file (also validates the write) and aggregate:
    # mean/std over cases, then a scalar mean over the 3 classes.
    result = np.load(out_path)
    mean_per_class = result.mean(axis=0)
    std_per_class = result.std(axis=0)
    mean_dice = float(mean_per_class[:, 0].mean())
    mean_hd95 = float(mean_per_class[:, 1].mean())

    # Human/machine-readable summary of the run configuration and scores.
    summary = {
        "pred_name": args.pred_name,
        "results_root": args.results_root,
        "data_dir": args.data_dir,
        "split": args.split,
        "gt_source": args.gt_source,
        "raw_data_dir": args.raw_data_dir if args.gt_source == "raw" else None,
        "voxel_spacing": voxel_spacing,
        "num_cases": int(result.shape[0]),
        "mean_per_class": mean_per_class.tolist(),
        "std_per_class": std_per_class.tolist(),
        "mean_dice": mean_dice,
        "mean_hd95": mean_hd95,
    }
    summary_path = os.path.join(out_dir, f"{args.pred_name}_summary.json")
    with open(summary_path, "w") as f:
        json.dump(summary, f, indent=2)

    print("saved:", out_path)
    print("summary:", summary_path)
    print(result.shape)
    print("mean(TC/WT/ET) [dice, hd95]:")
    print(mean_per_class)
    print("std(TC/WT/ET) [dice, hd95]:")
    print(std_per_class)
    print("mean dice:", mean_dice)
    print("mean hd95:", mean_hd95)
| |
|
| |
|
| |
|
| |
|