# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
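# NOTE: this script targets the MONAI 0.x array-style API; later releases
# renamed or replaced several of the imports below (NiftiDataset -> ImageDataset,
# AddChannel -> EnsureChannelFirst, UNet's `dimensions` -> `spatial_dims`),
# so pin an older MONAI version to run it unmodified.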
import logging
import os
import sys
import tempfile
from glob import glob

import nibabel as nib
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

import monai
from monai.data import NiftiDataset, create_test_image_3d
from monai.inferers import sliding_window_inference
from monai.metrics import DiceMetric
from monai.transforms import AddChannel, Compose, RandRotate90, RandSpatialCrop, ScaleIntensity, ToTensor
from monai.visualize import plot_2d_or_3d_image


def main(tempdir):
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

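    # create_test_image_3d returns a random 128^3 image and its binary mask
    # (num_seg_classes=1) as numpy arrays, so the script needs no external data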
    # create 40 random image, mask pairs
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))

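    # RandSpatialCrop and RandRotate90 below are applied to both the image and
    # the mask; NiftiDataset seeds transform and seg_transform with the same
    # random state per item, so the paired random operations stay aligned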
    # define transforms for image and segmentation
    train_imtrans = Compose(
        [
            ScaleIntensity(),
            AddChannel(),
            RandSpatialCrop((96, 96, 96), random_size=False),
            RandRotate90(prob=0.5, spatial_axes=(0, 2)),
            ToTensor(),
        ]
    )
    train_segtrans = Compose(
        [
            AddChannel(),
            RandSpatialCrop((96, 96, 96), random_size=False),
            RandRotate90(prob=0.5, spatial_axes=(0, 2)),
            ToTensor(),
        ]
    )
    val_imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
    val_segtrans = Compose([AddChannel(), ToTensor()])

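    # sanity-check the pipeline by pulling one batch: with a fixed 96^3 crop,
    # one added channel and batch_size=10, both prints should show
    # torch.Size([10, 1, 96, 96, 96])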
    # define nifti dataset, data loader
    check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans)
    check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
    im, seg = monai.utils.misc.first(check_loader)
    print(im.shape, seg.shape)

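    # the 40 volumes are split below: images[:20] for training, images[-20:] for validation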
    # create a training data loader
    train_ds = NiftiDataset(images[:20], segs[:20], transform=train_imtrans, seg_transform=train_segtrans)
    train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
    # create a validation data loader
    val_ds = NiftiDataset(images[-20:], segs[-20:], transform=val_imtrans, seg_transform=val_segtrans)
    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, pin_memory=torch.cuda.is_available())
    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")

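    # out_channels=1 plus DiceLoss(sigmoid=True) frames this as binary
    # foreground segmentation on raw logits; DiceMetric(sigmoid=True) applies
    # the same sigmoid before scoring, so no explicit activation layer is needed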
    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(sigmoid=True)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)

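    # with 20 training volumes and batch_size=4, each epoch runs 5 steps; the
    # per-step loss is also logged to TensorBoard under the "train_loss" tag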
    # start a typical PyTorch training
    val_interval = 2
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

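        # validate every val_interval epochs: sliding_window_inference tiles
        # each full 128^3 volume with 96^3 ROIs (the training crop size) and
        # stitches the window predictions back together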
        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                metric_sum = 0.0
                metric_count = 0
                val_images = None
                val_labels = None
                val_outputs = None
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
                    roi_size = (96, 96, 96)
                    sw_batch_size = 4
                    val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
                    value = dice_metric(y_pred=val_outputs, y=val_labels)
                    metric_count += len(value)
                    metric_sum += value.item() * len(value)
                metric = metric_sum / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model_segmentation3d_array.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_mean_dice", metric, epoch + 1)
                # plot the last model output as GIF image in TensorBoard with the corresponding image and label
                plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
                plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
                plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")

| | print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}") |
| | writer.close() |
| |
|
| |
|
| | if __name__ == "__main__": |
| | with tempfile.TemporaryDirectory() as tempdir: |
| | main(tempdir) |
| |
|
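# a minimal way to run this sketch and inspect the TensorBoard curves written
# by SummaryWriter (default log dir is ./runs); the filename is hypothetical:
#   python unet_training_array.py
#   tensorboard --logdir runs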