repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
CP2 | CP2-main/builder.py | # The CP2_MoCo model is built upon moco v2 code base:
# https://github.com/facebookresearch/moco
# Copyright (c) Facebook, Inc. and its affilates. All Rights Reserved
import torch
import torch.nn as nn
from mmseg.models import build_segmentor
class CP2_MOCO(nn.Module):
    """CP2 momentum-contrast (MoCo) model built from two mmseg segmentors.

    Keeps a gradient-trained query encoder, a momentum-updated key encoder,
    and a fixed-size queue of L2-normalized negative keys.

    Args:
        cfg: mmseg config; ``cfg.model`` is passed to ``build_segmentor``.
        dim (int): feature dimension of queue entries. Default: 128.
        K (int): queue size (number of negative keys). Default: 65536.
        m (float): momentum coefficient for the key-encoder update.
            Default: 0.999.
        T (float): softmax temperature applied to the MoCo logits.
            Default: 0.2.
    """

    def __init__(self, cfg, dim=128, K=65536, m=0.999, T=0.2):
        super(CP2_MOCO, self).__init__()
        self.K = K
        self.m = m
        self.T = T
        # query and key encoders share the same architecture
        self.encoder_q = build_segmentor(
            cfg.model,
            train_cfg=cfg.get('train_cfg'),
            test_cfg=cfg.get('test_cfg'))
        self.encoder_k = build_segmentor(
            cfg.model,
            train_cfg=cfg.get('train_cfg'),
            test_cfg=cfg.get('test_cfg'))
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize key encoder from query encoder
            param_k.requires_grad = False  # key encoder is not updated by gradient
        # create the queue: (dim, K) buffer of L2-normalized columns
        self.register_buffer("queue", torch.randn(dim, K))
        self.queue = nn.functional.normalize(self.queue, dim=0)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """Momentum update of the key encoder."""
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """Push the current (globally gathered) keys into the queue."""
        # gather keys from all ranks before updating the queue
        keys = concat_all_gather(keys)
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        if ptr + batch_size > self.K:
            # batch crosses the end of the queue: split the write in two
            self.queue[:, ptr:self.K] = keys[0:self.K - ptr].T
            self.queue[:, 0:ptr + batch_size - self.K] = keys[self.K - ptr:batch_size].T
        else:
            self.queue[:, ptr:ptr + batch_size] = keys.T
        ptr = (ptr + batch_size) % self.K  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """Shuffle the global batch across GPUs (decorrelates BN statistics).

        *** Only support DistributedDataParallel (DDP) model. ***
        """
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus so every rank applies the same permutation
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """
        Undo batch shuffle.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward(self, im_q, im_k, mask_q, mask_k):
        """
        Input:
            im_q: a batch of query images
            im_k: a batch of key images
            mask_q, mask_k: per-image masks, flattened to (N, H*W)
        Output:
            logits_moco, logits_dense, labels_moco, labels_dense, mask_dense
        """
        current_bs = im_q.size(0)
        mask_q = mask_q.reshape(current_bs, -1)
        mask_k = mask_k.reshape(current_bs, -1)
        # compute query features: NxCxHxW -> NxCx(H*W)
        q = self.encoder_q(im_q)
        q = q.reshape(q.shape[0], q.shape[1], -1)
        q_dense = nn.functional.normalize(q, dim=1)
        # mask-weighted pooling of dense features into one query vector/image
        q_pos = nn.functional.normalize(torch.einsum('ncx,nx->nc', [q_dense, mask_q]), dim=1)
        # number of spatial positions per image (e.g. 196 for a 14x14 map);
        # derived from the features instead of hard-coding 196 so other
        # input resolutions work, and kept on the feature device
        num_pix = q_dense.shape[-1]
        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder
            # shuffle for making use of BN
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
            k = self.encoder_k(im_k)
            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)
            k = k.reshape(k.shape[0], k.shape[1], -1)  # keys: NxCx(H*W)
            k_dense = nn.functional.normalize(k, dim=1)
            k_pos = nn.functional.normalize(torch.einsum('ncx,nx->nc', [k_dense, mask_k]), dim=1)
        # dense logits: similarity of every query pixel with every key pixel
        logits_dense = torch.einsum('ncx,ncy->nxy', [q_dense, k_dense])
        # a (query-pixel, key-pixel) pair is positive iff both are masked
        labels_dense = torch.einsum('nx,ny->nxy', [mask_q, mask_k])
        labels_dense = labels_dense.reshape(labels_dense.shape[0], -1)
        # valid columns: only key pixels inside the key mask contribute
        mask_dense = torch.einsum(
            'x,ny->nxy',
            [torch.ones(num_pix, device=q_dense.device), mask_k])
        mask_dense = mask_dense.reshape(mask_dense.shape[0], -1)
        # moco logits: positive pair similarity vs. queued negatives
        l_pos = torch.einsum('nc,nc->n', [q_pos, k_pos]).unsqueeze(-1)
        l_neg = torch.einsum('nc,ck->nk', [q_pos, self.queue.clone().detach()])
        logits_moco = torch.cat([l_pos, l_neg], dim=1)
        # the positive key is always at index 0
        labels_moco = torch.zeros(
            logits_moco.shape[0], dtype=torch.long, device=logits_moco.device)
        # apply temperature
        logits_moco /= self.T
        # dequeue and enqueue
        self._dequeue_and_enqueue(k_pos)
        return logits_moco, logits_dense, labels_moco, labels_dense, mask_dense
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    # one placeholder tensor per rank; all_gather overwrites their contents,
    # so the initial ones_like values never survive
    tensors_gather = [torch.ones_like(tensor)
        for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    # concatenate along the batch dimension -> (world_size * N, ...)
    output = torch.cat(tensors_gather, dim=0)
    return output
| 6,009 | 34.146199 | 97 | py |
CP2 | CP2-main/loader.py | # tool functions from moco v2 code base:
# https://github.com/facebookresearch/moco
# Copyright (c) Facebook, Inc. and its affilates. All Rights Reserved
from PIL import ImageFilter
import random
class TwoCropsTransform:
    """Take two random crops of one image as the query and key."""

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, x):
        # two independent applications of the (random) base transform
        return [self.base_transform(x) for _ in range(2)]
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709

    Args:
        sigma (sequence of float): inclusive (min, max) range from which the
            blur radius is uniformly sampled on every call.
    """

    # NOTE: default changed from a mutable list to an immutable tuple; a
    # list default is a single shared object across every instance built
    # without an explicit ``sigma``.
    def __init__(self, sigma=(.1, 2.)):
        self.sigma = sigma

    def __call__(self, x):
        # sample a fresh radius for each image
        sigma = random.uniform(self.sigma[0], self.sigma[1])
        x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
        return x
| 841 | 27.066667 | 79 | py |
CP2 | CP2-main/tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import torch.distributed as dist
#
# dist.init_process_group('gloo', init_method='file:///temp/somefile', rank=0, world_size=1)
def parse_args():
    """Parse command-line arguments for segmentor training.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so distributed launch utilities that read it keep working.
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    parser.add_argument(
        'config',
        help='train config file path')
    parser.add_argument(
        '--work-dir',
        help='the dir to save logs and models')
    parser.add_argument(
        '--load-from',
        help='the checkpoint file to load weights from')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus and --gpu-ids cannot be given together
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='custom options')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point: build config, logger, model and datasets, then train.

    Steps: parse CLI args, load/patch the mmcv Config, optionally init the
    distributed environment, set up work dir + logging + seeds, build the
    segmentor and dataset(s), and hand off to ``train_segmentor``.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')
    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, deterministic: '
                    f'{args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_segmentor(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    logger.info(model)
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmseg version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE)
        # passing checkpoint meta for saving best checkpoint; guarded here
        # because cfg.checkpoint_config may legitimately be None, in which
        # case the previous unconditional update raised AttributeError
        meta.update(cfg.checkpoint_config.meta)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_segmentor(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# script entry point
if __name__ == '__main__':
    print('start training')
    main()
| 6,051 | 33.19209 | 92 | py |
CP2 | CP2-main/mmseg/version.py | # Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.14.0'
def parse_version_info(version_str):
    """Split a dotted version string into a tuple of ints.

    A release-candidate component such as ``'0rc1'`` contributes its integer
    part followed by the string ``'rcN'``, e.g. ``'1.2.0rc1'`` becomes
    ``(1, 2, 0, 'rc1')``.
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            pieces = token.split('rc')
            parts.append(int(pieces[0]))
            parts.append(f'rc{pieces[1]}')
    return tuple(parts)
version_info = parse_version_info(__version__)
| 502 | 25.473684 | 56 | py |
CP2 | CP2-main/mmseg/__init__.py | import mmcv
from .version import __version__, version_info
MMCV_MIN = '1.3.1'
MMCV_MAX = '1.4.0'
def digit_version(version_str):
    """Convert a version string into a list of ints usable for ordering.

    An 'rc' component sorts below the matching release: ``'1.0rc1'`` maps to
    ``[1, -1, 1]``, which compares less than ``[1, 0]``.
    """
    result = []
    for token in version_str.split('.'):
        if token.isdigit():
            result.append(int(token))
        elif 'rc' in token:
            pieces = token.split('rc')
            # demote the numeric part by one so rc versions order first
            result.append(int(pieces[0]) - 1)
            result.append(int(pieces[1]))
    return result
# Resolve comparable version lists once at import time and fail fast with a
# clear message when the installed mmcv falls outside the supported window.
mmcv_min_version = digit_version(MMCV_MIN)
mmcv_max_version = digit_version(MMCV_MAX)
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.'
__all__ = ['__version__', 'version_info']
| 850 | 26.451613 | 69 | py |
CP2 | CP2-main/mmseg/apis/inference.py | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
def init_segmentor(config, checkpoint=None, device='cuda:0'):
    """Initialize a segmentor from config file.
    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str, optional) CPU/CUDA device option. Default 'cuda:0'.
            Use 'cpu' for loading model on CPU.
    Returns:
        nn.Module: The constructed segmentor (in eval mode, on ``device``).
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    # inference-only: drop pretrained-weight source and train-time config
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        # class names and palette travel in the checkpoint meta
        model.CLASSES = checkpoint['meta']['CLASSES']
        model.PALETTE = checkpoint['meta']['PALETTE']
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
class LoadImage:
    """A simple pipeline step that loads the image named in ``results``."""

    def __call__(self, results):
        """Read ``results['img']`` (path or array) and populate image fields.

        Args:
            results (dict): A result dict containing the file name (or an
                already-loaded array) under the ``'img'`` key.

        Returns:
            dict: ``results`` with ``img``, ``img_shape``, ``ori_shape`` and
            the filename fields filled in.
        """
        source = results['img']
        # record the originating filename only when a path was supplied
        filename = source if isinstance(source, str) else None
        results['filename'] = filename
        results['ori_filename'] = filename
        img = mmcv.imread(source)
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
def inference_segmentor(model, img):
    """Inference image(s) with the segmentor.
    Args:
        model (nn.Module): The loaded segmentor.
        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.
    Returns:
        (list[Tensor]): The segmentation result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline, replacing the dataset's loading step with
    # LoadImage so a raw path/array can be fed in directly
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        # CPU path: unwrap the DataContainer-held metas manually
        data['img_metas'] = [i.data[0] for i in data['img_metas']]
    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
def show_result_pyplot(model,
                       img,
                       result,
                       palette=None,
                       fig_size=(15, 10),
                       opacity=0.5,
                       title='',
                       block=True):
    """Visualize the segmentation results on the image.
    Args:
        model (nn.Module): The loaded segmentor.
        img (str or np.ndarray): Image filename or loaded image.
        result (list): The segmentation result.
        palette (list[list[int]]] | None): The palette of segmentation
            map. If None is given, random palette will be generated.
            Default: None
        fig_size (tuple): Figure size of the pyplot figure.
        opacity(float): Opacity of painted segmentation map.
            Default 0.5.
            Must be in (0, 1] range.
        title (str): The title of pyplot figure.
            Default is ''.
        block (bool): Whether to block the pyplot figure.
            Default is True.
    """
    # unwrap DataParallel/DistributedDataParallel wrappers
    if hasattr(model, 'module'):
        model = model.module
    img = model.show_result(
        img, result, palette=palette, show=False, opacity=opacity)
    plt.figure(figsize=fig_size)
    # show_result returns BGR; pyplot expects RGB
    plt.imshow(mmcv.bgr2rgb(img))
    plt.title(title)
    plt.tight_layout()
    plt.show(block=block)
| 4,582 | 32.698529 | 79 | py |
CP2 | CP2-main/mmseg/apis/test.py | import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def np2tmp(array, temp_file_name=None):
    """Save ndarray to local numpy file.
    Args:
        array (ndarray): Ndarray to save.
        temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
            function will generate a file name with tempfile.NamedTemporaryFile
            to save ndarray. Default: None.
    Returns:
        str: The numpy file name.
    """
    if temp_file_name is None:
        # use a context manager so the temporary handle is closed promptly
        # (the original left it open until garbage collection)
        with tempfile.NamedTemporaryFile(suffix='.npy', delete=False) as f:
            temp_file_name = f.name
    np.save(temp_file_name, array)
    return temp_file_name
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    efficient_test=False,
                    opacity=0.5):
    """Test with single GPU.
    Args:
        model (nn.Module): Model to be tested.
        data_loader (utils.data.Dataloader): Pytorch data loader.
        show (bool): Whether show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
        opacity(float): Opacity of painted segmentation map.
            Default 0.5.
            Must be in (0, 1] range.
    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)
        # optional visualization: de-normalize, crop padding, resize back
        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)
            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]
                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))
                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None
                model.module.show_result(
                    img_show,
                    result,
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file,
                    opacity=opacity)
        # efficient_test swaps in-memory arrays for .npy file names
        if isinstance(result, list):
            if efficient_test:
                result = [np2tmp(_) for _ in result]
            results.extend(result)
        else:
            if efficient_test:
                result = np2tmp(result)
            results.append(result)
        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()
    return results
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   efficient_test=False):
    """Test model with multiple gpus.
    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and use gpu communication for results
    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
    and collects them by the rank 0 worker.
    Args:
        model (nn.Module): Model to be tested.
        data_loader (utils.data.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    # only rank 0 renders the progress bar
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        # efficient_test swaps in-memory arrays for .npy file names
        if isinstance(result, list):
            if efficient_test:
                result = [np2tmp(_) for _ in result]
            results.extend(result)
        else:
            if efficient_test:
                result = np2tmp(result)
            results.append(result)
        if rank == 0:
            batch_size = len(result)
            # advance by the global batch processed across all ranks
            for _ in range(batch_size * world_size):
                prog_bar.update()
    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results with CPU.

    Each rank dumps its partial result list to a shared temporary directory;
    rank 0 then loads all parts, interleaves them back into dataset order,
    and truncates dataloader padding.

    Args:
        result_part (list): This rank's partial results.
        size (int): Total number of samples in the dataset.
        tmpdir (str, optional): Shared directory for the part files; created
            via tempfile and broadcast from rank 0 when None.

    Returns:
        list | None: Ordered results on rank 0, None on all other ranks.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace; the fixed-length uint8 buffer lets the directory
        # name be broadcast from rank 0, with trailing spaces stripped below
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # sort the results: ranks processed interleaved samples, so zip
        # restores the original dataset order
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results
def collect_results_gpu(result_part, size):
    """Collect results with GPU.

    Serializes each rank's partial results with pickle into a uint8 CUDA
    tensor, all_gathers the (padded) tensors, and deserializes on rank 0.

    Args:
        result_part (list): This rank's partial results.
        size (int): Total number of samples in the dataset.

    Returns:
        list | None: Ordered results on rank 0, None on all other ranks.
    """
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length (all_gather needs equal shapes)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)
    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            # truncate the padding before unpickling
            part_list.append(
                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # sort the results: zip interleaves the per-rank parts back into
        # original dataset order
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results
| 8,209 | 33.351464 | 79 | py |
CP2 | CP2-main/mmseg/apis/__init__.py | from .inference import inference_segmentor, init_segmentor, show_result_pyplot
from .test import multi_gpu_test, single_gpu_test
from .train import get_root_logger, set_random_seed, train_segmentor
__all__ = [
'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
'show_result_pyplot'
]
| 381 | 37.2 | 78 | py |
CP2 | CP2-main/mmseg/apis/train.py | import random
import warnings
import time
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
def set_random_seed(seed, deterministic=False):
    """Set random seed.
    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # seed every RNG the training pipeline draws from
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_segmentor(model,
                    dataset,
                    cfg,
                    distributed=False,
                    validate=False,
                    timestamp=None,
                    meta=None):
    """Launch segmentor training.

    Builds dataloaders, wraps the model for (distributed) data parallelism,
    constructs optimizer + runner, registers training/eval hooks, handles
    resume/load, then runs the configured workflow.

    Args:
        model (nn.Module): Segmentor to train.
        dataset: A dataset or list of datasets (one per workflow stage).
        cfg (mmcv.Config): Full training config.
        distributed (bool): Whether to use DistributedDataParallel.
        validate (bool): Whether to register an evaluation hook.
        timestamp (str, optional): Shared timestamp for log file names.
        meta (dict, optional): Meta info forwarded to the runner.
    """
    logger = get_root_logger(cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            drop_last=True) for ds in dataset
    ]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # build runner
    # optimizer = build_optimizer(model.module.decode_head.conv_seg, cfg.optimizer)
    optimizer = build_optimizer(model, cfg.optimizer)
    if cfg.get('runner') is None:
        # legacy configs: fall back to iteration-based training
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # register hooks
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # register eval hooks
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # resume takes precedence over plain weight loading
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    # print("runner has run")
    # time.sleep(10)
    runner.run(data_loaders, cfg.workflow)
| 4,066 | 32.61157 | 83 | py |
CP2 | CP2-main/mmseg/core/__init__.py | from .evaluation import * # noqa: F401, F403
from .seg import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
| 126 | 30.75 | 45 | py |
CP2 | CP2-main/mmseg/core/evaluation/class_names.py | import mmcv
def cityscapes_classes():
    """Cityscapes class names for external use."""
    names = (
        'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
        'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle')
    # return a fresh list so callers may mutate it safely
    return list(names)
def ade_classes():
    """ADE20K class names for external use.

    Order matters: index ``i`` pairs with the ``i``-th color returned by
    ``ade_palette()``.
    """
    return [
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag'
    ]
def voc_classes():
    """Pascal VOC class names for external use."""
    names = (
        'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
        'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
        'tvmonitor')
    # return a fresh list so callers may mutate it safely
    return list(names)
def cityscapes_palette():
    """Cityscapes palette (RGB per class) for external use."""
    colors = (
        (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
        (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
        (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60),
        (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100),
        (0, 0, 230), (119, 11, 32))
    # callers expect a list of mutable [r, g, b] lists
    return [list(color) for color in colors]
def ade_palette():
    """ADE20K palette for external use.

    RGB triplets whose order matches ``ade_classes()``: index ``i`` is the
    color used to paint class ``i``.
    """
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
def voc_palette():
    """Pascal VOC palette (RGB per class) for external use."""
    colors = (
        (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128),
        (128, 0, 128), (0, 128, 128), (128, 128, 128), (64, 0, 0),
        (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128),
        (192, 0, 128), (64, 128, 128), (192, 128, 128), (0, 64, 0),
        (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128))
    # callers expect a list of mutable [r, g, b] lists
    return [list(color) for color in colors]
# Maps each canonical dataset name to the alias strings accepted by
# get_classes() / get_palette().
dataset_aliases = {
    'cityscapes': ['cityscapes'],
    'ade': ['ade', 'ade20k'],
    'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or one of the aliases registered in
            ``dataset_aliases``.

    Returns:
        list[str]: Class names produced by the matching ``<name>_classes()``
            helper in this module.

    Raises:
        TypeError: If ``dataset`` is not a str.
        ValueError: If ``dataset`` is not a recognized dataset name/alias.
    """
    # Reject non-string input up front (mmcv.is_str is just this check).
    if not isinstance(dataset, str):
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }
    if dataset not in alias2name:
        raise ValueError(f'Unrecognized dataset: {dataset}')
    # Resolve the helper by name instead of eval(): same module-level lookup,
    # without evaluating a constructed string.
    return globals()[alias2name[dataset] + '_classes']()
def get_palette(dataset):
    """Get class palette (RGB) of a dataset.

    Args:
        dataset (str): Dataset name or one of the aliases registered in
            ``dataset_aliases``.

    Returns:
        list[list[int]]: RGB palette produced by the matching
            ``<name>_palette()`` helper in this module.

    Raises:
        TypeError: If ``dataset`` is not a str.
        ValueError: If ``dataset`` is not a recognized dataset name/alias.
    """
    # Reject non-string input up front (mmcv.is_str is just this check).
    if not isinstance(dataset, str):
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }
    if dataset not in alias2name:
        raise ValueError(f'Unrecognized dataset: {dataset}')
    # Resolve the helper by name instead of eval(): same module-level lookup,
    # without evaluating a constructed string.
    return globals()[alias2name[dataset] + '_palette']()
| 7,277 | 46.568627 | 79 | py |
CP2 | CP2-main/mmseg/core/evaluation/eval_hooks.py | import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as _DistEvalHook
from mmcv.runner import EvalHook as _EvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(_EvalHook):
    """Single-GPU evaluation hook with efficient-test support.

    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
    """

    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        # Stored for the test utilities that consume this flag.
        self.efficient_test = efficient_test

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return
        # Imported lazily to avoid a circular import with mmseg.apis.
        from mmseg.apis import single_gpu_test
        predictions = single_gpu_test(runner.model, self.dataloader, show=False)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        score = self.evaluate(runner, predictions)
        if self.save_best:
            self._save_ckpt(runner, score)
class DistEvalHook(_DistEvalHook):
    """Distributed EvalHook, with efficient test support.

    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
    """
    greater_keys = ['mIoU', 'mAcc', 'aAcc']
    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        # NOTE(review): stored but not read in this class; presumably consumed
        # by the mmseg test utilities — confirm before removing.
        self.efficient_test = efficient_test
    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronization of BatchNorm's buffer (running_mean
        # and running_var) is not supported in the DDP of pytorch,
        # which may cause the inconsistent performance of models in
        # different ranks, so we broadcast BatchNorm's buffers
        # of rank 0 to other ranks to avoid this.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if not self._should_evaluate(runner):
            return
        # Results from all ranks are gathered through a shared temp dir
        # (unless gpu_collect is enabled by the parent hook's config).
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        # Imported lazily to avoid a circular import with mmseg.apis.
        from mmseg.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 evaluates the gathered results and saves checkpoints.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            if self.save_best:
                self._save_ckpt(runner, key_score)
| 3,528 | 36.147368 | 79 | py |
CP2 | CP2-main/mmseg/core/evaluation/metrics.py | from collections import OrderedDict
import mmcv
import numpy as np
import torch
def f_score(precision, recall, beta=1):
    """Calculate the f-score value.

    Args:
        precision (float | torch.Tensor): The precision value.
        recall (float | torch.Tensor): The recall value.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.

    Returns:
        float | torch.Tensor: The f-score value. NaN when both precision
            and recall are zero (callers handle this via ``nan_to_num``).
    """
    score = (1 + beta**2) * (precision * recall) / (
        (beta**2 * precision) + recall)
    return score
def intersect_and_union(pred_label,
                        label,
                        num_classes,
                        ignore_index,
                        label_map=dict(),
                        reduce_zero_label=False):
    """Calculate intersection and Union.

    Args:
        pred_label (ndarray | str): Prediction segmentation map
            or predict result filename (``.npy``).
        label (ndarray | str): Ground truth segmentation map
            or label filename.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        label_map (dict): Mapping old labels to new labels, applied to the
            ground truth no matter how it was provided. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label; label 0 becomes
            ignored and all remaining labels shift down by one.
            Default: False.

    Returns:
        torch.Tensor: The intersection of prediction and ground truth
            histogram on all classes.
        torch.Tensor: The union of prediction and ground truth histogram on
            all classes.
        torch.Tensor: The prediction histogram on all classes.
        torch.Tensor: The ground truth histogram on all classes.
    """
    if isinstance(pred_label, str):
        pred_label = torch.from_numpy(np.load(pred_label))
    else:
        pred_label = torch.from_numpy(pred_label)
    if isinstance(label, str):
        label = torch.from_numpy(
            mmcv.imread(label, flag='unchanged', backend='pillow'))
    else:
        label = torch.from_numpy(label)
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    if reduce_zero_label:
        # Both the original 0 label and the original 255 end up at the
        # 255 ignore value after the shift.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # Drop ignored pixels before computing the per-class histograms.
    mask = (label != ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = torch.histc(
        intersect.float(), bins=(num_classes), min=0, max=num_classes - 1)
    area_pred_label = torch.histc(
        pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1)
    area_label = torch.histc(
        label.float(), bins=(num_classes), min=0, max=num_classes - 1)
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results,
                              gt_seg_maps,
                              num_classes,
                              ignore_index,
                              label_map=dict(),
                              reduce_zero_label=False):
    """Accumulate intersection/union statistics over a whole dataset.

    Args:
        results (list[ndarray] | list[str]): Prediction segmentation maps or
            prediction result filenames, one per image.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth maps or label
            filenames, aligned with ``results``.
        num_classes (int): Number of categories.
        ignore_index (int): Index excluded from evaluation.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        ndarray: Summed intersection histogram over all images.
        ndarray: Summed union histogram over all images.
        ndarray: Summed prediction histogram over all images.
        ndarray: Summed ground-truth histogram over all images.
    """
    assert len(gt_seg_maps) == len(results)
    # One float64 accumulator per returned statistic, in the same order as
    # intersect_and_union's return tuple.
    totals = [
        torch.zeros((num_classes, ), dtype=torch.float64) for _ in range(4)
    ]
    for result, gt_seg_map in zip(results, gt_seg_maps):
        per_image = intersect_and_union(result, gt_seg_map, num_classes,
                                        ignore_index, label_map,
                                        reduce_zero_label)
        for total, stat in zip(totals, per_image):
            total += stat
    total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label = totals
    return total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label
def mean_iou(results,
             gt_seg_maps,
             num_classes,
             ignore_index,
             nan_to_num=None,
             label_map=dict(),
             reduce_zero_label=False):
    """Calculate Mean Intersection and Union (mIoU).

    Convenience wrapper forwarding to :func:`eval_metrics` with
    ``metrics=['mIoU']``.

    Args:
        results (list[ndarray] | list[str]): Prediction segmentation maps or
            prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth segmentation
            maps or label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index excluded from evaluation.
        nan_to_num (int, optional): Replacement for NaN entries.
            Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]: 'aAcc' plus per-category 'IoU' and
            'Acc' arrays of shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mIoU'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
def mean_dice(results,
              gt_seg_maps,
              num_classes,
              ignore_index,
              nan_to_num=None,
              label_map=dict(),
              reduce_zero_label=False):
    """Calculate Mean Dice (mDice).

    Convenience wrapper forwarding to :func:`eval_metrics` with
    ``metrics=['mDice']``.

    Args:
        results (list[ndarray] | list[str]): Prediction segmentation maps or
            prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): Ground-truth segmentation
            maps or label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index excluded from evaluation.
        nan_to_num (int, optional): Replacement for NaN entries.
            Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]: 'aAcc' plus per-category 'Dice' and
            'Acc' arrays of shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mDice'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
def mean_fscore(results,
                gt_seg_maps,
                num_classes,
                ignore_index,
                nan_to_num=None,
                label_map=dict(),
                reduce_zero_label=False,
                beta=1):
    """Calculate Mean F-Score (mFscore).

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.

    Returns:
        dict[str, float | ndarray]: Default metrics.
            <aAcc> float: Overall accuracy on all images.
            <Fscore> ndarray: Per category f-score, shape (num_classes, ).
            <Precision> ndarray: Per category precision, shape (num_classes, ).
            <Recall> ndarray: Per category recall, shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mFscore'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label,
        beta=beta)
def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None,
                 label_map=dict(),
                 reduce_zero_label=False,
                 beta=1):
    """Calculate evaluation metrics.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated: 'mIoU', 'mDice'
            and/or 'mFscore'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Weight of recall in the f-score ('mFscore' only).
            Default: 1.

    Returns:
        dict[str, float | ndarray]: 'aAcc' (overall accuracy) plus the
            per-category arrays of each requested metric, each of shape
            (num_classes, ).
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice', 'mFscore']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))
    total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label = total_intersect_and_union(
            results, gt_seg_maps, num_classes, ignore_index, label_map,
            reduce_zero_label)
    # Overall pixel accuracy across all images and classes.
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    ret_metrics = OrderedDict({'aAcc': all_acc})
    for metric in metrics:
        if metric == 'mIoU':
            iou = total_area_intersect / total_area_union
            acc = total_area_intersect / total_area_label
            ret_metrics['IoU'] = iou
            ret_metrics['Acc'] = acc
        elif metric == 'mDice':
            dice = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            acc = total_area_intersect / total_area_label
            ret_metrics['Dice'] = dice
            ret_metrics['Acc'] = acc
        elif metric == 'mFscore':
            precision = total_area_intersect / total_area_pred_label
            recall = total_area_intersect / total_area_label
            f_value = torch.tensor(
                [f_score(x[0], x[1], beta) for x in zip(precision, recall)])
            ret_metrics['Fscore'] = f_value
            ret_metrics['Precision'] = precision
            ret_metrics['Recall'] = recall
    # Convert every metric tensor to numpy for downstream logging/printing.
    ret_metrics = {
        metric: value.numpy()
        for metric, value in ret_metrics.items()
    }
    if nan_to_num is not None:
        # Classes absent from both prediction and GT yield 0/0 = NaN.
        ret_metrics = OrderedDict({
            metric: np.nan_to_num(metric_value, nan=nan_to_num)
            for metric, metric_value in ret_metrics.items()
        })
    return ret_metrics
| 13,051 | 38.914373 | 79 | py |
CP2 | CP2-main/mmseg/core/evaluation/__init__.py | from .class_names import get_classes, get_palette
from .eval_hooks import DistEvalHook, EvalHook
from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou
__all__ = [
'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
'eval_metrics', 'get_classes', 'get_palette'
]
| 301 | 32.555556 | 71 | py |
CP2 | CP2-main/mmseg/core/seg/__init__.py | from .builder import build_pixel_sampler
from .sampler import BasePixelSampler, OHEMPixelSampler
__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
| 172 | 33.6 | 73 | py |
CP2 | CP2-main/mmseg/core/seg/builder.py | from mmcv.utils import Registry, build_from_cfg
# Registry holding all pixel-sampler classes (e.g. OHEMPixelSampler).
PIXEL_SAMPLERS = Registry('pixel sampler')
def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map.

    Args:
        cfg (dict): Config with a ``type`` key naming a registered sampler;
            remaining keys are passed to the sampler's constructor.
        **default_args: Extra constructor kwargs (e.g. ``context``).

    Returns:
        The instantiated pixel sampler.
    """
    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
| 253 | 27.222222 | 60 | py |
CP2 | CP2-main/mmseg/core/seg/sampler/base_pixel_sampler.py | from abc import ABCMeta, abstractmethod
class BasePixelSampler(metaclass=ABCMeta):
    """Base class of pixel sampler."""
    def __init__(self, **kwargs):
        # Accepts (and ignores) arbitrary kwargs so subclasses can be built
        # from config dicts carrying extra fields.
        pass
    @abstractmethod
    def sample(self, seg_logit, seg_label):
        """Placeholder for sample function."""
| 284 | 20.923077 | 46 | py |
CP2 | CP2-main/mmseg/core/seg/sampler/ohem_pixel_sampler.py | import torch
import torch.nn.functional as F
from ..builder import PIXEL_SAMPLERS
from .base_pixel_sampler import BasePixelSampler
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
    """Online Hard Example Mining Sampler for segmentation.

    Args:
        context (nn.Module): The context of sampler, subclass of
            :obj:`BaseDecodeHead`.
        thresh (float, optional): The threshold for hard example selection.
            Below which, are prediction with low confidence. If not
            specified, the hard examples will be pixels of top ``min_kept``
            loss. Default: None.
        min_kept (int, optional): The minimum number of predictions to keep.
            Default: 100000.
    """
    def __init__(self, context, thresh=None, min_kept=100000):
        super(OHEMPixelSampler, self).__init__()
        self.context = context
        assert min_kept > 1
        self.thresh = thresh
        self.min_kept = min_kept
    def sample(self, seg_logit, seg_label):
        """Sample pixels that have high loss or with low prediction confidence.

        Args:
            seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
            seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)

        Returns:
            torch.Tensor: segmentation weight, shape (N, H, W)
        """
        with torch.no_grad():
            assert seg_logit.shape[2:] == seg_label.shape[2:]
            assert seg_label.shape[1] == 1
            seg_label = seg_label.squeeze(1).long()
            # Keep at least `min_kept` pixels per image, summed over the batch.
            batch_kept = self.min_kept * seg_label.size(0)
            valid_mask = seg_label != self.context.ignore_index
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if self.thresh is not None:
                # Confidence-based mining: weight pixels whose predicted
                # probability for the GT class falls below the threshold.
                seg_prob = F.softmax(seg_logit, dim=1)
                tmp_seg_label = seg_label.clone().unsqueeze(1)
                # Ignored pixels get a valid dummy index for gather(); they
                # are excluded from the result through valid_mask anyway.
                tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                sort_prob, sort_indices = seg_prob[valid_mask].sort()
                if sort_prob.numel() > 0:
                    # Raise the threshold if self.thresh alone would keep
                    # fewer than batch_kept pixels.
                    min_threshold = sort_prob[min(batch_kept,
                                                  sort_prob.numel() - 1)]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
            else:
                # Loss-based mining: weight the batch_kept pixels with the
                # largest per-pixel loss.
                losses = self.context.loss_decode(
                    seg_logit,
                    seg_label,
                    weight=None,
                    ignore_index=self.context.ignore_index,
                    reduction_override='none')
                # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa
                _, sort_indices = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.
            seg_weight[valid_mask] = valid_seg_weight
            return seg_weight
| 3,155 | 39.987013 | 103 | py |
CP2 | CP2-main/mmseg/core/seg/sampler/__init__.py | from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler
__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
| 150 | 29.2 | 50 | py |
def add_prefix(inputs, prefix):
    """Add prefix for dict.

    Args:
        inputs (dict): The input dict with str keys.
        prefix (str): The prefix to add.

    Returns:
        dict: The dict with keys updated with ``prefix``.
    """
    return {f'{prefix}.{key}': value for key, value in inputs.items()}
| 371 | 19.666667 | 57 | py |
CP2 | CP2-main/mmseg/core/utils/__init__.py | from .misc import add_prefix
__all__ = ['add_prefix']
| 55 | 13 | 28 | py |
CP2 | CP2-main/mmseg/models/__init__.py | from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, HEADS, LOSSES, SEGMENTORS, build_backbone,
build_head, build_loss, build_segmentor)
from .decode_heads import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .segmentors import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'HEADS', 'LOSSES', 'SEGMENTORS', 'build_backbone',
'build_head', 'build_loss', 'build_segmentor'
]
| 449 | 36.5 | 75 | py |
CP2 | CP2-main/mmseg/models/builder.py | import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
# A single shared registry (child of mmcv's global MODELS) backs every
# component type, so backbones, necks, heads, losses and segmentors are all
# registered and looked up in the same namespace.
MODELS = Registry('models', parent=MMCV_MODELS)
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
SEGMENTORS = MODELS
def build_backbone(cfg):
    """Build backbone."""
    return BACKBONES.build(cfg)
def build_neck(cfg):
    """Build neck."""
    return NECKS.build(cfg)
def build_head(cfg):
    """Build head."""
    return HEADS.build(cfg)
def build_loss(cfg):
    """Build loss."""
    return LOSSES.build(cfg)
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build segmentor."""
    # Passing the cfgs here (instead of inside `cfg.model`) is a legacy API.
    legacy_cfg_passed = train_cfg is not None or test_cfg is not None
    if legacy_cfg_passed:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    assert train_cfg is None or cfg.get('train_cfg') is None, \
        'train_cfg specified in both outer field and model field '
    assert test_cfg is None or cfg.get('test_cfg') is None, \
        'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return SEGMENTORS.build(cfg, default_args=default_args)
| 1,165 | 23.808511 | 71 | py |
CP2 | CP2-main/mmseg/models/decode_heads/fcn_head.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .decode_head import BaseDecodeHead
@HEADS.register_module()
class FCNHead(BaseDecodeHead):
    """Fully Convolution Networks for Semantic Segmentation.

    This head is implemented of `FCNNet <https://arxiv.org/abs/1411.4038>`_.

    Args:
        num_convs (int): Number of convs in the head. Default: 2.
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        concat_input (bool): Whether concat the input and output of convs
            before classification layer.
        contrast (bool): If True, forward() outputs a 128-channel projection
            (two 1x1 convs) instead of class logits; used for contrastive
            pre-training. Default: False.
        dilation (int): The dilation rate for convs in the head. Default: 1.
    """

    def __init__(self,
                 num_convs=2,
                 kernel_size=3,
                 concat_input=True,
                 contrast=False,
                 dilation=1,
                 **kwargs):
        assert num_convs >= 0 and dilation > 0 and isinstance(dilation, int)
        self.num_convs = num_convs
        self.concat_input = concat_input
        self.contrast = contrast
        self.kernel_size = kernel_size
        super(FCNHead, self).__init__(**kwargs)
        if num_convs == 0:
            # With no convs the head is a pass-through, so the channel
            # counts must already match. No ConvModules are built (the
            # original code constructed one and then discarded it).
            assert self.in_channels == self.channels
            self.convs = nn.Identity()
        else:
            conv_padding = (kernel_size // 2) * dilation
            # First conv maps in_channels -> channels ...
            convs = [
                ConvModule(
                    self.in_channels,
                    self.channels,
                    kernel_size=kernel_size,
                    padding=conv_padding,
                    dilation=dilation,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg)
            ]
            # ... the remaining convs stay at `channels`.
            for _ in range(num_convs - 1):
                convs.append(
                    ConvModule(
                        self.channels,
                        self.channels,
                        kernel_size=kernel_size,
                        padding=conv_padding,
                        dilation=dilation,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            # Fuses the head input with the conv output before classifying.
            self.conv_cat = ConvModule(
                self.in_channels + self.channels,
                self.channels,
                kernel_size=kernel_size,
                padding=kernel_size // 2,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        if self.contrast:
            # Projection head producing 128-d per-pixel embeddings.
            self.contrast_conv = nn.Sequential(
                nn.Conv2d(self.channels, self.channels, 1),
                nn.ReLU(),
                nn.Conv2d(self.channels, 128, 1))

    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        output = self.convs(x)
        if self.concat_input:
            output = self.conv_cat(torch.cat([x, output], dim=1))
        if self.contrast:
            output = self.contrast_conv(output)
        else:
            output = self.cls_seg(output)
        return output
| 3,166 | 33.423913 | 77 | py |
CP2 | CP2-main/mmseg/models/decode_heads/decode_head.py | from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmcv.cnn import constant_init
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.core import build_pixel_sampler
from mmseg.ops import resize
from ..builder import build_loss
from ..losses import accuracy
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
    """Base class for BaseDecodeHead.

    Args:
        in_channels (int|Sequence[int]): Input channels.
        channels (int): Channels after modules, before conv_seg.
        num_classes (int): Number of classes.
        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
        conv_cfg (dict|None): Config of conv layers. Default: None.
        norm_cfg (dict|None): Config of norm layers. Default: None.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU')
        in_index (int|Sequence[int]): Input feature index. Default: -1
        input_transform (str|None): Transformation type of input features.
            Options: 'resize_concat', 'multiple_select', None.
            'resize_concat': Multiple feature maps will be resize to the
                same size as first one and than concat together.
                Usually used in FCN head of HRNet.
            'multiple_select': Multiple feature maps will be bundle into
                a list and passed into decode head.
            None: Only one select feature map is allowed.
            Default: None.
        loss_decode (dict): Config of decode loss.
            Default: dict(type='CrossEntropyLoss').
        ignore_index (int | None): The label index to be ignored. When using
            masked BCE loss, ignore_index should be set to None. Default: 255
        sampler (dict|None): The config of segmentation map sampler.
            Default: None.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 *,
                 num_classes,
                 dropout_ratio=0.1,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 in_index=-1,
                 input_transform=None,
                 loss_decode=dict(
                     type='CrossEntropyLoss',
                     use_sigmoid=False,
                     loss_weight=1.0),
                 ignore_index=255,
                 sampler=None,
                 align_corners=False):
        super(BaseDecodeHead, self).__init__()
        self._init_inputs(in_channels, in_index, input_transform)
        self.channels = channels
        self.num_classes = num_classes
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_index = in_index
        self.loss_decode = build_loss(loss_decode)
        self.ignore_index = ignore_index
        self.align_corners = align_corners
        if sampler is not None:
            # Pixel sampler (e.g. OHEM) that weights pixels during the loss;
            # it receives this head as context to read ignore_index etc.
            self.sampler = build_pixel_sampler(sampler, context=self)
        else:
            self.sampler = None
        # Final 1x1 classifier mapping `channels` features to class logits.
        self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
        if dropout_ratio > 0:
            self.dropout = nn.Dropout2d(dropout_ratio)
        else:
            self.dropout = None
        self.fp16_enabled = False
    def extra_repr(self):
        """Extra repr."""
        s = f'input_transform={self.input_transform}, ' \
            f'ignore_index={self.ignore_index}, ' \
            f'align_corners={self.align_corners}'
        return s
    def _init_inputs(self, in_channels, in_index, input_transform):
        """Check and initialize input transforms.

        The in_channels, in_index and input_transform must match.
        Specifically, when input_transform is None, only single feature map
        will be selected. So in_channels and in_index must be of type int.

        Args:
            in_channels (int|Sequence[int]): Input channels.
            in_index (int|Sequence[int]): Input feature index.
            input_transform (str|None): Transformation type of input features.
                Options: 'resize_concat', 'multiple_select', None.
                'resize_concat': Multiple feature maps will be resize to the
                    same size as first one and than concat together.
                    Usually used in FCN head of HRNet.
                'multiple_select': Multiple feature maps will be bundle into
                    a list and passed into decode head.
                None: Only one select feature map is allowed.
        """
        if input_transform is not None:
            assert input_transform in ['resize_concat', 'multiple_select']
        self.input_transform = input_transform
        self.in_index = in_index
        if input_transform is not None:
            assert isinstance(in_channels, (list, tuple))
            assert isinstance(in_index, (list, tuple))
            assert len(in_channels) == len(in_index)
            if input_transform == 'resize_concat':
                # Concatenated maps: effective input width is the sum.
                self.in_channels = sum(in_channels)
            else:
                self.in_channels = in_channels
        else:
            assert isinstance(in_channels, int)
            assert isinstance(in_index, int)
            self.in_channels = in_channels
    def init_weights(self, pretrained=None):
        """Initialize weights of the whole decoder head.

        Args:
            pretrained (str, optional): Path of a checkpoint to load
                (non-strict), e.g. a pre-trained head.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        # conv_seg is (re-)initialized even after loading a checkpoint, so a
        # pre-trained head can be paired with a freshly initialized
        # segmentation classifier.
        normal_init(self.conv_seg, mean=0, std=0.01)
    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.

        Args:
            inputs (list[Tensor]): List of multi-level img features.

        Returns:
            Tensor: The transformed inputs
        """
        if self.input_transform == 'resize_concat':
            inputs = [inputs[i] for i in self.in_index]
            # Resize every selected map to the first one's spatial size,
            # then concatenate along the channel dimension.
            upsampled_inputs = [
                resize(
                    input=x,
                    size=inputs[0].shape[2:],
                    mode='bilinear',
                    align_corners=self.align_corners) for x in inputs
            ]
            inputs = torch.cat(upsampled_inputs, dim=1)
        elif self.input_transform == 'multiple_select':
            inputs = [inputs[i] for i in self.in_index]
        else:
            inputs = inputs[self.in_index]
        return inputs
    @auto_fp16()
    @abstractmethod
    def forward(self, inputs):
        """Placeholder of forward function."""
        pass
    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):
        """Forward function for training.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            gt_semantic_seg (Tensor): Semantic segmentation masks
                used if the architecture supports semantic segmentation task.
            train_cfg (dict): The training config.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        seg_logits = self.forward(inputs)
        losses = self.losses(seg_logits, gt_semantic_seg)
        return losses
    def forward_test(self, inputs, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs)
    def cls_seg(self, feat):
        """Classify each pixel."""
        if self.dropout is not None:
            feat = self.dropout(feat)
        output = self.conv_seg(feat)
        return output
    @force_fp32(apply_to=('seg_logit', ))
    def losses(self, seg_logit, seg_label):
        """Compute segmentation loss."""
        loss = dict()
        # Upsample the logits to the label resolution before the loss.
        seg_logit = resize(
            input=seg_logit,
            size=seg_label.shape[2:],
            mode='bilinear',
            align_corners=self.align_corners)
        if self.sampler is not None:
            seg_weight = self.sampler.sample(seg_logit, seg_label)
        else:
            seg_weight = None
        seg_label = seg_label.squeeze(1)
        loss['loss_seg'] = self.loss_decode(
            seg_logit,
            seg_label,
            weight=seg_weight,
            ignore_index=self.ignore_index)
        loss['acc_seg'] = accuracy(seg_logit, seg_label)
        return loss
| 9,545 | 38.283951 | 78 | py |
CP2 | CP2-main/mmseg/models/decode_heads/__init__.py | from .aspp_head import ASPPHead
from .fcn_head import FCNHead
__all__ = [
'FCNHead', 'ASPPHead',
]
| 104 | 14 | 31 | py |
CP2 | CP2-main/mmseg/models/decode_heads/aspp_head.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmseg.ops import resize
from mmseg.models.builder import HEADS
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
class ASPPModule(nn.ModuleList):
    """Atrous Spatial Pyramid Pooling (ASPP) Module.

    Holds one ConvModule per dilation rate; a rate of 1 uses a plain 1x1
    conv, all other rates use a 3x3 atrous conv with matching padding.

    Args:
        dilations (tuple[int]): Dilation rate of each layer.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict): Config of activation layers.
    """

    def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg,
                 act_cfg):
        super(ASPPModule, self).__init__()
        self.dilations = dilations
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        for rate in dilations:
            pointwise = rate == 1
            self.append(
                ConvModule(
                    in_channels,
                    channels,
                    1 if pointwise else 3,
                    dilation=rate,
                    padding=0 if pointwise else rate,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, x):
        """Apply every ASPP branch to ``x`` and return the list of outputs."""
        return [branch(x) for branch in self]
@HEADS.register_module()
class ASPPHead(BaseDecodeHead):
    """Rethinking Atrous Convolution for Semantic Image Segmentation.

    This head is the implementation of `DeepLabV3
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        dilations (tuple[int]): Dilation rates for ASPP module.
            Default: (1, 6, 12, 18).
        contrast (bool): If True, forward() outputs a 128-channel projection
            (two 1x1 convs) instead of class logits; used for contrastive
            pre-training. Default: False.
    """
    def __init__(self, dilations=(1, 6, 12, 18), contrast=False, **kwargs):
        super(ASPPHead, self).__init__(**kwargs)
        assert isinstance(dilations, (list, tuple))
        self.dilations = dilations
        self.contrast = contrast
        # Global-context branch: image-level average pooling + 1x1 conv.
        self.image_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                self.in_channels,
                self.channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        self.aspp_modules = ASPPModule(
            dilations,
            self.in_channels,
            self.channels,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Fuses the pooled branch plus all dilated branches back down to
        # `channels` feature maps.
        self.bottleneck = ConvModule(
            (len(dilations) + 1) * self.channels,
            self.channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        if self.contrast:
            # Projection head producing 128-d per-pixel embeddings.
            self.contrast_conv = nn.Sequential(
                nn.Conv2d(self.channels, self.channels, 1),
                nn.ReLU(),
                nn.Conv2d(self.channels, 128, 1))
    def forward(self, inputs):
        """Forward function."""
        x = self._transform_inputs(inputs)
        # Upsample the pooled feature back to x's spatial size so it can be
        # concatenated with the dilated-branch outputs.
        aspp_outs = [
            resize(
                self.image_pool(x),
                size=x.size()[2:],
                mode='bilinear',
                align_corners=self.align_corners)
        ]
        aspp_outs.extend(self.aspp_modules(x))
        aspp_outs = torch.cat(aspp_outs, dim=1)
        output = self.bottleneck(aspp_outs)
        if self.contrast:
            output = self.contrast_conv(output)
        else:
            output = self.cls_seg(output)
return output | 3,807 | 31.547009 | 76 | py |
CP2 | CP2-main/mmseg/models/utils/se_layer.py | import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from .make_divisible import make_divisible
class SELayer(nn.Module):
    """Squeeze-and-Excitation Module.

    Computes a per-channel gate from globally pooled features and rescales
    the input by it.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        ratio (int): Squeeze ratio; the intermediate channel count is
            ``make_divisible(channels // ratio, 8)``. Default: 16.
        conv_cfg (None or dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict(s) for the two
            activation layers. A single dict is applied to both convs; a
            pair is applied to the first and second conv respectively.
            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
            divisor=6.0)).
    """
    def __init__(self,
                 channels,
                 ratio=16,
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'),
                          dict(type='HSigmoid', bias=3.0, divisor=6.0))):
        super(SELayer, self).__init__()
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        squeezed_channels = make_divisible(channels // ratio, 8)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # Squeeze: channels -> channels/ratio (rounded to a multiple of 8).
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeezed_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[0])
        # Excite: back to the original channel count; the second activation
        # (HSigmoid by default) produces the gate values.
        self.conv2 = ConvModule(
            in_channels=squeezed_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg[1])
    def forward(self, x):
        gate = self.conv2(self.conv1(self.global_avgpool(x)))
        return x * gate
| 2,103 | 35.275862 | 79 | py |
CP2 | CP2-main/mmseg/models/utils/weight_init.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/layers/drop.py."""
import math
import warnings
import torch
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill ``tensor`` in-place with truncated-normal samples.

    Samples uniformly between the normal CDF values of the two cut points,
    then maps through the inverse CDF (via ``erfinv``), so the result
    follows N(mean, std^2) restricted to [a, b].

    Reference: https://people.sc.fsu.edu/~jburkardt/presentations
    /truncated_normal.pdf
    """

    def norm_cdf(x):
        # CDF of the standard normal distribution.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
            'The distribution of values may be incorrect.',
            stacklevel=2)

    with torch.no_grad():
        cdf_low = norm_cdf((a - mean) / std)
        cdf_high = norm_cdf((b - mean) / std)
        # Uniform in [2*cdf_low - 1, 2*cdf_high - 1], the erfinv input range
        # corresponding to [a, b].
        tensor.uniform_(2 * cdf_low - 1, 2 * cdf_high - 1)
        tensor.erfinv_()
        # Scale/shift from standard normal to N(mean, std^2).
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)
        # Clamp to guard against floating-point spill past the bounds.
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fill ``tensor`` with values drawn from a truncated normal
    distribution.

    Values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside
    :math:`[a, b]` redrawn until they fall within the bounds. Works best
    when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`
        mean (float): the mean of the normal distribution
        std (float): the standard deviation of the normal distribution
        a (float): the minimum cutoff value
        b (float): the maximum cutoff value
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| 2,327 | 35.952381 | 76 | py |
CP2 | CP2-main/mmseg/models/utils/res_layer.py | from mmcv.cnn import build_conv_layer, build_norm_layer
from torch import nn as nn
class ResLayer(nn.Sequential):
    """ResLayer to build ResNet style backbone.

    Builds ``num_blocks`` residual blocks as one ``nn.Sequential``; only the
    first block may downsample (stride) and/or change channel count.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        dilation (int): dilation rate of the blocks. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        multi_grid (int | None): Multi grid dilation rates of last
            stage. Default: None
        contract_dilation (bool): Whether contract first dilation of each layer
            Default: False
    """
    def __init__(self,
                 block,
                 inplanes,
                 planes,
                 num_blocks,
                 stride=1,
                 dilation=1,
                 avg_down=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 multi_grid=None,
                 contract_dilation=False,
                 **kwargs):
        self.block = block
        downsample = None
        # A shortcut projection is needed whenever the first block changes
        # resolution or channel count.
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = []
            conv_stride = stride
            if avg_down:
                # Move the striding into an AvgPool so the 1x1 conv below
                # stays stride-1 (ResNet-D style shortcut).
                conv_stride = 1
                downsample.append(
                    nn.AvgPool2d(
                        kernel_size=stride,
                        stride=stride,
                        ceil_mode=True,
                        count_include_pad=False))
            downsample.extend([
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=conv_stride,
                    bias=False),
                # build_norm_layer returns (name, layer); keep the layer.
                build_norm_layer(norm_cfg, planes * block.expansion)[1]
            ])
            downsample = nn.Sequential(*downsample)
        layers = []
        if multi_grid is None:
            # Optionally halve the first block's dilation (used by some
            # DeepLab variants to smooth the dilation transition).
            if dilation > 1 and contract_dilation:
                first_dilation = dilation // 2
            else:
                first_dilation = dilation
        else:
            first_dilation = multi_grid[0]
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                dilation=first_dilation,
                downsample=downsample,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                **kwargs))
        inplanes = planes * block.expansion
        # Remaining blocks: stride 1, identity shortcut, per-block dilation
        # taken from multi_grid when provided.
        for i in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    dilation=dilation if multi_grid is None else multi_grid[i],
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    **kwargs))
        super(ResLayer, self).__init__(*layers)
| 3,315 | 33.905263 | 79 | py |
CP2 | CP2-main/mmseg/models/utils/self_attention_block.py | import torch
from mmcv.cnn import ConvModule, constant_init
from torch import nn as nn
from torch.nn import functional as F
class SelfAttentionBlock(nn.Module):
    """General self-attention block/non-local block.

    Please refer to https://arxiv.org/abs/1706.03762 for details about key,
    query and value.

    Args:
        key_in_channels (int): Input channels of key feature.
        query_in_channels (int): Input channels of query feature.
        channels (int): Output channels of key/query transform.
        out_channels (int): Output channels.
        share_key_query (bool): Whether share projection weight between key
            and query projection.
        query_downsample (nn.Module): Query downsample module.
        key_downsample (nn.Module): Key downsample module.
        key_query_num_convs (int): Number of convs for key/query projection.
        value_out_num_convs (int): Number of convs for value/out projection.
        key_query_norm (bool): Whether to use ConvModule (with norm) for the
            key/query projections.
        value_out_norm (bool): Whether to use ConvModule (with norm) for the
            value/out projections.
        matmul_norm (bool): Whether normalize attention map with sqrt of
            channels
        with_out (bool): Whether use out projection.
        conv_cfg (dict|None): Config of conv layers.
        norm_cfg (dict|None): Config of norm layers.
        act_cfg (dict|None): Config of activation layers.
    """
    def __init__(self, key_in_channels, query_in_channels, channels,
                 out_channels, share_key_query, query_downsample,
                 key_downsample, key_query_num_convs, value_out_num_convs,
                 key_query_norm, value_out_norm, matmul_norm, with_out,
                 conv_cfg, norm_cfg, act_cfg):
        super(SelfAttentionBlock, self).__init__()
        if share_key_query:
            # A shared projection only makes sense for identical inputs.
            assert key_in_channels == query_in_channels
        self.key_in_channels = key_in_channels
        self.query_in_channels = query_in_channels
        self.out_channels = out_channels
        self.channels = channels
        self.share_key_query = share_key_query
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.key_project = self.build_project(
            key_in_channels,
            channels,
            num_convs=key_query_num_convs,
            use_conv_module=key_query_norm,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if share_key_query:
            self.query_project = self.key_project
        else:
            self.query_project = self.build_project(
                query_in_channels,
                channels,
                num_convs=key_query_num_convs,
                use_conv_module=key_query_norm,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        # When an out projection is used, value maps to the intermediate
        # `channels` and out_project maps to `out_channels`; otherwise value
        # maps straight to `out_channels`.
        self.value_project = self.build_project(
            key_in_channels,
            channels if with_out else out_channels,
            num_convs=value_out_num_convs,
            use_conv_module=value_out_norm,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if with_out:
            self.out_project = self.build_project(
                channels,
                out_channels,
                num_convs=value_out_num_convs,
                use_conv_module=value_out_norm,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            self.out_project = None
        self.query_downsample = query_downsample
        self.key_downsample = key_downsample
        self.matmul_norm = matmul_norm
        self.init_weights()
    def init_weights(self):
        """Initialize weight of later layer."""
        if self.out_project is not None:
            # Only zero-init when out_project is a bare conv stack; a
            # ConvModule's own init is left untouched.
            if not isinstance(self.out_project, ConvModule):
                constant_init(self.out_project, 0)
    def build_project(self, in_channels, channels, num_convs, use_conv_module,
                      conv_cfg, norm_cfg, act_cfg):
        """Build projection layer for key/query/value/out.

        Returns a single layer when ``num_convs`` is 1, otherwise an
        ``nn.Sequential`` of 1x1 convs (ConvModule or plain Conv2d).
        """
        if use_conv_module:
            convs = [
                ConvModule(
                    in_channels,
                    channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
            ]
            for _ in range(num_convs - 1):
                convs.append(
                    ConvModule(
                        channels,
                        channels,
                        1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))
        else:
            convs = [nn.Conv2d(in_channels, channels, 1)]
            for _ in range(num_convs - 1):
                convs.append(nn.Conv2d(channels, channels, 1))
        if len(convs) > 1:
            convs = nn.Sequential(*convs)
        else:
            convs = convs[0]
        return convs
    def forward(self, query_feats, key_feats):
        """Forward function."""
        batch_size = query_feats.size(0)
        query = self.query_project(query_feats)
        if self.query_downsample is not None:
            query = self.query_downsample(query)
        # (B, C, H, W) -> (B, C, H*W) -> (B, H*W, C)
        query = query.reshape(*query.shape[:2], -1)
        query = query.permute(0, 2, 1).contiguous()
        key = self.key_project(key_feats)
        value = self.value_project(key_feats)
        if self.key_downsample is not None:
            # Key and value are downsampled together so they stay aligned.
            key = self.key_downsample(key)
            value = self.key_downsample(value)
        # key: (B, C, HW_k); value: (B, HW_k, C)
        key = key.reshape(*key.shape[:2], -1)
        value = value.reshape(*value.shape[:2], -1)
        value = value.permute(0, 2, 1).contiguous()
        # sim_map: (B, HW_q, HW_k)
        sim_map = torch.matmul(query, key)
        if self.matmul_norm:
            # Scaled dot-product attention (divide by sqrt(channels)).
            sim_map = (self.channels**-.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        # context: (B, HW_q, C) -> (B, C, H, W) of the query feature map.
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.reshape(batch_size, -1, *query_feats.shape[2:])
        if self.out_project is not None:
            context = self.out_project(context)
        return context
| 6,125 | 37.2875 | 78 | py |
CP2 | CP2-main/mmseg/models/utils/up_conv_block.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer
class UpConvBlock(nn.Module):
    """Upsample convolution block in decoder for UNet.

    Upsamples a high-level low-resolution feature map and fuses it with the
    corresponding high-resolution skip feature from the encoder through a
    convolution block.

    Args:
        conv_block (nn.Sequential): Sequential of convolutional layers.
        in_channels (int): Number of input channels of the high-level
        skip_channels (int): Number of input channels of the low-level
            high-resolution feature map from encoder.
        out_channels (int): Number of output channels.
        num_convs (int): Number of convolutional layers in the conv_block.
            Default: 2.
        stride (int): Stride of convolutional layer in conv_block. Default: 1.
        dilation (int): Dilation rate of convolutional layer in conv_block.
            Default: 1.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
        conv_cfg (dict | None): Config dict for convolution layer.
            Default: None.
        norm_cfg (dict | None): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict | None): Config dict for activation layer in ConvModule.
            Default: dict(type='ReLU').
        upsample_cfg (dict): The upsample config of the upsample module in
            decoder. Default: dict(type='InterpConv'). If the high-level
            feature map already matches the skip feature's size, pass None
            and only a 1x1 channel projection is applied.
        dcn (bool): Use deformable convolution in convolutional layer or not.
            Default: None.
        plugins (dict): plugins for convolutional layers. Default: None.
    """
    def __init__(self,
                 conv_block,
                 in_channels,
                 skip_channels,
                 out_channels,
                 num_convs=2,
                 stride=1,
                 dilation=1,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 upsample_cfg=dict(type='InterpConv'),
                 dcn=None,
                 plugins=None):
        super(UpConvBlock, self).__init__()
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        # The fusion block sees the concatenation of the skip feature and
        # the upsampled feature, hence 2 * skip_channels input channels.
        self.conv_block = conv_block(
            in_channels=2 * skip_channels,
            out_channels=out_channels,
            num_convs=num_convs,
            stride=stride,
            dilation=dilation,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            dcn=None,
            plugins=None)
        if upsample_cfg is None:
            # Same-resolution case: only match channel counts via 1x1 conv.
            self.upsample = ConvModule(
                in_channels,
                skip_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            self.upsample = build_upsample_layer(
                cfg=upsample_cfg,
                in_channels=in_channels,
                out_channels=skip_channels,
                with_cp=with_cp,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
    def forward(self, skip, x):
        """Upsample ``x``, concatenate with ``skip`` and fuse."""
        upsampled = self.upsample(x)
        fused = torch.cat([skip, upsampled], dim=1)
        return self.conv_block(fused)
| 3,968 | 37.911765 | 79 | py |
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Round ``value`` to the nearest multiple of ``divisor``.

    This function rounds the channel number to the nearest value that can be
    divisible by the divisor. It is taken from the original tf repo. It ensures
    that all layers have a channel number that is divisible by divisor. It can
    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int): The minimum value of the output channel.
            Default: None, means that the minimum value equal to the divisor.
        min_ratio (float): The minimum ratio of the rounded channel number to
            the original channel number. Default: 0.9.

    Returns:
        int: The modified output channel number.
    """
    floor = divisor if min_value is None else min_value
    # Round half up to the nearest multiple of `divisor`.
    nearest_multiple = int(value + divisor / 2) // divisor * divisor
    result = max(floor, nearest_multiple)
    # Never shrink by more than (1 - min_ratio) of the original value.
    if result < min_ratio * value:
        result += divisor
    return result
| 1,231 | 43 | 116 | py |
CP2 | CP2-main/mmseg/models/utils/inverted_residual.py | from mmcv.cnn import ConvModule
from torch import nn
from torch.utils import checkpoint as cp
from .se_layer import SELayer
class InvertedResidual(nn.Module):
    """InvertedResidual block for MobileNetV2.

    Expands the input channels by ``expand_ratio``, applies a (possibly
    dilated) depthwise 3x3 conv, then projects back with a linear 1x1 conv.
    A residual shortcut is used when the block keeps both the resolution
    and the channel count.

    Args:
        in_channels (int): The input channels of the InvertedResidual block.
        out_channels (int): The output channels of the InvertedResidual block.
        stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Adjusts number of channels of the hidden layer
            in InvertedResidual by this amount.
        dilation (int): Dilation rate of depthwise conv. Default: 1
        conv_cfg (dict): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.

    Returns:
        Tensor: The output tensor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio,
                 dilation=1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 with_cp=False):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2], f'stride must in [1, 2]. But received {stride}.'
        self.stride = stride
        self.with_cp = with_cp
        self.use_res_connect = self.stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))
        modules = []
        if expand_ratio != 1:
            # Pointwise expansion; skipped when the ratio is 1.
            modules.append(
                ConvModule(
                    in_channels=in_channels,
                    out_channels=hidden_dim,
                    kernel_size=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # Depthwise conv (groups == channels) whose padding tracks the
        # dilation so the spatial size only changes via the stride.
        modules.append(
            ConvModule(
                in_channels=hidden_dim,
                out_channels=hidden_dim,
                kernel_size=3,
                stride=stride,
                padding=dilation,
                dilation=dilation,
                groups=hidden_dim,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        # Linear projection back to out_channels (no activation).
        modules.append(
            ConvModule(
                in_channels=hidden_dim,
                out_channels=out_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None))
        self.conv = nn.Sequential(*modules)
    def forward(self, x):

        def _inner_forward(inp):
            out = self.conv(inp)
            return inp + out if self.use_res_connect else out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory during training.
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)
class InvertedResidualV3(nn.Module):
    """Inverted Residual Block for MobileNetV3.

    Optional expansion 1x1 conv, depthwise conv, optional SE attention,
    then a linear 1x1 projection; residual shortcut when resolution and
    channel count are preserved.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
            Default: 3.
        stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
            se layer.
        with_expand_conv (bool): Use expand conv or not. If set False,
            mid_channels must be the same with in_channels. Default: True.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.

    Returns:
        Tensor: The output tensor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_expand_conv=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 with_cp=False):
        super(InvertedResidualV3, self).__init__()
        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        assert stride in [1, 2]
        self.with_cp = with_cp
        self.with_se = se_cfg is not None
        self.with_expand_conv = with_expand_conv
        if self.with_se:
            assert isinstance(se_cfg, dict)
        if not self.with_expand_conv:
            # Without an expansion conv the depthwise conv consumes the
            # input directly, so the channel counts must match.
            assert mid_channels == in_channels
        if self.with_expand_conv:
            self.expand_conv = ConvModule(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        self.depthwise_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=mid_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=mid_channels,
            # For stride 2, swap in TF-style "same" padding via
            # Conv2dAdaptivePadding.
            conv_cfg=dict(
                type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if self.with_se:
            self.se = SELayer(**se_cfg)
        # Linear projection (act_cfg=None) back to out_channels.
        self.linear_conv = ConvModule(
            in_channels=mid_channels,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)
    def forward(self, x):
        def _inner_forward(x):
            out = x
            if self.with_expand_conv:
                out = self.expand_conv(out)
            out = self.depthwise_conv(out)
            if self.with_se:
                out = self.se(out)
            out = self.linear_conv(out)
            if self.with_res_shortcut:
                return x + out
            else:
                return out
        if self.with_cp and x.requires_grad:
            # Trade compute for memory during training.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return out
| 7,005 | 32.521531 | 79 | py |
CP2 | CP2-main/mmseg/models/utils/__init__.py | from .drop import DropPath
from .inverted_residual import InvertedResidual, InvertedResidualV3
from .make_divisible import make_divisible
from .res_layer import ResLayer
from .se_layer import SELayer
from .self_attention_block import SelfAttentionBlock
from .up_conv_block import UpConvBlock
from .weight_init import trunc_normal_
__all__ = [
'ResLayer', 'SelfAttentionBlock', 'make_divisible', 'InvertedResidual',
'UpConvBlock', 'InvertedResidualV3', 'SELayer', 'DropPath', 'trunc_normal_'
]
| 502 | 34.928571 | 79 | py |
CP2 | CP2-main/mmseg/models/utils/drop.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/layers/drop.py."""
import torch
from torch import nn
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).

    In training mode, each sample in the batch is zeroed out with
    probability ``drop_prob``; survivors are scaled by ``1 / keep_prob`` so
    the expected value is unchanged. In eval mode the input passes through
    untouched.

    Args:
        drop_prob (float): Drop rate for paths of model. Dropout rate has
            to be between 0 and 1. Default: 0.
    """

    def __init__(self, drop_prob=0.):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.keep_prob = 1 - drop_prob

    def forward(self, x):
        if not self.training or self.drop_prob == 0.:
            return x
        # One Bernoulli draw per sample, broadcast across all other dims
        # (works for any tensor rank, not just 2D ConvNets).
        mask_shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
        mask = self.keep_prob + torch.rand(
            mask_shape, dtype=x.dtype, device=x.device)
        mask.floor_()  # binarize to 0/1
        return x.div(self.keep_prob) * mask
| 1,015 | 30.75 | 78 | py |
CP2 | CP2-main/mmseg/models/segmentors/base.py | import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import auto_fp16
class BaseSegmentor(nn.Module):
    """Base class for segmentors.

    Subclasses implement feature extraction, the training forward pass and
    the two test modes; this base class provides the shared train/val step
    wrappers, loss parsing/reduction and result visualization.
    """
    __metaclass__ = ABCMeta
    def __init__(self):
        super(BaseSegmentor, self).__init__()
        # Flag consumed by mmcv's @auto_fp16 decorator (see forward()).
        self.fp16_enabled = False
    @property
    def with_neck(self):
        """bool: whether the segmentor has neck"""
        return hasattr(self, 'neck') and self.neck is not None
    @property
    def with_auxiliary_head(self):
        """bool: whether the segmentor has auxiliary head"""
        return hasattr(self,
                       'auxiliary_head') and self.auxiliary_head is not None
    @property
    def with_decode_head(self):
        """bool: whether the segmentor has decode head"""
        return hasattr(self, 'decode_head') and self.decode_head is not None
    @abstractmethod
    def extract_feat(self, imgs):
        """Placeholder for extract features from images."""
        pass
    @abstractmethod
    def encode_decode(self, img, img_metas):
        """Placeholder for encode images with backbone and decode into a
        semantic segmentation map of the same size as input."""
        pass
    @abstractmethod
    def forward_train(self, imgs, img_metas, **kwargs):
        """Placeholder for Forward function for training."""
        pass
    @abstractmethod
    def simple_test(self, img, img_meta, **kwargs):
        """Placeholder for single image test."""
        pass
    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Placeholder for augmentation test."""
        pass
    def init_weights(self, pretrained=None):
        """Initialize the weights in segmentor.

        Only logs the checkpoint path; actual loading is done by subclasses.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained is not None:
            logger = logging.getLogger()
            logger.info(f'load model from: {pretrained}')
    def forward_test(self, imgs, img_metas, **kwargs):
        """Dispatch to ``simple_test`` (one aug) or ``aug_test`` (several).

        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got '
                                f'{type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != '
                             f'num of image meta ({len(img_metas)})')
        # all images in the same aug batch all of the same ori_shape and pad
        # shape
        for img_meta in img_metas:
            ori_shapes = [_['ori_shape'] for _ in img_meta]
            assert all(shape == ori_shapes[0] for shape in ori_shapes)
            img_shapes = [_['img_shape'] for _ in img_meta]
            assert all(shape == img_shapes[0] for shape in img_shapes)
            pad_shapes = [_['pad_shape'] for _ in img_meta]
            assert all(shape == pad_shapes[0] for shape in pad_shapes)
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            return self.aug_test(imgs, img_metas, **kwargs)
    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test` depending
        on whether ``return_loss`` is ``True``.

        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)
    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
                ``num_samples``.
                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
        """
        losses = self(**data_batch)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(data_batch['img_metas']))
        return outputs
    def val_step(self, data_batch, **kwargs):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        output = self(**data_batch, **kwargs)
        return output
    @staticmethod
    def _parse_losses(losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
                which may be a weighted sum of all losses, log_vars contains
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')
        # The total loss is the sum of every entry whose key contains 'loss';
        # non-loss entries (e.g. accuracies) are logged but not backpropped.
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)
        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()
        return loss, log_vars
    def show_result(self,
                    img,
                    result,
                    palette=None,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None,
                    opacity=0.5):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor): The semantic segmentation results to draw over
                `img`.
            palette (list[list[int]]] | np.ndarray | None): The palette of
                segmentation map. If None is given, random palette will be
                generated. Default: None
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
            opacity(float): Opacity of painted segmentation map.
                Default 0.5.
                Must be in (0, 1] range.
        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        img = mmcv.imread(img)
        img = img.copy()
        seg = result[0]
        # NOTE(review): self.CLASSES / self.PALETTE are not defined in this
        # class; presumably attached externally (e.g. from the dataset) —
        # confirm before relying on them here.
        if palette is None:
            if self.PALETTE is None:
                palette = np.random.randint(
                    0, 255, size=(len(self.CLASSES), 3))
            else:
                palette = self.PALETTE
        palette = np.array(palette)
        assert palette.shape[0] == len(self.CLASSES)
        assert palette.shape[1] == 3
        assert len(palette.shape) == 2
        assert 0 < opacity <= 1.0
        # Paint one RGB color per class id, then blend with the image.
        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
        for label, color in enumerate(palette):
            color_seg[seg == label, :] = color
        # convert to BGR
        color_seg = color_seg[..., ::-1]
        img = img * (1 - opacity) + color_seg * opacity
        img = img.astype(np.uint8)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        if show:
            mmcv.imshow(img, win_name, wait_time)
        if out_file is not None:
            mmcv.imwrite(img, out_file)
        if not (show or out_file):
            warnings.warn('show==False and out_file is not specified, only '
                          'result image will be returned')
        return img
| 10,350 | 36.777372 | 79 | py |
CP2 | CP2-main/mmseg/models/segmentors/encoder_decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmseg.core import add_prefix
from mmseg.ops import resize
from .. import builder
from ..builder import SEGMENTORS
from .base import BaseSegmentor
@SEGMENTORS.register_module()
class EncoderDecoder(BaseSegmentor):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
    def __init__(self,
                 backbone,
                 decode_head,
                 neck=None,
                 auxiliary_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        """Build backbone, optional neck, decode head and optional auxiliary
        head(s), then initialize weights (optionally from ``pretrained``)."""
        super(EncoderDecoder, self).__init__()
        self.backbone = builder.build_backbone(backbone)
        # `neck` is optional; `with_neck` (base class) checks its presence.
        if neck is not None:
            self.neck = builder.build_neck(neck)
        self._init_decode_head(decode_head)
        self._init_auxiliary_head(auxiliary_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)
        # A decode head is mandatory for an encoder-decoder segmentor.
        assert self.with_decode_head
def _init_decode_head(self, decode_head):
"""Initialize ``decode_head``"""
self.decode_head = builder.build_head(decode_head)
self.align_corners = self.decode_head.align_corners
self.num_classes = self.decode_head.num_classes
def _init_auxiliary_head(self, auxiliary_head):
"""Initialize ``auxiliary_head``"""
if auxiliary_head is not None:
if isinstance(auxiliary_head, list):
self.auxiliary_head = nn.ModuleList()
for head_cfg in auxiliary_head:
self.auxiliary_head.append(builder.build_head(head_cfg))
else:
self.auxiliary_head = builder.build_head(auxiliary_head)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone and heads.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(EncoderDecoder, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
# self.decode_head.init_weights()
self.decode_head.init_weights(pretrained=pretrained)
if self.with_auxiliary_head:
if isinstance(self.auxiliary_head, nn.ModuleList):
for aux_head in self.auxiliary_head:
aux_head.init_weights()
else:
self.auxiliary_head.init_weights()
def extract_feat(self, img):
"""Extract features from images."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def encode_decode(self, img, img_metas):
"""Encode images with backbone and decode into a semantic segmentation
map of the same size as input."""
x = self.extract_feat(img)
out = self._decode_head_forward_test(x, img_metas)
out = resize(
input=out,
size=img.shape[2:],
mode='bilinear',
align_corners=self.align_corners)
return out
def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for decode head in
training."""
losses = dict()
loss_decode = self.decode_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_decode, 'decode'))
return losses
def _decode_head_forward_test(self, x, img_metas):
"""Run forward function and calculate loss for decode head in
inference."""
seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg)
return seg_logits
def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg):
"""Run forward function and calculate loss for auxiliary head in
training."""
losses = dict()
if isinstance(self.auxiliary_head, nn.ModuleList):
for idx, aux_head in enumerate(self.auxiliary_head):
loss_aux = aux_head.forward_train(x, img_metas,
gt_semantic_seg,
self.train_cfg)
losses.update(add_prefix(loss_aux, f'aux_{idx}'))
else:
loss_aux = self.auxiliary_head.forward_train(
x, img_metas, gt_semantic_seg, self.train_cfg)
losses.update(add_prefix(loss_aux, 'aux'))
return losses
def forward_dummy(self, img):
"""Dummy forward function."""
seg_logit = self.encode_decode(img, None)
return seg_logit
def forward(self, img, img_metas=None, return_loss=True, **kwargs):
if img_metas is None:
x = self.extract_feat(img)
return self.decode_head.forward(x)
else:
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def forward_train(self, img, img_metas, gt_semantic_seg):
"""Forward function for training.
Args:
img (Tensor): Input images.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
gt_semantic_seg (Tensor): Semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
loss_decode = self._decode_head_forward_train(x, img_metas,
gt_semantic_seg)
losses.update(loss_decode)
if self.with_auxiliary_head:
loss_aux = self._auxiliary_head_forward_train(
x, img_metas, gt_semantic_seg)
losses.update(loss_aux)
return losses
# TODO refactor
def slide_inference(self, img, img_meta, rescale):
"""Inference by sliding-window with overlap.
If h_crop > h_img or w_crop > w_img, the small patch will be used to
decode without padding.
"""
h_stride, w_stride = self.test_cfg.stride
h_crop, w_crop = self.test_cfg.crop_size
batch_size, _, h_img, w_img = img.size()
num_classes = self.num_classes
h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1
w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1
preds = img.new_zeros((batch_size, num_classes, h_img, w_img))
count_mat = img.new_zeros((batch_size, 1, h_img, w_img))
for h_idx in range(h_grids):
for w_idx in range(w_grids):
y1 = h_idx * h_stride
x1 = w_idx * w_stride
y2 = min(y1 + h_crop, h_img)
x2 = min(x1 + w_crop, w_img)
y1 = max(y2 - h_crop, 0)
x1 = max(x2 - w_crop, 0)
crop_img = img[:, :, y1:y2, x1:x2]
crop_seg_logit = self.encode_decode(crop_img, img_meta)
preds += F.pad(crop_seg_logit,
(int(x1), int(preds.shape[3] - x2), int(y1),
int(preds.shape[2] - y2)))
count_mat[:, :, y1:y2, x1:x2] += 1
assert (count_mat == 0).sum() == 0
if torch.onnx.is_in_onnx_export():
# cast count_mat to constant while exporting to ONNX
count_mat = torch.from_numpy(
count_mat.cpu().detach().numpy()).to(device=img.device)
preds = preds / count_mat
if rescale:
preds = resize(
preds,
size=img_meta[0]['ori_shape'][:2],
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return preds
def whole_inference(self, img, img_meta, rescale):
"""Inference with full image."""
seg_logit = self.encode_decode(img, img_meta)
if rescale:
# support dynamic shape for onnx
if torch.onnx.is_in_onnx_export():
size = img.shape[2:]
else:
size = img_meta[0]['ori_shape'][:2]
seg_logit = resize(
seg_logit,
size=size,
mode='bilinear',
align_corners=self.align_corners,
warning=False)
return seg_logit
def inference(self, img, img_meta, rescale):
"""Inference with slide/whole style.
Args:
img (Tensor): The input image of shape (N, 3, H, W).
img_meta (dict): Image info dict where each dict has: 'img_shape',
'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmseg/datasets/pipelines/formatting.py:Collect`.
rescale (bool): Whether rescale back to original shape.
Returns:
Tensor: The output segmentation map.
"""
assert self.test_cfg.mode in ['slide', 'whole']
ori_shape = img_meta[0]['ori_shape']
assert all(_['ori_shape'] == ori_shape for _ in img_meta)
if self.test_cfg.mode == 'slide':
seg_logit = self.slide_inference(img, img_meta, rescale)
else:
seg_logit = self.whole_inference(img, img_meta, rescale)
output = F.softmax(seg_logit, dim=1)
flip = img_meta[0]['flip']
if flip:
flip_direction = img_meta[0]['flip_direction']
assert flip_direction in ['horizontal', 'vertical']
if flip_direction == 'horizontal':
output = output.flip(dims=(3, ))
elif flip_direction == 'vertical':
output = output.flip(dims=(2, ))
return output
def simple_test(self, img, img_meta, rescale=True):
"""Simple test with single image."""
seg_logit = self.inference(img, img_meta, rescale)
seg_pred = seg_logit.argmax(dim=1)
if torch.onnx.is_in_onnx_export():
# our inference backend only support 4D output
seg_pred = seg_pred.unsqueeze(0)
return seg_pred
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, rescale=True):
"""Test with augmentations.
Only rescale=True is supported.
"""
# aug_test rescale all imgs back to ori_shape for now
assert rescale
# to save memory, we get augmented seg logit inplace
seg_logit = self.inference(imgs[0], img_metas[0], rescale)
for i in range(1, len(imgs)):
cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale)
seg_logit += cur_seg_logit
seg_logit /= len(imgs)
seg_pred = seg_logit.argmax(dim=1)
seg_pred = seg_pred.cpu().numpy()
# unravel batch dim
seg_pred = list(seg_pred)
return seg_pred
| 11,752 | 36.790997 | 79 | py |
CP2 | CP2-main/mmseg/models/segmentors/__init__.py | from .base import BaseSegmentor
from .encoder_decoder import EncoderDecoder
__all__ = ['BaseSegmentor', 'EncoderDecoder']
| 123 | 23.8 | 45 | py |
CP2 | CP2-main/mmseg/models/losses/dice_loss.py | """Modified from https://github.com/LikeLy-Journey/SegmenTron/blob/master/
segmentron/solver/loss.py (Apache-2.0 License)"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weighted_loss
@weighted_loss
def dice_loss(pred,
              target,
              valid_mask,
              smooth=1,
              exponent=2,
              class_weight=None,
              ignore_index=255):
    """Multi-class dice loss, averaged over all class channels.

    ``pred`` holds per-class probabilities (N, C, ...) while ``target`` is
    one-hot encoded with the class dimension last, so channel ``i`` is read
    as ``pred[:, i]`` vs ``target[..., i]``.
    """
    assert pred.shape[0] == target.shape[0]
    num_classes = pred.shape[1]
    total_loss = 0
    for cls_idx in range(num_classes):
        if cls_idx == ignore_index:
            continue
        per_class_loss = binary_dice_loss(
            pred[:, cls_idx],
            target[..., cls_idx],
            valid_mask=valid_mask,
            smooth=smooth,
            exponent=exponent)
        if class_weight is not None:
            per_class_loss = per_class_loss * class_weight[cls_idx]
        total_loss = total_loss + per_class_loss
    # Note: the divisor is the total channel count, including a skipped
    # ignore_index channel, matching the original implementation.
    return total_loss / num_classes
@weighted_loss
def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards):
    """Dice loss for a single binary channel, one value per batch sample."""
    assert pred.shape[0] == target.shape[0]
    # Flatten every per-sample map to a vector.
    flat_pred = pred.reshape(pred.shape[0], -1)
    flat_target = target.reshape(target.shape[0], -1)
    flat_valid = valid_mask.reshape(valid_mask.shape[0], -1)
    intersection = torch.sum(torch.mul(flat_pred, flat_target) * flat_valid, dim=1)
    numerator = intersection * 2 + smooth
    denominator = torch.sum(
        flat_pred.pow(exponent) + flat_target.pow(exponent), dim=1) + smooth
    return 1 - numerator / denominator
@LOSSES.register_module()
class DiceLoss(nn.Module):
    """DiceLoss.
    This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
    Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
    Args:
        smooth (float): A float number to smooth loss, and avoid NaN error.
            Default: 1
        exponent (float): An float number to calculate denominator
            value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Default to 1.0.
        ignore_index (int | None): The label index to be ignored. Default: 255.
    """
    def __init__(self,
                 smooth=1,
                 exponent=2,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0,
                 ignore_index=255,
                 **kwards):
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        self.exponent = exponent
        self.reduction = reduction
        self.class_weight = get_class_weight(class_weight)
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
    def forward(self,
                pred,
                target,
                avg_factor=None,
                reduction_override=None,
                **kwards):
        """Compute the weighted multi-class dice loss.

        Args:
            pred (torch.Tensor): Raw logits of shape (N, C, ...).
            target (torch.Tensor): Class-index ground-truth labels.
            avg_factor (int, optional): Average factor used to average the
                loss. Default: None.
            reduction_override (str, optional): Overrides ``self.reduction``
                for this call.

        Returns:
            torch.Tensor: The calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            class_weight = pred.new_tensor(self.class_weight)
        else:
            class_weight = None
        pred = F.softmax(pred, dim=1)
        num_classes = pred.shape[1]
        # Clamp keeps out-of-range labels (e.g. the ignore value) inside
        # [0, num_classes - 1] so F.one_hot does not fail; those positions
        # are zeroed out by valid_mask below.
        one_hot_target = F.one_hot(
            torch.clamp(target.long(), 0, num_classes - 1),
            num_classes=num_classes)
        valid_mask = (target != self.ignore_index).long()
        loss = self.loss_weight * dice_loss(
            pred,
            one_hot_target,
            valid_mask=valid_mask,
            reduction=reduction,
            avg_factor=avg_factor,
            smooth=self.smooth,
            exponent=self.exponent,
            class_weight=class_weight,
            ignore_index=self.ignore_index)
        return loss
| 4,239 | 34.333333 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/lovasz_loss.py | """Modified from https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytor
ch/lovasz_losses.py Lovasz-Softmax and Jaccard hinge loss in PyTorch Maxim
Berman 2018 ESAT-PSI KU Leuven (MIT License)"""
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def lovasz_grad(gt_sorted):
    """Computes gradient of the Lovasz extension w.r.t sorted errors.
    See Alg. 1 in paper.
    """
    n = len(gt_sorted)
    total_fg = gt_sorted.sum()
    fg_cumsum = gt_sorted.float().cumsum(0)
    bg_cumsum = (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - (total_fg - fg_cumsum) / (total_fg + bg_cumsum)
    if n > 1:  # cover 1-pixel case
        jaccard[1:n] = jaccard[1:n] - jaccard[0:-1]
    return jaccard
def flatten_binary_logits(logits, labels, ignore_index=None):
    """Flattens predictions in the batch (binary case) Remove labels equal to
    'ignore_index'."""
    flat_logits = logits.view(-1)
    flat_labels = labels.view(-1)
    if ignore_index is None:
        return flat_logits, flat_labels
    keep = (flat_labels != ignore_index)
    return flat_logits[keep], flat_labels[keep]
def flatten_probs(probs, labels, ignore_index=None):
    """Flattens predictions in the batch to (P, C), dropping ignored pixels."""
    if probs.dim() == 3:
        # assumes output of a sigmoid layer: insert a singleton class dim
        b, h, w = probs.size()
        probs = probs.view(b, 1, h, w)
    num_classes = probs.size(1)
    flat_probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, num_classes)
    flat_labels = labels.view(-1)
    if ignore_index is None:
        return flat_probs, flat_labels
    valid = (flat_labels != ignore_index)
    return flat_probs[valid.nonzero().squeeze()], flat_labels[valid]
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss.
    Args:
        logits (torch.Tensor): [P], logits at each prediction
            (between -infty and +infty).
        labels (torch.Tensor): [P], binary ground truth labels (0 or 1).
    Returns:
        torch.Tensor: The calculated loss.
    """
    if len(labels) == 0:
        # only void pixels; keep a zero still attached to the graph
        return logits.sum() * 0.
    signs = labels.float() * 2. - 1.
    hinge_errors = 1. - logits * signs
    sorted_errors, order = torch.sort(hinge_errors, dim=0, descending=True)
    order = order.data
    sorted_gt = labels[order]
    return torch.dot(F.relu(sorted_errors), lovasz_grad(sorted_gt))
def lovasz_hinge(logits,
                 labels,
                 classes='present',
                 per_image=False,
                 class_weight=None,
                 reduction='mean',
                 avg_factor=None,
                 ignore_index=255):
    """Binary Lovasz hinge loss.
    Args:
        logits (torch.Tensor): [B, H, W], logits at each pixel
            (between -infty and +infty).
        labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1).
        classes (str | list[int], optional): Placeholder, to be consistent with
            other loss. Default: None.
        per_image (bool, optional): If per_image is True, compute the loss per
            image instead of per batch. Default: False.
        class_weight (list[float], optional): Placeholder, to be consistent
            with other loss. Default: None.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. This parameter only works when per_image is True.
            Default: None.
        ignore_index (int | None): The label index to be ignored. Default: 255.
    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        # Whole batch flattened to a single prediction vector.
        return lovasz_hinge_flat(
            *flatten_binary_logits(logits, labels, ignore_index))
    per_image_losses = [
        lovasz_hinge_flat(*flatten_binary_logits(
            logit.unsqueeze(0), label.unsqueeze(0), ignore_index))
        for logit, label in zip(logits, labels)
    ]
    return weight_reduce_loss(
        torch.stack(per_image_losses), None, reduction, avg_factor)
def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
    """Multi-class Lovasz-Softmax loss.
    Args:
        probs (torch.Tensor): [P, C], class probabilities at each prediction
            (between 0 and 1).
        labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
        classes (str | list[int], optional): Classes chosen to calculate loss.
            'all' for all classes, 'present' for classes present in labels, or
            a list of classes to average. Default: 'present'.
        class_weight (list[float], optional): The weight for each class.
            Default: None.
    Returns:
        torch.Tensor: The calculated loss.
    """
    if probs.numel() == 0:
        # only void pixels, the gradients should be 0
        return probs * 0.
    num_classes = probs.size(1)
    class_losses = []
    if classes in ['all', 'present']:
        class_to_sum = list(range(num_classes))
    else:
        class_to_sum = classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        if num_classes == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probs[:, 0]
        else:
            class_pred = probs[:, c]
        abs_errors = (fg - class_pred).abs()
        sorted_errors, order = torch.sort(abs_errors, 0, descending=True)
        fg_sorted = fg[order.data]
        class_loss = torch.dot(sorted_errors, lovasz_grad(fg_sorted))
        if class_weight is not None:
            class_loss = class_loss * class_weight[c]
        class_losses.append(class_loss)
    return torch.stack(class_losses).mean()
def lovasz_softmax(probs,
                   labels,
                   classes='present',
                   per_image=False,
                   class_weight=None,
                   reduction='mean',
                   avg_factor=None,
                   ignore_index=255):
    """Multi-class Lovasz-Softmax loss.
    Args:
        probs (torch.Tensor): [B, C, H, W], class probabilities at each
            prediction (between 0 and 1).
        labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
            C - 1).
        classes (str | list[int], optional): Classes chosen to calculate loss.
            'all' for all classes, 'present' for classes present in labels, or
            a list of classes to average. Default: 'present'.
        per_image (bool, optional): If per_image is True, compute the loss per
            image instead of per batch. Default: False.
        class_weight (list[float], optional): The weight for each class.
            Default: None.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. This parameter only works when per_image is True.
            Default: None.
        ignore_index (int | None): The label index to be ignored. Default: 255.
    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        # Whole batch flattened to one set of predictions.
        return lovasz_softmax_flat(
            *flatten_probs(probs, labels, ignore_index),
            classes=classes,
            class_weight=class_weight)
    per_image_losses = [
        lovasz_softmax_flat(
            *flatten_probs(prob.unsqueeze(0), label.unsqueeze(0),
                           ignore_index),
            classes=classes,
            class_weight=class_weight)
        for prob, label in zip(probs, labels)
    ]
    return weight_reduce_loss(
        torch.stack(per_image_losses), None, reduction, avg_factor)
@LOSSES.register_module()
class LovaszLoss(nn.Module):
    """LovaszLoss.
    This loss is proposed in `The Lovasz-Softmax loss: A tractable surrogate
    for the optimization of the intersection-over-union measure in neural
    networks <https://arxiv.org/abs/1705.08790>`_.
    Args:
        loss_type (str, optional): Binary or multi-class loss.
            Default: 'multi_class'. Options are "binary" and "multi_class".
        classes (str | list[int], optional): Classes chosen to calculate loss.
            'all' for all classes, 'present' for classes present in labels, or
            a list of classes to average. Default: 'present'.
        per_image (bool, optional): If per_image is True, compute the loss per
            image instead of per batch. Default: False.
        reduction (str, optional): The method used to reduce the loss. Options
            are "none", "mean" and "sum". This parameter only works when
            per_image is True. Default: 'mean'.
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """
    def __init__(self,
                 loss_type='multi_class',
                 classes='present',
                 per_image=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0):
        super(LovaszLoss, self).__init__()
        assert loss_type in ('binary', 'multi_class'), "loss_type should be \
                    'binary' or 'multi_class'."
        # Select hinge (binary) or softmax (multi-class) Lovasz variant.
        if loss_type == 'binary':
            self.cls_criterion = lovasz_hinge
        else:
            self.cls_criterion = lovasz_softmax
        assert classes in ('all', 'present') or mmcv.is_list_of(classes, int)
        if not per_image:
            assert reduction == 'none', "reduction should be 'none' when \
                    per_image is False."
        self.classes = classes
        self.per_image = per_image
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        # if multi-class loss, transform logits to probs
        if self.cls_criterion == lovasz_softmax:
            cls_score = F.softmax(cls_score, dim=1)
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            self.classes,
            self.per_image,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
| 11,391 | 36.473684 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/utils.py | import functools
import mmcv
import numpy as np
import torch.nn.functional as F
def get_class_weight(class_weight):
    """Get class weight for loss function.
    Args:
        class_weight (list[float] | str | None): If class_weight is a str,
            take it as a file name and read from it.
    """
    if not isinstance(class_weight, str):
        return class_weight
    # take it as a file path
    if class_weight.endswith('.npy'):
        return np.load(class_weight)
    # pkl, json or yaml
    return mmcv.load(class_weight)
def reduce_loss(loss, reduction):
    """Reduce loss as specified.
    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".
    Return:
        Tensor: Reduced loss tensor.
    """
    # get_enum validates the string and raises on unknown reductions.
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:  # none
        return loss
    if reduction_enum == 1:  # elementwise_mean
        return loss.mean()
    if reduction_enum == 2:  # sum
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.
    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.
    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        # if weight is specified, apply element-wise weight
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight
    if avg_factor is None:
        # if avg_factor is not specified, just reduce the loss
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # if reduction is mean, then average the loss by avg_factor
        return loss.sum() / avg_factor
    if reduction == 'none':
        # if reduction is 'none', then do nothing
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return element-wise losses
    without any reduction. The decorated function gains ``weight``,
    ``reduction`` and ``avg_factor`` arguments which are handled by
    :func:`weight_reduce_loss`, i.e. it behaves like
    ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)``.
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Element-wise loss first, then weighting/reduction.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
| 3,690 | 29.254098 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/accuracy.py | import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
target (torch.Tensor): The target of each prediction, shape (N, , ...)
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == target.ndim + 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
# transpose to shape (maxk, N, ...)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / target.numel()))
return res[0] if return_single else res
class Accuracy(nn.Module):
    """Accuracy calculation module."""
    def __init__(self, topk=(1, ), thresh=None):
        """Module to calculate the accuracy.
        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Default to None.
        """
        super().__init__()
        self.topk = topk
        self.thresh = thresh
    def forward(self, pred, target):
        """Forward function to calculate accuracy.
        Args:
            pred (torch.Tensor): Prediction of models.
            target (torch.Tensor): Target for each prediction.
        Returns:
            tuple[float]: The accuracies under different topk criterions.
        """
        # Delegates to the module-level ``accuracy`` function.
        return accuracy(pred, target, self.topk, self.thresh)
| 2,970 | 36.607595 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/cross_entropy_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import get_class_weight, weight_reduce_loss
def cross_entropy(pred,
                  label,
                  weight=None,
                  class_weight=None,
                  reduction='mean',
                  avg_factor=None,
                  ignore_index=-100):
    """The wrapper function for :func:`F.cross_entropy`.

    Computes the element-wise cross entropy, then applies optional
    per-sample ``weight`` and the requested reduction via
    :func:`weight_reduce_loss`. ``class_weight``, if given, has to be a
    Tensor of size C rescaling each class.
    """
    raw_loss = F.cross_entropy(
        pred,
        label,
        weight=class_weight,
        reduction='none',
        ignore_index=ignore_index)
    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    return weight_reduce_loss(
        raw_loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_zeros(target_shape)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(valid_mask, as_tuple=True)
if inds[0].numel() > 0:
if labels.dim() == 3:
bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1
else:
bin_labels[inds[0], labels[valid_mask]] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None,
                         class_weight=None,
                         ignore_index=255):
    """Calculate the binary CrossEntropy loss.
    Args:
        pred (torch.Tensor): The prediction with shape (N, 1).
        label (torch.Tensor): The learning label of the prediction.
        weight (torch.Tensor, optional): Sample-wise loss weight.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (int | None): The label index to be ignored. Default: 255
    Returns:
        torch.Tensor: The calculated loss
    """
    if pred.dim() != label.dim():
        # Class-index labels must first be expanded to one-hot form.
        assert (pred.dim() == 2 and label.dim() == 1) or (
            pred.dim() == 4 and label.dim() == 3), \
            'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \
            'H, W], label shape [N, H, W] are supported'
        label, weight = _expand_onehot_labels(label, weight, pred.shape,
                                              ignore_index)
    if weight is not None:
        weight = weight.float()
    elementwise = F.binary_cross_entropy_with_logits(
        pred, label.float(), pos_weight=class_weight, reduction='none')
    # do the reduction for the weighted loss
    return weight_reduce_loss(
        elementwise, weight, reduction=reduction, avg_factor=avg_factor)
def mask_cross_entropy(pred,
                       target,
                       label,
                       reduction='mean',
                       avg_factor=None,
                       class_weight=None,
                       ignore_index=None):
    """Calculate the CrossEntropy loss for masks.
    Args:
        pred (torch.Tensor): The prediction with shape (N, C), C is the number
            of classes.
        target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask'
            corresponding object. This will be used to select the mask in the
            of the class which the object belongs to when the mask prediction
            if not class-agnostic.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum".
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other loss.
            Default: None.
    Returns:
        torch.Tensor: The calculated loss
    """
    assert ignore_index is None, 'BCE loss does not support ignore_index'
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    row_inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    # Pick, per ROI, only the mask channel of its own class.
    selected = pred[row_inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        selected, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """CrossEntropyLoss.
    Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            of softmax. Defaults to False.
        use_mask (bool, optional): Whether to use mask cross entropy loss.
            Defaults to False.
        reduction (str, optional): . Defaults to 'mean'.
            Options are "none", "mean" and "sum".
        class_weight (list[float] | str, optional): Weight of each class. If in
            str format, read them from a file. Defaults to None.
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """
    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 class_weight=None,
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.class_weight = get_class_weight(class_weight)
        # Select the concrete criterion once at construction time.
        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy
    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        """Forward function."""
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.class_weight is not None:
            # Move the class weights onto the score tensor's device/dtype.
            class_weight = cls_score.new_tensor(self.class_weight)
        else:
            class_weight = None
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            class_weight=class_weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
| 7,437 | 36.376884 | 79 | py |
CP2 | CP2-main/mmseg/models/losses/__init__.py | from .accuracy import Accuracy, accuracy
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .lovasz_loss import LovaszLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'LovaszLoss', 'DiceLoss'
]
| 529 | 39.769231 | 72 | py |
CP2 | CP2-main/mmseg/models/backbones/resnet.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
constant_init, kaiming_init)
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
from ..utils import ResLayer
class BasicBlock(nn.Module):
    """Basic residual block (two 3x3 convs) used by ResNet-18/34."""

    # Output channels equal ``planes * expansion``.
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(BasicBlock, self).__init__()
        # DCN and plugins are only supported by Bottleneck.
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        # Build both norm layers up front; they are registered right after
        # their corresponding conv so the module order matches checkpoints.
        self.norm1_name, bn1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, bn2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, bn1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, bn2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Residual forward pass with optional gradient checkpointing."""

        def _residual(inp):
            shortcut = inp if self.downsample is None else self.downsample(inp)
            out = self.relu(self.norm1(self.conv1(inp)))
            out = self.norm2(self.conv2(out))
            out += shortcut
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)
        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck block for ResNet.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is
    "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Number of input channels.
        planes (int): Number of bottleneck channels; the block outputs
            ``planes * expansion`` channels.
        stride (int): Stride of the block. Default: 1.
        dilation (int): Dilation of the 3x3 conv. Default: 1.
        downsample (nn.Module | None): Layer applied to the identity branch
            when the residual shape changes. Default: None.
        style (str): 'pytorch' or 'caffe', see above. Default: 'pytorch'.
        with_cp (bool): Use gradient checkpointing to save memory at the cost
            of extra compute. Default: False.
        conv_cfg (dict | None): Config dict for convolution layers.
        norm_cfg (dict): Config dict for normalization layers.
        dcn (dict | None): Config for deformable conv replacing conv2.
        plugins (list[dict] | None): Plugins to insert after conv1/conv2/conv3.
    """

    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 plugins=None):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None
        if self.with_plugins:
            # collect plugins for conv1/conv2/conv3
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]
        # 'pytorch' puts the stride on the 3x3 conv, 'caffe' on the first 1x1.
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """make plugins for block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugins cfg to build.

        Returns:
            list[str]: List of the names of plugin.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin,
                in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins sequentially to ``x``.

        Bugfix: each plugin now consumes the previous plugin's output.
        Previously every plugin was applied to the original input ``x``,
        so with multiple plugins only the last one's effect was kept.
        """
        out = x
        for name in plugin_names:
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
@BACKBONES.register_module()
class ResNet(nn.Module):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default" 3.
        stem_channels (int): Number of stem channels. Default: 64.
        base_channels (int): Number of base channels of res layer. Default: 64.
        num_stages (int): Resnet stages, normally 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.

            - position (str, required): Position inside block to insert plugin,
            options: 'after_conv1', 'after_conv2', 'after_conv3'.

            - stages (tuple[bool], optional): Stages to apply plugin, length
            should be same as 'num_stages'
        multi_grid (Sequence[int]|None): Multi grid dilation rates of last
            stage. Default: None
        contract_dilation (bool): Whether contract first dilation of each layer
            Default: False
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from mmseg.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # Maps depth -> (block class, number of blocks per stage).
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }

    def __init__(self,
                 depth,
                 in_channels=3,
                 stem_channels=64,
                 base_channels=64,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 multi_grid=None,
                 contract_dilation=False,
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for resnet')
        self.depth = depth
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.multi_grid = multi_grid
        self.contract_dilation = contract_dilation
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        # Build one residual stage per entry in stage_blocks; each stage is
        # registered as an attribute named 'layer{i+1}'.
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            if plugins is not None:
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # multi grid is applied to last layer only
            stage_multi_grid = multi_grid if i == len(
                self.stage_blocks) - 1 else None
            # Channel width doubles at each stage: 64, 128, 256, 512.
            planes = base_channels * 2**i
            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=planes,
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                multi_grid=stage_multi_grid,
                contract_dilation=contract_dilation)
            # Next stage's input width is this stage's output width.
            self.inplanes = planes * self.block.expansion
            layer_name = f'layer{i+1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel width of the last stage's output feature map.
        self.feat_dim = self.block.expansion * base_channels * 2**(
            len(self.stage_blocks) - 1)

    def make_stage_plugins(self, plugins, stage_idx):
        """make plugins for ResNet 'stage_idx'th stage .

        Currently we support to insert 'context_block',
        'empirical_attention_block', 'nonlocal_block' into the backbone like
        ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
        Bottleneck.

        An example of plugins format could be :
        >>> plugins=[
        ...     dict(cfg=dict(type='xxx', arg1='xxx'),
        ...          stages=(False, True, True, True),
        ...          position='after_conv2'),
        ...     dict(cfg=dict(type='yyy'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='1'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3'),
        ...     dict(cfg=dict(type='zzz', postfix='2'),
        ...          stages=(True, True, True, True),
        ...          position='after_conv3')
        ... ]
        >>> self = ResNet(depth=18)
        >>> stage_plugins = self.make_stage_plugins(plugins, 0)
        >>> assert len(stage_plugins) == 3

        Suppose 'stage_idx=0', the structure of blocks in the stage would be:
            conv1-> conv2->conv3->yyy->zzz1->zzz2
        Suppose 'stage_idx=1', the structure of blocks in the stage would be:
            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2

        If stages is missing, the plugin would be applied to all stages.

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build

        Returns:
            list[dict]: Plugins for current stage
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            # whether to insert plugin into current stage
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)
        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        """Make stem layer for ResNet."""
        if self.deep_stem:
            # ResNetV1c/V1d style stem: three stacked 3x3 convs instead of
            # a single 7x7 conv; only the first conv has stride 2.
            self.stem = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    in_channels,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels // 2,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
                nn.ReLU(inplace=True),
                build_conv_layer(
                    self.conv_cfg,
                    stem_channels // 2,
                    stem_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(
                self.conv_cfg,
                in_channels,
                stem_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False)
            self.norm1_name, norm1 = build_norm_layer(
                self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        """Freeze stages param and norm stats."""
        if self.frozen_stages >= 0:
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False

        # frozen_stages == k freezes the stem plus layer1..layerk.
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

            if self.zero_init_residual:
                # Zero-init the last norm of each residual block so the
                # block starts out as an identity mapping.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keep normalization layer
        freezed."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
@BACKBONES.register_module()
class ResNetV1c(ResNet):
    """ResNetV1c backbone.

    Identical to the default ResNet (V1b) except that the input stem uses
    three stacked 3x3 convolutions instead of a single 7x7 convolution [1]_.

    References:
        .. [1] https://arxiv.org/pdf/1812.01187.pdf
    """

    def __init__(self, **kwargs):
        super().__init__(deep_stem=True, avg_down=False, **kwargs)
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d backbone.

    Like ResNetV1c it replaces the 7x7 stem conv with three 3x3 convs; in
    addition, each downsampling shortcut uses a stride-2 2x2 average pool
    followed by a stride-1 conv instead of a strided conv.
    """

    def __init__(self, **kwargs):
        super().__init__(deep_stem=True, avg_down=True, **kwargs)
| 24,210 | 34.139332 | 79 | py |
CP2 | CP2-main/mmseg/models/backbones/vit.py | """Modified from https://github.com/rwightman/pytorch-image-
models/blob/master/timm/models/vision_transformer.py."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import (Conv2d, Linear, build_activation_layer, build_norm_layer,
constant_init, kaiming_init, normal_init)
from mmcv.runner import _load_checkpoint
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
from ..utils import DropPath, trunc_normal_
class Mlp(nn.Module):
    """Two-layer feed-forward network used inside a transformer block.

    Applies fc1 -> activation -> dropout -> fc2 -> dropout.

    Args:
        in_features (int): Input dimension of the first linear layer.
        hidden_features (int, optional): Width of the hidden layer.
            Falls back to ``in_features`` when None.
        out_features (int, optional): Output dimension of the second linear
            layer. Falls back to ``in_features`` when None.
        act_cfg (dict): Config dict for the activation layer.
            Default: dict(type='GELU').
        drop (float): Dropout probability in [0, 1]. Default: 0.
    """

    def __init__(self,
                 in_features,
                 hidden_features=None,
                 out_features=None,
                 act_cfg=dict(type='GELU'),
                 drop=0.):
        super(Mlp, self).__init__()
        hidden = hidden_features or in_features
        self.fc1 = Linear(in_features, hidden)
        self.act = build_activation_layer(act_cfg)
        self.fc2 = Linear(hidden, out_features or in_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Run the MLP; dropout is shared between both linear layers."""
        for layer in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = layer(x)
        return x
class Attention(nn.Module):
    """Multi-head self-attention layer for the transformer encoder block.

    Args:
        dim (int): Dimension of the input tokens.
        num_heads (int): Number of parallel attention heads. Default: 8.
        qkv_bias (bool): Enable bias for the qkv projection. Default: False.
        qk_scale (float): Override the default ``head_dim ** -0.5`` scale
            if set. Default: None.
        attn_drop (float): Dropout rate on the attention weights. Default: 0.
        proj_drop (float): Dropout rate on the output projection. Default: 0.
    """

    def __init__(self,
                 dim,
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.,
                 proj_drop=0.):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        self.scale = qk_scale or (dim // num_heads)**-0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """Compute scaled dot-product attention over the token sequence."""
        b, n, c = x.shape
        head_dim = c // self.num_heads
        # (b, n, 3c) -> (3, b, heads, n, head_dim), then split into q/k/v.
        qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)
        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        context = torch.matmul(weights, v).transpose(1, 2).reshape(b, n, c)
        return self.proj_drop(self.proj(context))
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention and pre-norm MLP, each
    wrapped in a residual connection with optional stochastic depth.

    Args:
        dim (int): The feature dimension.
        num_heads (int): Number of parallel attention heads.
        mlp_ratio (int): Ratio of MLP hidden dim to ``dim``. Default: 4.
        qkv_bias (bool): Enable bias for qkv projection. Default: False.
        qk_scale (float): Override default qk scale of head_dim ** -0.5
            if set. Default: None.
        drop (float): Dropout rate for MLP output weights. Default: 0.
        attn_drop (float): Dropout rate for attention weights. Default: 0.
        proj_drop (float): Dropout rate for the attention output projection.
            Default: 0.
        drop_path (float): Stochastic-depth rate for this block. Default: 0.
        act_cfg (dict): Config dict for the activation layer.
            Default: dict(type='GELU').
        norm_cfg (dict): Config dict for normalization layers.
            Default: dict(type='LN', eps=1e-6).
        with_cp (bool): Use gradient checkpointing to save memory at the
            cost of extra compute. Default: False.
    """

    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4,
                 qkv_bias=False,
                 qk_scale=None,
                 drop=0.,
                 attn_drop=0.,
                 proj_drop=0.,
                 drop_path=0.,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN', eps=1e-6),
                 with_cp=False):
        super(Block, self).__init__()
        self.with_cp = with_cp
        self.norm1 = build_norm_layer(norm_cfg, dim)[1]
        self.attn = Attention(dim, num_heads, qkv_bias, qk_scale, attn_drop,
                              proj_drop)
        self.drop_path = (
            DropPath(drop_path) if drop_path > 0. else nn.Identity())
        self.norm2 = build_norm_layer(norm_cfg, dim)[1]
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_cfg=act_cfg,
            drop=drop)

    def forward(self, x):
        """Attention and MLP sub-layers, each with a residual connection."""

        def _residual(tokens):
            tokens = tokens + self.drop_path(self.attn(self.norm1(tokens)))
            return tokens + self.drop_path(self.mlp(self.norm2(tokens)))

        if self.with_cp and x.requires_grad:
            return cp.checkpoint(_residual, x)
        return _residual(x)
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    A strided convolution performs both the patch split and the linear
    projection in a single step.

    Args:
        img_size (int | tuple): Input image size. Default: 224.
        patch_size (int): Side length of a square patch. Default: 16.
        in_channels (int): Number of input image channels. Default: 3.
        embed_dim (int): Dimension of the patch embeddings. Default: 768.
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_channels=3,
                 embed_dim=768):
        super(PatchEmbed, self).__init__()
        if isinstance(img_size, int):
            img_size = (img_size, img_size)
        elif not isinstance(img_size, tuple):
            raise TypeError('img_size must be type of int or tuple')
        self.img_size = img_size
        self.patch_size = (patch_size, patch_size)
        rows = img_size[0] // patch_size
        cols = img_size[1] // patch_size
        self.num_patches = rows * cols
        self.proj = Conv2d(
            in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        """Project (B, C, H, W) images to (B, L, embed_dim) patch tokens."""
        patches = self.proj(x)  # (B, embed_dim, H/p, W/p)
        return patches.flatten(2).transpose(1, 2)
@BACKBONES.register_module()
class VisionTransformer(nn.Module):
    """Vision transformer backbone.

    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for
    Image Recognition at Scale` - https://arxiv.org/abs/2010.11929

    Args:
        img_size (tuple): input image size. Default: (224, 224).
        patch_size (int, tuple): patch size. Default: 16.
        in_channels (int): number of input channels. Default: 3.
        embed_dim (int): embedding dimension. Default: 768.
        depth (int): depth of transformer.  Default: 12.
        num_heads (int): number of attention heads. Default: 12.
        mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
            Default: 4.
        out_indices (list | tuple | int): Output from which stages.
            Default: -1.
        qkv_bias (bool): enable bias for qkv if True. Default: True.
        qk_scale (float): override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): dropout rate applied in every Block's MLP.
            Default: 0.
        attn_drop_rate (float): attention dropout rate. Default: 0.
        drop_path_rate (float): maximum stochastic-depth (DropPath) rate;
            rates increase linearly from 0 to this value over the blocks.
            Default: 0.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='LN', eps=1e-6, requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='GELU').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        final_norm (bool): Whether to add a additional layer to normalize
            final feature map. Default: False.
        out_reshape (str): Select the output format of feature information.
            Default: NCHW.
        interpolate_mode (str): Select the interpolate mode for position
            embeding vector resize. Default: bicubic.
        with_cls_token (bool): If concatenating class token into image tokens
            as transformer input. Default: True.
        with_cp (bool): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Default: False.
    """

    def __init__(self,
                 img_size=(224, 224),
                 patch_size=16,
                 in_channels=3,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4,
                 out_indices=11,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_cfg=dict(type='LN', eps=1e-6, requires_grad=True),
                 act_cfg=dict(type='GELU'),
                 norm_eval=False,
                 final_norm=False,
                 out_shape='NCHW',
                 with_cls_token=True,
                 interpolate_mode='bicubic',
                 with_cp=False):
        super(VisionTransformer, self).__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_channels=in_channels,
            embed_dim=embed_dim)

        self.with_cls_token = with_cls_token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, self.patch_embed.num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        if isinstance(out_indices, int):
            self.out_indices = [out_indices]
        elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
            self.out_indices = out_indices
        else:
            raise TypeError('out_indices must be type of int, list or tuple')

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)
               ]  # stochastic depth decay rule
        # Bugfix: the per-block stochastic-depth rate dpr[i] was previously
        # passed as ``drop`` (the MLP dropout), which left drop_path at 0 and
        # ignored drop_rate inside the blocks. Route the rates correctly.
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                act_cfg=act_cfg,
                norm_cfg=norm_cfg,
                with_cp=with_cp) for i in range(depth)
        ])

        assert out_shape in ['NLC',
                             'NCHW'], 'output shape must be "NLC" or "NCHW".'

        self.out_shape = out_shape
        self.interpolate_mode = interpolate_mode
        self.final_norm = final_norm
        if final_norm:
            _, self.norm = build_norm_layer(norm_cfg, embed_dim)

        self.norm_eval = norm_eval
        self.with_cp = with_cp

    def init_weights(self, pretrained=None):
        """Load pretrained weights or apply the default (jax-style) init.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            checkpoint = _load_checkpoint(pretrained, logger=logger)
            if 'state_dict' in checkpoint:
                state_dict = checkpoint['state_dict']
            elif 'model' in checkpoint:
                state_dict = checkpoint['model']
            else:
                state_dict = checkpoint

            if 'pos_embed' in state_dict.keys():
                if self.pos_embed.shape != state_dict['pos_embed'].shape:
                    logger.info(msg=f'Resize the pos_embed shape from \
                        {state_dict["pos_embed"].shape} to {self.pos_embed.shape}')
                    h, w = self.img_size
                    pos_size = int(
                        math.sqrt(state_dict['pos_embed'].shape[1] - 1))
                    state_dict['pos_embed'] = self.resize_pos_embed(
                        state_dict['pos_embed'], (h, w), (pos_size, pos_size),
                        self.patch_size, self.interpolate_mode)

            self.load_state_dict(state_dict, False)
            # load_checkpoint(self, pretrained, strict=False, logger=logger)

        elif pretrained is None:
            # We only implement the 'jax_impl' initialization implemented at
            # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353  # noqa: E501
            trunc_normal_(self.pos_embed, std=.02)
            trunc_normal_(self.cls_token, std=.02)
            for n, m in self.named_modules():
                if isinstance(m, Linear):
                    trunc_normal_(m.weight, std=.02)
                    if m.bias is not None:
                        if 'mlp' in n:
                            normal_init(m.bias, std=1e-6)
                        else:
                            constant_init(m.bias, 0)
                elif isinstance(m, Conv2d):
                    kaiming_init(m.weight, mode='fan_in')
                    if m.bias is not None:
                        constant_init(m.bias, 0)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):
                    constant_init(m.bias, 0)
                    constant_init(m.weight, 1.0)
        else:
            raise TypeError('pretrained must be a str or None')

    def _pos_embeding(self, img, patched_img, pos_embed):
        """Positiong embeding method.

        Resize the pos_embed, if the input image size doesn't match
            the training size.
        Args:
            img (torch.Tensor): The inference image tensor, the shape
                must be [B, C, H, W].
            patched_img (torch.Tensor): The patched image, it should be
                shape of [B, L1, C].
            pos_embed (torch.Tensor): The pos_embed weighs, it should be
                shape of [B, L2, c].
        Return:
            torch.Tensor: The pos encoded image feature.
        """
        assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
            'the shapes of patched_img and pos_embed must be [B, L, C]'
        x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
        if x_len != pos_len:
            # Only a pos_embed matching the configured training resolution
            # can be resized; anything else is an error.
            if pos_len == (self.img_size[0] // self.patch_size) * (
                    self.img_size[1] // self.patch_size) + 1:
                pos_h = self.img_size[0] // self.patch_size
                pos_w = self.img_size[1] // self.patch_size
            else:
                raise ValueError(
                    'Unexpected shape of pos_embed, got {}.'.format(
                        pos_embed.shape))
            pos_embed = self.resize_pos_embed(pos_embed, img.shape[2:],
                                              (pos_h, pos_w), self.patch_size,
                                              self.interpolate_mode)
        return self.pos_drop(patched_img + pos_embed)

    @staticmethod
    def resize_pos_embed(pos_embed, input_shpae, pos_shape, patch_size, mode):
        """Resize pos_embed weights.

        Resize pos_embed using bicubic interpolate method.
        Args:
            pos_embed (torch.Tensor): pos_embed weights.
            input_shpae (tuple): Tuple for (input_h, intput_w).
            pos_shape (tuple): Tuple for (pos_h, pos_w).
            patch_size (int): Patch size.
        Return:
            torch.Tensor: The resized pos_embed of shape [B, L_new, C]
        """
        assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
        input_h, input_w = input_shpae
        pos_h, pos_w = pos_shape
        # The class-token embedding is kept as-is; only the grid of patch
        # embeddings is spatially interpolated.
        cls_token_weight = pos_embed[:, 0]
        pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
        pos_embed_weight = pos_embed_weight.reshape(
            1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
        pos_embed_weight = F.interpolate(
            pos_embed_weight,
            size=[input_h // patch_size, input_w // patch_size],
            align_corners=False,
            mode=mode)
        cls_token_weight = cls_token_weight.unsqueeze(1)
        pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
        pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
        return pos_embed

    def forward(self, inputs):
        B = inputs.shape[0]

        x = self.patch_embed(inputs)

        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x = self._pos_embeding(inputs, x, self.pos_embed)

        if not self.with_cls_token:
            # Remove class token for transformer input
            x = x[:, 1:]

        outs = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i == len(self.blocks) - 1:
                if self.final_norm:
                    x = self.norm(x)
            if i in self.out_indices:
                if self.with_cls_token:
                    # Remove class token and reshape token for decoder head
                    out = x[:, 1:]
                else:
                    out = x
                if self.out_shape == 'NCHW':
                    B, _, C = out.shape
                    out = out.reshape(B, inputs.shape[2] // self.patch_size,
                                      inputs.shape[3] // self.patch_size,
                                      C).permute(0, 3, 1, 2).contiguous()
                outs.append(out)
        return tuple(outs)

    def train(self, mode=True):
        super(VisionTransformer, self).train(mode)
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, nn.LayerNorm):
                    m.eval()
| 18,574 | 38.270613 | 128 | py |
CP2 | CP2-main/mmseg/models/backbones/__init__.py | from .resnet import ResNet
from .vit import VisionTransformer
__all__ = [
'ResNet', 'VisionTransformer'
]
| 111 | 15 | 34 | py |
CP2 | CP2-main/mmseg/datasets/custom.py | import os
import os.path as osp
from collections import OrderedDict
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from prettytable import PrettyTable
from torch.utils.data import Dataset
from mmseg.core import eval_metrics
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for semantic segmentation. An example of file structure
is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
The img/gt_semantic_seg pair of CustomDataset should be of the same
except suffix. A valid img/gt_semantic_seg filename pair should be like
``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
in the suffix). If split is given, then ``xxx`` is specified in txt file.
Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, and
self.PALETTE is None, random palette will be generated.
Default: None
"""
CLASSES = None
PALETTE = None
    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False,
                 classes=None,
                 palette=None):
        """Initialize the dataset; see the class docstring for arguments."""
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        # label_map stays None unless get_classes_and_palette() installs a
        # remapping for a custom subset of classes.
        self.label_map = None
        self.CLASSES, self.PALETTE = self.get_classes_and_palette(
            classes, palette)
        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)
        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix, self.split)
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_info = dict(filename=img)
if ann_dir is not None:
seg_map = img.replace(img_suffix, seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
results['img_prefix'] = self.img_dir
results['seg_prefix'] = self.ann_dir
if self.custom_classes:
results['label_map'] = self.label_map
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
else:
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by
pipeline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""
        # Intentionally a no-op: subclasses (e.g. CityscapesDataset,
        # ADE20KDataset) override this to write submission files.
def get_gt_seg_maps(self, efficient_test=False):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
if efficient_test:
gt_seg_map = seg_map
else:
gt_seg_map = mmcv.imread(
seg_map, flag='unchanged', backend='pillow')
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
def get_classes_and_palette(self, classes=None, palette=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
palette (Sequence[Sequence[int]]] | np.ndarray | None):
The palette of segmentation map. If None is given, random
palette will be generated. Default: None
"""
if classes is None:
self.custom_classes = False
return self.CLASSES, self.PALETTE
self.custom_classes = True
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
if self.CLASSES:
if not set(classes).issubset(self.CLASSES):
raise ValueError('classes is not a subset of CLASSES.')
# dictionary, its keys are the old label ids and its values
# are the new label ids.
# used for changing pixel labels in load_annotations.
self.label_map = {}
for i, c in enumerate(self.CLASSES):
if c not in class_names:
self.label_map[i] = -1
else:
self.label_map[i] = classes.index(c)
palette = self.get_palette_for_custom_classes(class_names, palette)
return class_names, palette
def get_palette_for_custom_classes(self, class_names, palette=None):
if self.label_map is not None:
# return subset of palette
palette = []
for old_id, new_id in sorted(
self.label_map.items(), key=lambda x: x[1]):
if new_id != -1:
palette.append(self.PALETTE[old_id])
palette = type(self.PALETTE)(palette)
elif palette is None:
if self.PALETTE is None:
palette = np.random.randint(0, 255, size=(len(class_names), 3))
else:
palette = self.PALETTE
return palette
    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 efficient_test=False,
                 **kwargs):
        """Evaluate the dataset.
        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. 'mIoU',
                'mDice' and 'mFscore' are supported.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            efficient_test (bool): If True, ground truths are kept as file
                paths instead of loaded arrays (see ``get_gt_seg_maps``).
        Returns:
            dict[str, float]: Default metrics.
        """
        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps(efficient_test)
        if self.CLASSES is None:
            # No class metadata: infer the class count from the union of
            # label values observed in the ground-truth maps.
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)
        ret_metrics = eval_metrics(
            results,
            gt_seg_maps,
            num_classes,
            self.ignore_index,
            metric,
            label_map=self.label_map,
            reduce_zero_label=self.reduce_zero_label)
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES
        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        # each class table (aAcc is a dataset-level value, so drop it here)
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)
        # for logger
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)
        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])
        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)
        # each metric dict (values were scaled to percent above; undo here)
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0
        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })
        # When results are file paths (efficient-test mode), remove the
        # temporary per-image result files after evaluation.
        if mmcv.is_list_of(results, str):
            for file_name in results:
                os.remove(file_name)
        return eval_results
| 14,628 | 35.481297 | 79 | py |
CP2 | CP2-main/mmseg/datasets/voc.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class PascalVOCDataset(CustomDataset):
    """Pascal VOC dataset.
    Args:
        split (str): Split txt file for Pascal VOC.
    """
    # 21 classes: background + the 20 VOC object categories.
    CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
               'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
               'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
               'train', 'tvmonitor')
    # One RGB color per class, aligned with CLASSES.
    PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
               [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
               [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
               [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
               [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
    def __init__(self, split, **kwargs):
        super(PascalVOCDataset, self).__init__(
            img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
        # NOTE: assert is stripped under ``python -O``; kept for parity with
        # the other dataset classes in this package.
        assert osp.exists(self.img_dir) and self.split is not None
| 1,130 | 36.7 | 79 | py |
CP2 | CP2-main/mmseg/datasets/ade.py | import os.path as osp
import tempfile
import mmcv
import numpy as np
from PIL import Image
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class ADE20KDataset(CustomDataset):
"""ADE20K dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which
is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
CLASSES = (
'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
'clock', 'flag')
PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
    def __init__(self, **kwargs):
        # ADE20K annotation value 0 is background and not one of the 150
        # categories, so reduce_zero_label is fixed to True (see class doc).
        super(ADE20KDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            **kwargs)
    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.
        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission
        Returns:
            list[str]: Paths of the written png files, one per result.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # The index range of official requirement is from 0 to 150.
            # But the index range of output is from 0 to 149.
            # That is because we set reduce_zero_label=True.
            result = result + 1
            output = Image.fromarray(result.astype(np.uint8))
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()
        return result_files
    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for ade20k evaluation).
        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: True
        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')
        if imgfile_prefix is None:
            # Caller is responsible for keeping tmp_dir alive while the
            # returned file paths are in use.
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)
        return result_files, tmp_dir
| 8,130 | 48.579268 | 79 | py |
CP2 | CP2-main/mmseg/datasets/hrf.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class HRFDataset(CustomDataset):
    """HRF dataset.
    In segmentation map annotation for HRF, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    # Binary vessel-segmentation task: background vs. vessel.
    CLASSES = ('background', 'vessel')
    PALETTE = [[120, 120, 120], [6, 230, 230]]
    def __init__(self, **kwargs):
        super(HRFDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
| 747 | 25.714286 | 77 | py |
CP2 | CP2-main/mmseg/datasets/chase_db1.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class ChaseDB1Dataset(CustomDataset):
    """Chase_db1 dataset.
    In segmentation map annotation for Chase_db1, 0 stands for background,
    which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_1stHO.png'.
    """
    # Binary vessel-segmentation task: background vs. vessel.
    CLASSES = ('background', 'vessel')
    PALETTE = [[120, 120, 120], [6, 230, 230]]
    def __init__(self, **kwargs):
        super(ChaseDB1Dataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_1stHO.png',
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir)
| 781 | 26.928571 | 79 | py |
CP2 | CP2-main/mmseg/datasets/cityscapes.py | import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from PIL import Image
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
"""Cityscapes dataset.
The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
"""
CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle')
PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
[255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
[0, 80, 100], [0, 0, 230], [119, 11, 32]]
    def __init__(self, **kwargs):
        # Cityscapes naming: images end with '_leftImg8bit.png', training
        # annotations with '_gtFine_labelTrainIds.png'.
        super(CityscapesDataset, self).__init__(
            img_suffix='_leftImg8bit.png',
            seg_map_suffix='_gtFine_labelTrainIds.png',
            **kwargs)
    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes."""
        # ``result`` may be a path to a saved numpy array (efficient-test
        # mode) or an ndarray of trainIds.
        if isinstance(result, str):
            result = np.load(result)
        import cityscapesscripts.helpers.labels as CSLabels
        result_copy = result.copy()
        # Replace each trainId with the official Cityscapes label id.
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id
        return result_copy
    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.
        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission
        Returns:
            list[str]: Paths of the written png files, one per result.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # Save as palettized ('P' mode) PNG with the official colors.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            import cityscapesscripts.helpers.labels as CSLabels
            palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
            for label_id, label in CSLabels.id2label.items():
                palette[label_id] = label.color
            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()
        return result_files
    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for Cityscapes
        evaluation).
        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: True
        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')
        if imgfile_prefix is None:
            # Caller is responsible for keeping tmp_dir alive while the
            # returned file paths are in use.
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)
        return result_files, tmp_dir
    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None,
                 efficient_test=False):
        """Evaluation in Cityscapes/default protocol.
        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.
        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """
        eval_results = dict()
        # copy() so removing 'cityscapes' below does not mutate the caller's
        # metric list.
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        # Any remaining metrics are handled by the generic CustomDataset
        # evaluation.
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger, efficient_test))
        return eval_results
    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.
        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file
        Returns:
            dict[str: float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)
        # Dump predictions to png files; uses a temp dir when no prefix given.
        result_files, tmp_dir = self.format_results(results, imgfile_prefix)
        if tmp_dir is None:
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name
        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure the official evaluator via its module-level args object.
        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False
        seg_map_list = []
        pred_list = []
        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))
        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| 8,446 | 37.747706 | 96 | py |
CP2 | CP2-main/mmseg/datasets/dataset_wrappers.py | from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.
    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.
    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """
    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # Class metadata is taken from the first dataset; presumably all
        # concatenated datasets share the same CLASSES/PALETTE — verify
        # against callers.
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = datasets[0].PALETTE
@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper that virtually repeats a dataset ``times`` times.

    The wrapped dataset is not copied; indices are folded back into the
    original range. Useful when data loading time is long but the dataset is
    small, since it reduces inter-epoch loading overhead.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """
    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(self.dataset)
    def __getitem__(self, idx):
        """Map a virtual index back into the wrapped dataset."""
        return self.dataset[idx % self._ori_len]
    def __len__(self):
        """Original dataset length multiplied by the repeat factor."""
        return self._ori_len * self.times
| 1,499 | 28.411765 | 78 | py |
CP2 | CP2-main/mmseg/datasets/pascal_context.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
"""PascalContext dataset.
In segmentation map annotation for PascalContext, 0 stands for background,
which is included in 60 categories. ``reduce_zero_label`` is fixed to
False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
fixed to '.png'.
Args:
split (str): Split txt file for PascalContext.
"""
CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
'floor', 'flower', 'food', 'grass', 'ground', 'horse',
'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
'window', 'wood')
PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]
    def __init__(self, split, **kwargs):
        # 60-class variant: label 0 (background) is kept as a real class,
        # hence reduce_zero_label=False.
        super(PascalContextDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=False,
            **kwargs)
        assert osp.exists(self.img_dir) and self.split is not None
@DATASETS.register_module()
class PascalContextDataset59(CustomDataset):
    """Pascal Context dataset without the background category.

    In the raw annotation maps label 0 marks background, so
    ``reduce_zero_label`` is fixed to ``True``: every label is shifted down
    by one and the remaining 59 categories occupy [0, 58]. The
    ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.

    Args:
        split (str): Split txt file for PascalContext.
    """

    # The 59 foreground categories (background removed).
    CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
               'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
               'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
               'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
               'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
               'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
               'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
               'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
               'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')

    # One RGB color per class, in CLASSES order.
    PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
               [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
               [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
               [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
               [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
               [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
               [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
               [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
               [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
               [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
               [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
               [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
               [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
               [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
               [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        super().__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            split=split,
            **kwargs)
        # A split file is mandatory and the image directory must exist.
        assert self.split is not None and osp.exists(self.img_dir)
| 5,202 | 49.028846 | 79 | py |
CP2 | CP2-main/mmseg/datasets/drive.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class DRIVEDataset(CustomDataset):
    """DRIVE retinal vessel segmentation dataset.

    Two categories where label 0 is background, so ``reduce_zero_label`` is
    fixed to ``False``. The ``img_suffix`` is fixed to '.png' and the
    ``seg_map_suffix`` is fixed to '_manual1.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super().__init__(
            img_suffix='.png',
            seg_map_suffix='_manual1.png',
            reduce_zero_label=False,
            **kwargs)
        # The image directory must exist for the dataset to be usable.
        assert osp.exists(self.img_dir)
| 771 | 26.571429 | 79 | py |
CP2 | CP2-main/mmseg/datasets/__init__.py | from .ade import ADE20KDataset
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .chase_db1 import ChaseDB1Dataset
from .cityscapes import CityscapesDataset
from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .drive import DRIVEDataset
from .hrf import HRFDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
from .stare import STAREDataset
from .voc import PascalVOCDataset
__all__ = [
'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
'STAREDataset'
]
| 798 | 38.95 | 78 | py |
CP2 | CP2-main/mmseg/datasets/builder.py | import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
from torch.utils.data import DistributedSampler
# Raise the soft open-file limit on non-Windows systems (capped at 4096 and
# never above the OS hard limit) — workaround for the linked PyTorch issue,
# where many dataloader workers can exhaust the default limit.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    # Keep the new soft limit within the hard limit imposed by the OS.
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
# Registries through which dataset and pipeline classes register themselves.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
    """Build a :obj:`ConcatDataset` by expanding list-valued config fields.

    Args:
        cfg (dict): Dataset config whose ``img_dir``, ``ann_dir`` or
            ``split`` may be lists/tuples; each entry yields one sub-dataset.
        default_args (dict | None): Default arguments forwarded to
            ``build_dataset``.

    Returns:
        ConcatDataset: The concatenation of all expanded sub-datasets.
    """
    from .dataset_wrappers import ConcatDataset
    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)

    def _count(field, when_missing):
        # Number of list entries; a scalar counts as one and a missing
        # field counts as ``when_missing``.
        if field is None:
            return when_missing
        return len(field) if isinstance(field, (list, tuple)) else 1

    num_img_dir = _count(img_dir, 1)
    num_ann_dir = _count(ann_dir, 0)
    num_split = _count(split, 0)
    # List-valued fields must agree in length (or be absent / scalar).
    if num_img_dir > 1:
        assert num_img_dir == num_ann_dir or num_ann_dir == 0
        assert num_img_dir == num_split or num_split == 0
    else:
        assert num_split == num_ann_dir or num_ann_dir <= 1

    datasets = []
    for i in range(max(num_split, num_img_dir)):
        data_cfg = copy.deepcopy(cfg)
        # Replace each list-valued field with its i-th entry.
        for key, value in (('img_dir', img_dir), ('ann_dir', ann_dir),
                           ('split', split)):
            if isinstance(value, (list, tuple)):
                data_cfg[key] = value[i]
        datasets.append(build_dataset(data_cfg, default_args))
    return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
    """Build a dataset from config, resolving wrapper types recursively.

    Four cases are handled: a list/tuple of configs (concatenated), the
    ``RepeatDataset`` wrapper, configs whose ``img_dir``/``split`` are
    lists (expanded via ``_concat_dataset``), and plain registry configs.

    Args:
        cfg (dict | list | tuple): Dataset config(s).
        default_args (dict | None): Default arguments for construction.

    Returns:
        Dataset: The constructed (possibly wrapped) dataset.
    """
    from .dataset_wrappers import ConcatDataset, RepeatDataset
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        inner = build_dataset(cfg['dataset'], default_args)
        return RepeatDataset(inner, cfg['times'])
    if isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
            cfg.get('split', None), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     dataloader_type='PoolDataLoader',
                     **kwargs):
    """Build a PyTorch DataLoader for ``dataset``.

    In distributed training each GPU/process gets its own dataloader backed
    by a ``DistributedSampler``; in non-distributed training one dataloader
    serves all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in
            epoch. Default: False.
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True.
        dataloader_type (str): Type of dataloader, either 'DataLoader' or
            'PoolDataLoader'. Default: 'PoolDataLoader'.
        kwargs: Any keyword argument to be used to initialize DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The sampler shards the data per rank and owns the shuffling, so
        # the DataLoader itself must not shuffle.
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    # Deterministic per-worker seeding only when a base seed is given.
    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    assert dataloader_type in (
        'DataLoader',
        'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'
    loader_cls = (
        PoolDataLoader if dataloader_type == 'PoolDataLoader' else DataLoader)

    return loader_cls(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed a dataloader worker deterministically.

    Each worker is seeded with ``num_workers * rank + worker_id + seed`` so
    that no two workers (across all ranks) share the same random stream.

    Args:
        worker_id (int): Worker id within its process.
        num_workers (int): Number of workers per process.
        rank (int): The rank of the current process.
        seed (int): The user-provided base seed.
    """
    worker_seed = seed + rank * num_workers + worker_id
    np.random.seed(worker_seed)
    random.seed(worker_seed)
| 5,871 | 33.541176 | 79 | py |
CP2 | CP2-main/mmseg/datasets/stare.py | import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class STAREDataset(CustomDataset):
    """STARE retinal vessel segmentation dataset.

    Two categories where label 0 is background, so ``reduce_zero_label`` is
    fixed to ``False``. The ``img_suffix`` is fixed to '.png' and the
    ``seg_map_suffix`` is fixed to '.ah.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super().__init__(
            img_suffix='.png',
            seg_map_suffix='.ah.png',
            reduce_zero_label=False,
            **kwargs)
        # The image directory must exist for the dataset to be usable.
        assert osp.exists(self.img_dir)
| 761 | 26.214286 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/loading.py | import os.path as osp
import mmcv
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile(object):
    """Load an image from file.

    Required keys are "img_prefix" and "img_info" (a dict that must contain
    the key "filename"). Added or updated keys are "filename",
    "ori_filename", "img", "img_shape", "ori_shape" (same as ``img_shape``),
    "pad_shape" (same as ``img_shape``), "scale_factor" (1.0) and
    "img_norm_cfg" (identity: zero means, unit stds).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8
            array. Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`.
            Default: 'cv2'.
    """

    def __init__(self,
                 to_float32=False,
                 color_type='color',
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='cv2'):
        self.to_float32 = to_float32
        self.color_type = color_type
        # Copy so later mutation of the caller's dict cannot leak in.
        self.file_client_args = file_client_args.copy()
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Load the image and populate its meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict with the loaded image and meta information added.
        """
        if self.file_client is None:
            # Create the file client lazily, on first use.
            self.file_client = mmcv.FileClient(**self.file_client_args)

        filename = results['img_info']['filename']
        if results.get('img_prefix') is not None:
            filename = osp.join(results['img_prefix'], filename)

        img = mmcv.imfrombytes(
            self.file_client.get(filename),
            flag=self.color_type,
            backend=self.imdecode_backend)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = results['img_info']['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Initial values for the default meta keys; later transforms
        # (Pad/Resize/Normalize) overwrite them.
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(to_float32={self.to_float32},'
                f"color_type='{self.color_type}',"
                f"imdecode_backend='{self.imdecode_backend}')")
@PIPELINES.register_module()
class LoadAnnotations(object):
    """Load annotations for semantic segmentation.

    Reads the segmentation map named by ``results['ann_info']['seg_map']``
    (optionally joined with ``results['seg_prefix']``), stores it under
    'gt_semantic_seg' and appends that key to ``results['seg_fields']``.

    Args:
        reduce_zero_label (bool): Whether to reduce all label values by 1,
            turning label 0 into the ignore index 255. Usually used for
            datasets where 0 is background. Default: False.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
        imdecode_backend (str): Backend for :func:`mmcv.imdecode`.
            Default: 'pillow'.
    """

    def __init__(self,
                 reduce_zero_label=False,
                 file_client_args=dict(backend='disk'),
                 imdecode_backend='pillow'):
        self.reduce_zero_label = reduce_zero_label
        # Copy so later mutation of the caller's dict cannot leak in.
        self.file_client_args = file_client_args.copy()
        self.file_client = None
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Load the semantic segmentation annotation.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict with 'gt_semantic_seg' added.
        """
        if self.file_client is None:
            # Create the file client lazily, on first use.
            self.file_client = mmcv.FileClient(**self.file_client_args)

        seg_path = results['ann_info']['seg_map']
        if results.get('seg_prefix', None) is not None:
            seg_path = osp.join(results['seg_prefix'], seg_path)

        gt_semantic_seg = mmcv.imfrombytes(
            self.file_client.get(seg_path), flag='unchanged',
            backend=self.imdecode_backend).squeeze().astype(np.uint8)

        # Remap label ids when the dataset declared custom classes.
        label_map = results.get('label_map', None)
        if label_map is not None:
            for old_id, new_id in label_map.items():
                gt_semantic_seg[gt_semantic_seg == old_id] = new_id

        if self.reduce_zero_label:
            # Shift all labels down by one with 0 becoming the ignore
            # index 255. Route through 255 first to avoid uint8 underflow.
            gt_semantic_seg[gt_semantic_seg == 0] = 255
            gt_semantic_seg = gt_semantic_seg - 1
            gt_semantic_seg[gt_semantic_seg == 254] = 255

        results['gt_semantic_seg'] = gt_semantic_seg
        results['seg_fields'].append('gt_semantic_seg')
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(reduce_zero_label={self.reduce_zero_label},'
                f"imdecode_backend='{self.imdecode_backend}')")
| 5,873 | 37.142857 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/compose.py | import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose(object):
    """Apply multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Transform objects, or config
            dicts that are built through the ``PIPELINES`` registry.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for transform in transforms:
            if callable(transform):
                self.transforms.append(transform)
            elif isinstance(transform, dict):
                # Config dicts are instantiated via the registry.
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Run every transform in order, stopping early on ``None``.

        Args:
            data (dict): A result dict containing the data to transform.

        Returns:
            dict | None: The transformed data, or None if any transform
            returned None.
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        lines = [f'{self.__class__.__name__}(']
        lines.extend(f'    {t}' for t in self.transforms)
        return '\n'.join(lines) + '\n)'
| 1,464 | 27.173077 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/formating.py | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
    """Convert ``data`` to :obj:`torch.Tensor`.

    Supported types are :class:`torch.Tensor`, :class:`numpy.ndarray`,
    non-string :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data
            to be converted.

    Returns:
        torch.Tensor: The converted tensor.

    Raises:
        TypeError: If ``data`` has an unsupported type.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    # Strings are sequences but cannot become tensors, so exclude them
    # (``mmcv.is_str`` is exactly ``isinstance(x, str)``).
    if isinstance(data, Sequence) and not isinstance(data, str):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor(object):
    """Convert the values under the given keys to :obj:`torch.Tensor`.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert ``results[key]`` to a tensor for every configured key.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The same dict with the selected entries converted to
            :obj:`torch.Tensor`.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
    """Convert images to :obj:`torch.Tensor` in (C, H, W) order.

    The input image dimension order is (H, W, C); it is transposed to
    (C, H, W). A 2-D input (H, W) gets a trailing channel added first,
    yielding a (1, H, W) tensor.

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert and transpose every image under ``self.keys``.

        Args:
            results (dict): Result dict containing the image data.

        Returns:
            dict: The dict with the images replaced by (C, H, W) tensors.
        """
        for key in self.keys:
            img = results[key]
            if len(img.shape) < 3:
                # Give grayscale images an explicit channel axis.
                img = np.expand_dims(img, -1)
            results[key] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
    """Transpose selected results entries with a fixed axis order.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Axis order passed to ``transpose``.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose ``results[key]`` along ``self.order`` for each key.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The dict with the selected entries transposed.
        """
        axis_order = self.order
        for key in self.keys:
            results[key] = results[key].transpose(axis_order)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class ToDataContainer(object):
    """Wrap selected results entries in :obj:`mmcv.DataContainer`.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``; ``results[key]`` is wrapped in a
            :obj:`mmcv.DataContainer` built with the remaining kwargs.
            Default: ``(dict(key='img', stack=True),
            dict(key='gt_semantic_seg'))``.
    """

    def __init__(self,
                 fields=(dict(key='img',
                              stack=True), dict(key='gt_semantic_seg'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap each configured field of ``results`` in a DataContainer.

        Args:
            results (dict): Result dict containing the data to wrap.

        Returns:
            dict: The dict with the selected entries wrapped.
        """
        for field in self.fields:
            # Copy so the stored field config is never mutated.
            kwargs = dict(field)
            key = kwargs.pop('key')
            results[key] = DC(results[key], **kwargs)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle(object):
    """Default formatting bundle for 'img' and 'gt_semantic_seg'.

    The two common fields are formatted as follows:

    - img: (1) transpose to (C, H, W), (2) to tensor,
      (3) to DataContainer (stack=True)
    - gt_semantic_seg: (1) unsqueeze dim-0 and cast to int64, (2) to tensor,
      (3) to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Format the common fields of ``results``.

        Args:
            results (dict): Result dict containing the data to format.

        Returns:
            dict: The dict with 'img' and 'gt_semantic_seg' formatted.
        """
        if 'img' in results:
            img = results['img']
            if len(img.shape) < 3:
                # Give grayscale images an explicit channel axis.
                img = np.expand_dims(img, -1)
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if 'gt_semantic_seg' in results:
            # Cast labels to long (int64) and add a leading channel axis.
            seg = results['gt_semantic_seg'][None, ...].astype(np.int64)
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class Collect(object):
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    ``keys`` is some subset of "img" and "gt_semantic_seg".

    The "img_metas" item is always populated: the entries named in
    ``meta_keys`` are gathered into a cpu-only :obj:`mmcv.DataContainer`.
    By default this includes:

        - "filename": path to the image file
        - "ori_filename": the original filename from ``img_info``
        - "ori_shape": original shape of the image as a tuple (h, w, c)
        - "img_shape": shape of the image input to the network (h, w, c);
          images may be zero padded on the bottom/right if the batch tensor
          is larger than this shape
        - "pad_shape": image shape after padding
        - "scale_factor": a float indicating the preprocessing scale
        - "flip" / "flip_direction": flip augmentation bookkeeping
        - "img_norm_cfg": dict with normalization mean, std and to_rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect ``self.keys`` plus the bundled meta information.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: Dict with the keys in ``self.keys`` and an 'img_metas'
            DataContainer holding the meta entries.
        """
        meta = {key: results[key] for key in self.meta_keys}
        data = {'img_metas': DC(meta, cpu_only=True)}
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
| 9,228 | 30.934256 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/__init__.py | from .compose import Compose
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
PhotoMetricDistortion, RandomCrop, RandomFlip,
RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
]
| 813 | 46.882353 | 77 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/transforms.py | import mmcv
import numpy as np
from mmcv.utils import deprecated_api_warning, is_tuple_of
from numpy import random
from ..builder import PIPELINES
@PIPELINES.register_module()
class Resize(object):
    """Resize images & seg.

    This transform resizes the input image to some scale. If the input dict
    contains the key "scale", then the scale in the input dict is used,
    otherwise the specified scale in the init method is used.

    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
    (multi-scale). There are 4 multiscale modes:

    - ``ratio_range is not None``:
      1. When img_scale is None, img_scale is the shape of image in results
         (img_scale = results['img'].shape[:2]) and the image is resized
         based on the original size. (mode 1)
      2. When img_scale is a tuple (single-scale), randomly sample a ratio
         from the ratio range and multiply it with the image scale. (mode 2)
    - ``ratio_range is None and multiscale_mode == "range"``: randomly
      sample a scale from a range. (mode 3)
    - ``ratio_range is None and multiscale_mode == "value"``: randomly
      sample a scale from multiple scales. (mode 4)

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing
            the image.
    """
    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            # Normalize to a list of (w, h)/(h, w) tuples.
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)
        if ratio_range is not None:
            # mode 1: given img_scale=None and a range of image ratio
            # mode 2: given a scale and a range of image ratio
            assert self.img_scale is None or len(self.img_scale) == 1
        else:
            # mode 3 and 4: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio
    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates (mode 4).

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
                where ``img_scale`` is the selected image scale and
                ``scale_idx`` is the selected index in the given candidates.
        """
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx
    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``
        (mode 3).

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the
                lower and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where
                ``img_scale`` is sampled scale and None is just a
                placeholder to be consistent with :func:`random_select`.
        """
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # Sample the long and short edge independently within their bounds.
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None
    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified
        (modes 1 and 2).

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to
                scale the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where
                ``scale`` is sampled ratio multiplied with ``img_scale``
                and None is just a placeholder to be consistent with
                :func:`random_select`.
        """
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        # Uniform ratio in [min_ratio, max_ratio).
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None
    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into
                ``results``, which would be used by subsequent pipelines.
        """
        if self.ratio_range is not None:
            if self.img_scale is None:
                # mode 1: scale relative to the image's own size.
                h, w = results['img'].shape[:2]
                scale, scale_idx = self.random_sample_ratio((w, h),
                                                            self.ratio_range)
            else:
                # mode 2: scale relative to the single configured scale.
                scale, scale_idx = self.random_sample_ratio(
                    self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            # Single fixed scale, nothing to sample.
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx
    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale has minor difference
            # a real fix should be done in the mmcv.imrescale in the future
            new_h, new_w = img.shape[:2]
            h, w = results['img'].shape[:2]
            w_scale = new_w / w
            h_scale = new_h / h
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
        # Stored as (w, h, w, h) factors for downstream consumers.
        scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio
    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            # Nearest-neighbor keeps label values intact.
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg
    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
                'keep_ratio' keys are added into result dict.
        """
        if 'scale' not in results:
            # A pre-set scale (e.g. from test-time aug) takes precedence.
            self._random_scale(results)
        self._resize_img(results)
        self._resize_seg(results)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(img_scale={self.img_scale}, '
                     f'multiscale_mode={self.multiscale_mode}, '
                     f'ratio_range={self.ratio_range}, '
                     f'keep_ratio={self.keep_ratio})')
        return repr_str
@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict already contains the key "flip" that decision is
    reused; otherwise the flip is drawn randomly with probability ``prob``.

    Args:
        prob (float, optional): The flipping probability. Default: None.
        direction(str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert 0 <= prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Flip the image and all maps listed in ``seg_fields``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results; 'flip' and 'flip_direction' keys are
            added into the result dict.
        """
        if 'flip' not in results:
            results['flip'] = bool(np.random.rand() < self.prob)
        if 'flip_direction' not in results:
            results['flip_direction'] = self.direction
        if results['flip']:
            direction = results['flip_direction']
            results['img'] = mmcv.imflip(results['img'], direction=direction)
            for key in results.get('seg_fields', []):
                # copy() makes the numpy strides positive after the flip.
                results[key] = mmcv.imflip(
                    results[key], direction=direction).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'
@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.

    There are two padding modes: (1) pad to a fixed ``size`` and (2) pad to
    the minimum size that is divisible by ``size_divisor``. Exactly one of
    the two must be given. Added keys are "pad_shape", "pad_fixed_size"
    and "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
    """

    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        # Exactly one of the two padding modes must be configured.
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad ``results['img']`` and record the padding bookkeeping."""
        if self.size is not None:
            padded = mmcv.impad(
                results['img'], shape=self.size, pad_val=self.pad_val)
        elif self.size_divisor is not None:
            padded = mmcv.impad_to_multiple(
                results['img'], self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded
        results['pad_shape'] = padded.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_seg(self, results):
        """Pad every map in ``seg_fields`` to ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key],
                shape=results['pad_shape'][:2],
                pad_val=self.seg_pad_val)

    def __call__(self, results):
        """Pad the image first, then the segmentation maps to match.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(size={self.size}, '
                f'size_divisor={self.size_divisor}, pad_val={self.pad_val})')
@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """
    def __init__(self, mean, std, to_rgb=True):
        # Stored as float32 arrays; passed through to mmcv.imnormalize.
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb
    def __call__(self, results):
        """Call function to normalize images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results, 'img_norm_cfg' key is added into
                result dict.
        """
        results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
                                          self.to_rgb)
        # Record the normalization parameters alongside the image.
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \
                    f'{self.to_rgb})'
        return repr_str
@PIPELINES.register_module()
class Rerange(object):
    """Linearly rescale image intensities into a target interval.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Rescale ``results['img']`` into ``[min_value, max_value]``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Reranged results.
        """
        img = results['img']
        lo = np.min(img)
        hi = np.max(img)
        # A constant image cannot be reranged without dividing by zero.
        assert lo < hi
        # Normalize to [0, 1], then stretch/shift to the target interval.
        normalized = (img - lo) / (hi - lo)
        results['img'] = normalized * (self.max_value - self.min_value) \
            + self.min_value
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(min_value={self.min_value}, max_value={self.max_value})'
        return repr_str
@PIPELINES.register_module()
class CLAHE(object):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
            It defines the number of tiles in row and column. Default: (8, 8).
    """
    def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
        assert isinstance(clip_limit, (float, int))
        self.clip_limit = clip_limit
        assert is_tuple_of(tile_grid_size, int)
        assert len(tile_grid_size) == 2
        self.tile_grid_size = tile_grid_size
    def __call__(self, results):
        """Call function to Use CLAHE method process images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        # CLAHE is applied to each channel independently; each channel is
        # cast to uint8 before being handed to mmcv.clahe.
        for i in range(results['img'].shape[2]):
            results['img'][:, :, i] = mmcv.clahe(
                np.array(results['img'][:, :, i], dtype=np.uint8),
                self.clip_limit, self.tile_grid_size)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(clip_limit={self.clip_limit}, '\
                    f'tile_grid_size={self.tile_grid_size})'
        return repr_str
@PIPELINES.register_module()
class RandomCrop(object):
    """Random crop the image & seg.

    Args:
        crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that single category could
            occupy.
    """
    def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
        assert crop_size[0] > 0 and crop_size[1] > 0
        self.crop_size = crop_size
        self.cat_max_ratio = cat_max_ratio
        self.ignore_index = ignore_index
    def get_crop_bbox(self, img):
        """Randomly get a crop bounding box."""
        # If the image is smaller than the crop size in a dimension, the
        # margin is clamped to 0 and the crop starts at offset 0.
        margin_h = max(img.shape[0] - self.crop_size[0], 0)
        margin_w = max(img.shape[1] - self.crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
        return crop_y1, crop_y2, crop_x1, crop_x2
    def crop(self, img, crop_bbox):
        """Crop from ``img``"""
        crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
        img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
        return img
    def __call__(self, results):
        """Call function to randomly crop images, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline. When
                ``cat_max_ratio < 1`` it must contain 'gt_semantic_seg'.

        Returns:
            dict: Randomly cropped results, 'img_shape' key in result dict is
                updated according to crop size.
        """
        img = results['img']
        crop_bbox = self.get_crop_bbox(img)
        if self.cat_max_ratio < 1.:
            # Rejection sampling: draw up to 10 candidate boxes and accept
            # the first one whose dominant non-ignored category covers less
            # than ``cat_max_ratio`` of the crop. If all 10 fail, the box
            # drawn at the end of the final iteration is used unchecked.
            # Repeat 10 times
            for _ in range(10):
                seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
                labels, cnt = np.unique(seg_temp, return_counts=True)
                cnt = cnt[labels != self.ignore_index]
                if len(cnt) > 1 and np.max(cnt) / np.sum(
                        cnt) < self.cat_max_ratio:
                    break
                crop_bbox = self.get_crop_bbox(img)
        # crop the image
        img = self.crop(img, crop_bbox)
        img_shape = img.shape
        results['img'] = img
        results['img_shape'] = img_shape
        # crop semantic seg
        for key in results.get('seg_fields', []):
            results[key] = self.crop(results[key], crop_bbox)
        return results
    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class RandomRotate(object):
    """Rotate the image & seg.

    Args:
        prob (float): The rotation probability.
        degree (float, tuple[float]): Range of degrees to select from. If
            degree is a number instead of tuple like (min, max),
            the range of degree will be (``-degree``, ``+degree``)
        pad_val (float, optional): Padding value of image. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used. Default: None.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image. Default: False
    """
    def __init__(self,
                 prob,
                 degree,
                 pad_val=0,
                 seg_pad_val=255,
                 center=None,
                 auto_bound=False):
        self.prob = prob
        assert prob >= 0 and prob <= 1
        if isinstance(degree, (float, int)):
            assert degree > 0, f'degree {degree} should be positive'
            self.degree = (-degree, degree)
        else:
            self.degree = degree
        assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
                                      f'tuple of (min, max)'
        # NOTE(review): ``pal_val`` is a typo for ``pad_val``; kept unchanged
        # because __call__ and __repr__ read this exact attribute name.
        self.pal_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.center = center
        self.auto_bound = auto_bound
    def __call__(self, results):
        """Call function to rotate image, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """
        rotate = True if np.random.rand() < self.prob else False
        # The angle is drawn even when the rotation does not fire.
        degree = np.random.uniform(min(*self.degree), max(*self.degree))
        if rotate:
            # rotate image
            results['img'] = mmcv.imrotate(
                results['img'],
                angle=degree,
                border_value=self.pal_val,
                center=self.center,
                auto_bound=self.auto_bound)
            # rotate segs with nearest-neighbour interpolation so label
            # values are never blended.
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imrotate(
                    results[key],
                    angle=degree,
                    border_value=self.seg_pad_val,
                    center=self.center,
                    auto_bound=self.auto_bound,
                    interpolation='nearest')
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob}, ' \
                    f'degree={self.degree}, ' \
                    f'pad_val={self.pal_val}, ' \
                    f'seg_pad_val={self.seg_pad_val}, ' \
                    f'center={self.center}, ' \
                    f'auto_bound={self.auto_bound})'
        return repr_str
@PIPELINES.register_module()
class RGB2Gray(object):
    """Convert an RGB image to a (replicated) grayscale image.

    The input channels are collapsed to a single channel via a weighted
    mean with ``weights``; that channel is then replicated ``out_channels``
    times, or as many times as the input had channels when ``out_channels``
    is None.

    Args:
        out_channels (int): Expected number of output channels after
            transforming. Default: None.
        weights (tuple[float]): The weights to calculate the weighted mean.
            Default: (0.299, 0.587, 0.114).
    """

    def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
        assert out_channels is None or out_channels > 0
        self.out_channels = out_channels
        assert isinstance(weights, tuple)
        for item in weights:
            assert isinstance(item, (float, int))
        self.weights = weights

    def __call__(self, results):
        """Collapse ``results['img']`` to grayscale.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with grayscale image.
        """
        img = results['img']
        assert len(img.shape) == 3
        assert img.shape[2] == len(self.weights)
        coeffs = np.array(self.weights).reshape((1, 1, -1))
        # Weighted mean across channels, keeping a singleton channel axis.
        gray = (img * coeffs).sum(2, keepdims=True)
        if self.out_channels is None:
            n_out = coeffs.shape[2]
        else:
            n_out = self.out_channels
        img = gray.repeat(n_out, axis=2)
        results['img'] = img
        results['img_shape'] = img.shape
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(out_channels={self.out_channels}, '
                f'weights={self.weights})')
@PIPELINES.register_module()
class AdjustGamma(object):
    """Apply gamma correction to the image via a precomputed lookup table.

    Args:
        gamma (float or int): Gamma value used in gamma correction.
            Default: 1.0.
    """

    def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
        assert gamma > 0
        self.gamma = gamma
        # Build the 256-entry uint8 LUT once: v -> (v/255)^(1/gamma) * 255.
        exponent = 1.0 / gamma
        self.table = np.array(
            [(i / 255.0)**exponent * 255
             for i in np.arange(256)]).astype('uint8')

    def __call__(self, results):
        """Gamma-correct ``results['img']`` through the lookup table.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        img = np.array(results['img'], dtype=np.uint8)
        results['img'] = mmcv.lut_transform(img, self.table)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(gamma={self.gamma})'
@PIPELINES.register_module()
class SegRescale(object):
    """Rescale semantic segmentation maps by a fixed factor.

    Args:
        scale_factor (float): The scale factor of the final output.
    """

    def __init__(self, scale_factor=1):
        self.scale_factor = scale_factor

    def __call__(self, results):
        """Rescale every map listed in ``results['seg_fields']``.

        A factor of 1 leaves the maps untouched. Nearest-neighbour
        interpolation is used so label values are never blended.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        if self.scale_factor != 1:
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imrescale(
                    results[key], self.scale_factor, interpolation='nearest')
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
    """Apply photometric distortion to image sequentially, every transformation
    is applied with a probability of 0.5. The position of random contrast is in
    second or second to last.

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """
    # NOTE(review): ``random`` is used numpy-style below (``random.randint(2)``
    # with a single bound), so it is presumably ``numpy.random`` imported at
    # module level -- the stdlib ``random.randint`` takes two arguments.
    # Confirm against the module imports.
    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta
    def convert(self, img, alpha=1, beta=0):
        """Multiply with alpha and add beta, clipping to [0, 255]."""
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)
    def brightness(self, img):
        """Brightness distortion."""
        # Fires with probability 0.5 (randint(2) yields 0 or 1).
        if random.randint(2):
            return self.convert(
                img,
                beta=random.uniform(-self.brightness_delta,
                                    self.brightness_delta))
        return img
    def contrast(self, img):
        """Contrast distortion."""
        if random.randint(2):
            return self.convert(
                img,
                alpha=random.uniform(self.contrast_lower, self.contrast_upper))
        return img
    def saturation(self, img):
        """Saturation distortion."""
        if random.randint(2):
            img = mmcv.bgr2hsv(img)
            # Scale only the S channel, then convert back.
            img[:, :, 1] = self.convert(
                img[:, :, 1],
                alpha=random.uniform(self.saturation_lower,
                                     self.saturation_upper))
            img = mmcv.hsv2bgr(img)
        return img
    def hue(self, img):
        """Hue distortion."""
        if random.randint(2):
            img = mmcv.bgr2hsv(img)
            # Shift the H channel; hue values wrap modulo 180.
            img[:, :,
                0] = (img[:, :, 0].astype(int) +
                      random.randint(-self.hue_delta, self.hue_delta)) % 180
            img = mmcv.hsv2bgr(img)
        return img
    def __call__(self, results):
        """Call function to perform photometric distortion on images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """
        img = results['img']
        # random brightness
        img = self.brightness(img)
        # mode == 1 --> apply random contrast now (before saturation/hue)
        # mode == 0 --> apply random contrast at the end
        mode = random.randint(2)
        if mode == 1:
            img = self.contrast(img)
        # random saturation
        img = self.saturation(img)
        # random hue
        img = self.hue(img)
        # random contrast
        if mode == 0:
            img = self.contrast(img)
        results['img'] = img
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(brightness_delta={self.brightness_delta}, '
                     f'contrast_range=({self.contrast_lower}, '
                     f'{self.contrast_upper}), '
                     f'saturation_range=({self.saturation_lower}, '
                     f'{self.saturation_upper}), '
                     f'hue_delta={self.hue_delta})')
        return repr_str
| 30,945 | 33.770787 | 79 | py |
CP2 | CP2-main/mmseg/datasets/pipelines/test_time_aug.py | import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    """Test-time augmentation with multiple scales and flipping.

    An example configuration is as followed:

    .. code-block::
        img_scale=(2048, 1024),
        img_ratios=[0.5, 1.0],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ]

    After MultiScaleFLipAug with above configuration, the results are wrapped
    into lists of the same length as followed:

    .. code-block::
        dict(
            img=[...],
            img_shape=[...],
            scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
            flip=[False, True, False, True]
            ...
        )

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (None | tuple | list[tuple]): Images scales for resizing.
        img_ratios (float | list[float]): Image ratios for resizing
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal" and "vertical". If flip_direction is list,
            multiple flip augmentations will be applied.
            It has no effect when flip == False. Default: "horizontal".
    """
    def __init__(self,
                 transforms,
                 img_scale,
                 img_ratios=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        if img_ratios is not None:
            img_ratios = img_ratios if isinstance(img_ratios,
                                                  list) else [img_ratios]
            assert mmcv.is_list_of(img_ratios, float)
        if img_scale is None:
            # mode 1: given img_scale=None and a range of image ratio
            # (actual scales are derived from each input image in __call__)
            self.img_scale = None
            assert mmcv.is_list_of(img_ratios, float)
        elif isinstance(img_scale, tuple) and mmcv.is_list_of(
                img_ratios, float):
            assert len(img_scale) == 2
            # mode 2: given a scale and a range of image ratio
            self.img_scale = [(int(img_scale[0] * ratio),
                               int(img_scale[1] * ratio))
                              for ratio in img_ratios]
        else:
            # mode 3: given multiple scales
            self.img_scale = img_scale if isinstance(img_scale,
                                                     list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None
        self.flip = flip
        self.img_ratios = img_ratios
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')
    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
                into a list.
        """
        aug_data = []
        # mode 1: scales are derived from the input image's own size.
        if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
            h, w = results['img'].shape[:2]
            img_scale = [(int(w * ratio), int(h * ratio))
                         for ratio in self.img_ratios]
        else:
            img_scale = self.img_scale
        flip_aug = [False, True] if self.flip else [False]
        # NOTE(review): the direction loop also runs when flip is False, so
        # multiple flip_directions would duplicate the unflipped variant.
        for scale in img_scale:
            for flip in flip_aug:
                for direction in self.flip_direction:
                    _results = results.copy()
                    _results['scale'] = scale
                    _results['flip'] = flip
                    _results['flip_direction'] = direction
                    data = self.transforms(_results)
                    aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
        repr_str += f'flip_direction={self.flip_direction}'
        return repr_str
| 5,173 | 37.61194 | 79 | py |
CP2 | CP2-main/mmseg/utils/logger.py | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package-wide root logger (named ``mmseg``).

    The logger is initialized on first use via mmcv's :func:`get_logger`;
    a StreamHandler is always attached, and a FileHandler is added as well
    when ``log_file`` is given.

    Args:
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.

    Returns:
        logging.Logger: The root logger.
    """
    return get_logger(name='mmseg', log_file=log_file, log_level=log_level)
| 899 | 31.142857 | 79 | py |
CP2 | CP2-main/mmseg/utils/collect_env.py | from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmseg
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: mmcv's base environment report plus an ``MMSegmentation``
        entry of the form ``<version>+<short git hash>``.
    """
    env_info = collect_base_env()
    # Tag the report with this package's version and the first 7 characters
    # of the current git hash.
    env_info['MMSegmentation'] = f'{mmseg.__version__}+{get_git_hash()[:7]}'
    return env_info
if __name__ == '__main__':
    # When run as a script, print each environment entry as "name: value".
    for key, value in collect_env().items():
        print(f'{key}: {value}')
| 436 | 23.277778 | 76 | py |
CP2 | CP2-main/mmseg/utils/__init__.py | from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
| 119 | 23 | 44 | py |
CP2 | CP2-main/mmseg/ops/wrappers.py | import warnings
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
           size=None,
           scale_factor=None,
           mode='nearest',
           align_corners=None,
           warning=True):
    """Interpolate ``input``, optionally warning about misaligned sizes.

    Thin wrapper around ``F.interpolate``. When ``align_corners=True`` and an
    explicit ``size`` is given, a warning is emitted if the output size does
    not satisfy the ``nx+1``-from-``x+1`` relation under which align_corners
    produces exactly aligned sampling grids.

    Args:
        input (Tensor): Input tensor of shape (N, C, H, W).
        size (tuple[int], optional): Output spatial size (H, W).
        scale_factor (float | tuple[float], optional): Spatial size
            multiplier; mutually exclusive with ``size``.
        mode (str): Interpolation mode passed to ``F.interpolate``.
        align_corners (bool, optional): See ``F.interpolate``.
        warning (bool): Whether the alignment warning may be emitted.
            Default: True.

    Returns:
        Tensor: The interpolated tensor.
    """
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # Only warn when actually upsampling in some dimension.
            # Bug fix: the width used to be compared against ``output_h``
            # (a typo); it must be compared against ``input_w``.
            if output_h > input_h or output_w > input_w:
                if ((output_h > 1 and output_w > 1 and input_h > 1
                     and input_w > 1) and (output_h - 1) % (input_h - 1)
                        and (output_w - 1) % (input_w - 1)):
                    warnings.warn(
                        f'When align_corners={align_corners}, '
                        'the output would more aligned if '
                        f'input size {(input_h, input_w)} is `x+1` and '
                        f'out size {(output_h, output_w)} is `nx+1`')
    return F.interpolate(input, size, scale_factor, mode, align_corners)
class Upsample(nn.Module):
    """``nn.Module`` wrapper around :func:`resize`.

    When ``size`` is unset, the output size is computed from the input's
    spatial shape and ``scale_factor`` at forward time.
    """

    def __init__(self,
                 size=None,
                 scale_factor=None,
                 mode='nearest',
                 align_corners=None):
        super(Upsample, self).__init__()
        self.size = size
        # Normalize scale_factor to float(s); None when falsy.
        if isinstance(scale_factor, tuple):
            self.scale_factor = tuple(float(f) for f in scale_factor)
        else:
            self.scale_factor = float(scale_factor) if scale_factor else None
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        if self.size:
            target = self.size
        else:
            target = [int(dim * self.scale_factor) for dim in x.shape[-2:]]
        return resize(x, target, None, self.mode, self.align_corners)
| 1,827 | 34.843137 | 79 | py |
CP2 | CP2-main/mmseg/ops/__init__.py | from .encoding import Encoding
from .wrappers import Upsample, resize
__all__ = ['Upsample', 'resize', 'Encoding']
| 116 | 22.4 | 44 | py |
CP2 | CP2-main/mmseg/ops/encoding.py | import torch
from torch import nn
from torch.nn import functional as F
class Encoding(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """
    def __init__(self, channels, num_codes):
        super(Encoding, self).__init__()
        # init codewords and smoothing factor
        self.channels, self.num_codes = channels, num_codes
        std = 1. / ((num_codes * channels)**0.5)
        # [num_codes, channels]
        self.codewords = nn.Parameter(
            torch.empty(num_codes, channels,
                        dtype=torch.float).uniform_(-std, std),
            requires_grad=True)
        # [num_codes]
        self.scale = nn.Parameter(
            torch.empty(num_codes, dtype=torch.float).uniform_(-1, 0),
            requires_grad=True)
    @staticmethod
    def scaled_l2(x, codewords, scale):
        # x: [batch_size, num_points, channels] -> per-code scaled squared
        # distances of shape [batch_size, num_points, num_codes].
        num_codes, channels = codewords.size()
        batch_size = x.size(0)
        reshaped_scale = scale.view((1, 1, num_codes))
        # [batch_size, num_points, num_codes, channels] via expand
        expanded_x = x.unsqueeze(2).expand(
            (batch_size, x.size(1), num_codes, channels))
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        scaled_l2_norm = reshaped_scale * (
            expanded_x - reshaped_codewords).pow(2).sum(dim=3)
        return scaled_l2_norm
    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        # Weighted sum of residuals over all points:
        # [batch_size, num_points, num_codes, channels] summed over dim=1
        # -> [batch_size, num_codes, channels].
        num_codes, channels = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand(
            (batch_size, x.size(1), num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) *
                        (expanded_x - reshaped_codewords)).sum(dim=1)
        return encoded_feat
    def forward(self, x):
        assert x.dim() == 4 and x.size(1) == self.channels
        # [batch_size, channels, height, width]
        batch_size = x.size(0)
        # [batch_size, height x width, channels]
        x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        # assignment_weights: [batch_size, height x width, num_codes]
        # (softmax over the code dimension)
        assignment_weights = F.softmax(
            self.scaled_l2(x, self.codewords, self.scale), dim=2)
        # aggregate
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
        return encoded_feat
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}' \
                    f'x{self.channels})'
        return repr_str
| 2,788 | 36.186667 | 78 | py |
CP2 | CP2-main/configs/config_pretrain.py | norm_cfg = dict(type='BN', requires_grad=True)
pretrain_path = None  # Please set the path to pretrained weights for Quick Tuning
model = dict(
    type='EncoderDecoder',
    pretrained=pretrain_path,
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Stage 4 keeps stride 1 and uses dilation 2 instead (dilated ResNet).
        dilations=(1, 1, 1, 2),
        strides=(1, 2, 2, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='ASPPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        # NOTE(review): presumably enables the contrastive projection used
        # for CP2 pretraining -- confirm against the ASPPHead implementation.
        contrast=True,
        dilations=(1, 6, 12, 18),
        dropout_ratio=0.1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=None,
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
| 952 | 27.029412 | 84 | py |
CP2 | CP2-main/configs/config_finetune.py | # model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
pretrain_path = ''  # Please set the path to pretrained model
data_root = ''  # Please set the path to your finetuing dataset (PASCAL VOC 2012)
model = dict(
    type='EncoderDecoder',
    pretrained=pretrain_path,
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Stage 4 keeps stride 1 and uses dilation 2 instead (dilated ResNet).
        dilations=(1, 1, 1, 2),
        strides=(1, 2, 2, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='ASPPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        dilations=(1, 6, 12, 18),
        dropout_ratio=0.1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=None,
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# dataset settings
dataset_type = 'PascalVOCDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassAug',
        split='ImageSets/Segmentation/train_aug.txt',
        pipeline=train_pipeline),
    # val and test share the same split (val.txt) and pipeline.
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassAug',
        split='ImageSets/Segmentation/val.txt',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClassAug',
        split='ImageSets/Segmentation/val.txt',
        pipeline=test_pipeline))
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=4000)
# mIoU is evaluated on the val split every 4000 iterations.
evaluation = dict(interval=4000, metric='mIoU')
| 3,664 | 29.798319 | 85 | py |
NeuralIdeals | NeuralIdeals-master/examples.py | # -*- coding: utf-8 -*-
"""
Examples of NeuralCode
AUTHORS:
- Ethan Petersen (2015-09) [initial version]
This file constructs some examples of NeuralCodes.
The examples are accessible by typing: ``neuralcodes.example()``
"""
# NOTE(review): this module is written for Python 2 (``print`` statements);
# it will raise SyntaxError under Python 3. ``NeuralCode`` is expected to be
# available in the Sage session this file is loaded into.
class NeuralCodeExamples():
    r"""
    Some examples of neuralcodes.
    Here are the available examples; you can also type
    ``neuralcodes.`` and hit tab to get a list:
    - :meth:`Canonical`
    - :meth:`Factored_Canonical`
    - :meth:`Groebner`
    - :meth:`RF_Structure`
    EXAMPLES::
        sage: nc = neuralcodes.Canonical()
        Ideal (x1*x2, x0*x1 + x0, x1*x2 + x1 + x2 + 1, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
        sage: f = neuralcodes.Factored_Canonical()
        [x2 * x1, (x1 + 1) * x0, (x2 + 1) * (x1 + 1), x2 * x0]
        sage: g = neuralcodes.Groebner()
        Ideal (x0*x2, x1 + x2 + 1) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
        sage: rf = neuralcodes.RF_Structure()
        Intersection of U_['2', '1'] is empty
        Intersection of U_['0'] is a subset of Union of U_['1']
        X = Union of U_['2', '1']
        Intersection of U_['2', '0'] is empty
    """
    def __call__(self):
        r"""
        If neuralcodes() is executed, return a helpful message.
        INPUT:
        None
        OUTPUT:
        None
        EXAMPLES::
            sage: neuralcodes()
            Try neuralcodes.FOO() where FOO is in the list:
            Canonical, Factored_Canonical, Groebner, RF_Structure
        """
        print 'Try neuralcodes.FOO() where FOO is in the list:\n'
        print "   " + ", ".join([str(i) for i in dir(neuralcodes) if i[0]!='_'])
    def Canonical(self):
        """
        The canonical form of the ideal corresponding to ['001','010','110'].
        INPUT:
        None
        OUTPUT:
        - Ideal
        EXAMPLES::
            sage: s = neuralcodes.Canonical()
            Ideal (x1*x2, x0*x1 + x0, x1*x2 + x1 + x2 + 1, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
        """
        print 'Input:\n' + 'nc = NeuralCode([\'001\',\'010\',\'110\'])\n' + 'nc.canonical()\n\n' + 'Output:'
        nc = NeuralCode(['001','010','110'])
        return nc.canonical()
    def Factored_Canonical(self):
        """
        The factored canonical form of the ideal corresponding to ['001','010','110'].
        INPUT:
        None
        OUTPUT:
        - List
        EXAMPLES::
            sage: s = neuralcodes.factored_canonical()
            [x2 * x1, (x1 + 1) * x0, (x2 + 1) * (x1 + 1), x2 * x0]
        """
        print 'Input:\n' + 'nc = NeuralCode([\'001\',\'010\',\'110\'])\n' + 'nc.factored_canonical()\n\n' + 'Output:'
        nc = NeuralCode(['001','010','110'])
        return nc.factored_canonical()
    def Groebner(self):
        """
        The groebner basis of the ideal corresponding to ['001','010','110'].
        INPUT:
        None
        OUTPUT:
        - Ideal
        EXAMPLES::
            sage: s = neuralcodes.groebner_basis()
            [x2 * x1, (x1 + 1) * x0, (x2 + 1) * (x1 + 1), x2 * x0]
        """
        print 'Input:\n' + 'nc = NeuralCode([\'001\',\'010\',\'110\'])\n' + 'nc.groebner_basis()\n\n' + 'Output:'
        nc = NeuralCode(['001','010','110'])
        return nc.groebner_basis()
    def RF_Structure(self):
        """
        The RF structure corresponding to ['001','010','110'].
        INPUT:
        None
        OUTPUT:
        None
        EXAMPLES::
            sage: s = neuralcodes.canonical_RF_structure()
            Intersection of U_['2', '1'] is empty
            Intersection of U_['0'] is a subset of Union of U_['1']
            X = Union of U_['2', '1']
            Intersection of U_['2', '0'] is empty
        """
        print 'Input:\n' + 'nc = NeuralCode([\'001\',\'010\',\'110\'])\n' + 'nc.canonical_RF_structure()\n\n' + 'Output:'
        nc = NeuralCode(['001','010','110'])
        return nc.canonical_RF_structure()
# Module-level singleton so users can call ``neuralcodes.FOO()`` directly.
neuralcodes = NeuralCodeExamples()
| 4,237 | 12.80456 | 139 | py |
NeuralIdeals | NeuralIdeals-master/neuralcode.py | import itertools
import time
import math
from multiprocessing.pool import ThreadPool
from itertools import tee, izip
from sage.rings.polynomial import *
from sage.rings.polynomial.pbori import *
from sage.rings.ideal import *
r"""
Neural Ideals in SageMath: A package to perform computations with neural ideals associated to neural codes.
A major area in neuroscience research is the study of how the brain processes spatial information. Neurons in the brain represent external stimuli via neural codes. The neural ideal is an algebraic object that encodes the full combinatorial data of a neural code. This ideal can be expressed in a canonical form that directly translates to a minimal description of the receptive field structure intrinsic to the code. In this package we implement a novel and efficient algorithm to compute the canonical form of a neural ideal. Moreover, our package contains implementations of several other methods related to the neural ideal of a code,
including an algorithm to compute the primary decomposition of a pseudo-monomial ideal. These implementations are based on algorithms described in the following papers:
1. Curto, C., Itskov, V., Veliz-Cuba, A., Youngs, N. The Neural Ring: An Algebraic Tool for Analyzing the Intrinsic Structure of Neural Codes. Bulletin of Mathematical Biology, Volume 75, Issue 9, pp. 1571-1611, 2013.
2. Ethan Petersen, Nora Youngs, Ryan Kruse, Dane Miyata, Rebecca
Garcia, Luis David Garcia Puente. Neural Ideals in SageMath. Submitted. 2016.
AUTHORS:
- Ethan Petersen (2015-09-13): initial version
- Nora Youngs (2016-09-20): v2.0
- Ryan Kruse (2015-09-13): initial version
- Dane Miyata (2015-09-13): initial version
- Rebecca Garcia (2016-09-20): v2.0
- Luis David Garcia Puente (2016-09-20): v2.0
OTHER CONTRIBUTORS:
- Ihmar Aldana (2015-09-13): initial version
EXAMPLES::
The software's main purpose is to compute canonical forms of neural ideals, and we'll begin with this example::
sage: code = NeuralCode(['001','010','110'])
sage: code.canonical()
Ideal (x1*x2, x0*x1 + x0, x1*x2 + x1 + x2 + 1, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
To read RF structures, it's easiest when the generators of the canonical form are factored::
sage: code.factored_canonical()
[x2 * x1, (x1 + 1) * x0, (x2 + 1) * (x1 + 1), x2 * x0]
Or we can simply retrieve the RF structure::
sage: code.canonical_RF_structure()
Intersection of U_['2', '1'] is empty
Intersection of U_['0'] is a subset of Union of U_['1']
X = Union of U_['2', '1']
Intersection of U_['2', '0'] is empty
Other methods include determining if a neural code is a simplicial code::
sage: is_simplicial(code.Codes)
False
sage: is_simplicial(['000','001','010','100','110','011','101','111'])
True
In constructing a neural code object, there are two arguments: the neural code, and an optional argument that is the term order. This term order will be used in the ring where
all member methods do computation. For example, if we change the order to 'degrevlex', groebner_basis() will compute the groebner basis with that term order.
Additionally, the canonical() method also takes optional arguments. The first will determine which algorithm will be used to compute the canonical form. Either the iterative algorithm outlined by Dr. Carina Curto and Dr. Nora Young in "Neural ring homomorphisms and maps between neural codes" will be used or their algorithm in "The Neural Ring" will be chosen. The second will determine which algorithm to use for a primary decomposition step: 'pm' will use the pseudo-monomial algorithm, 'sy' and 'gtz' will use the shimoyama-yokoyama algorithm or gianni-trager-zacharias algorithm, respectively. We also noticed that the canonical() runtime begins to rise significantly with higher dimension and more code words. To partially address this, we have parallelized portions of the algorithm used to get the canonical form. To take advantage of this, we use optional arguments::
sage: code.canonical('pm', True, 3)
Ideal (x1*x2, x0*x1 + x0, x1*x2 + x1 + x2 + 1, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
The second argument is a boolean: True if we want to use parallelized portions, False if we don't. The third argument is for the number of parallel processes. There are other commands that are useful for exploring neural codes:
1. generate_random_code(d) will generate a random list of codewords in "d" dimension. This is very useful for generating examples to test conjectures::
sage: generate_random_code(7)
['0001101', '1001001', '0011110', '1011101', '0001011', '0011011', '1111101', '0000010', '1011001', '0000001', '1110101', '0100110', '0000101', '0111100', '0100000', '0100011', '0010001', '1111010', '1101010', '0101011', '1000001', '1100100', '1100110']
2. all_neural_codes(d), which will return a list of all of the possible sets of code words in the specified dimension:
sage: all_neural_codes(2)
[[], ['00'], ['01'], ['10'], ['11'], ['00', '01'], ['00', '10'], ['00', '11'], ['01', '10'], ['01', '11'], ['10', '11'], ['00', '01', '10'], ['00', '01', '11'], ['00', '10', '11'], ['01', '10', '11'], ['00', '01', '10', '11']]
3. support(C) will return the support of a single codeword::
sage: support('0100011110101')
[1, 5, 6, 7, 8, 10, 12]
There is also a test suite, assert_build(), which tests whether canonical() will reproduce the results in "The Neural Ring". This method will print an error message if there is an inconsistency.
"""
#*****************************************************************************
# Copyright (C) 2013 Ethan Petersen <peterseo@rose-hulman.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
class NeuralCode:
r"""
Class for a neural code, associated to the neural ring.
>>> assert_build()
"""
def __init__(self, C, order='lex'):
    r"""
    Constructs a neural code.
    The Neural Ideal vanishes at every code word in a given list of codes. Many methods have been added to compute traditional objects such as a Groebner Basis, but
    utilizes the BooleanPolynomialRing and generic PolynomialRing's to account for methods not yet implemented in the Boolean Ring, but use the reductive properties (x^2 = x) of
    the Boolean Ring.
    INPUT:
    - ``C`` -- list The list of binary strings representing
    the neural code.
    - ``order`` -- string (default: 'lex') the term order.
    OUTPUT:
    None
    EXAMPLES:
    sage: neural_code = NeuralCode(['000','011'])
    sage: neural_code.canonical()
    Ideal (x1*x2 + x1, x1*x2 + x2, x0) of Multivariate Polynomial Ring in
    x0, x1, x2 over Finite Field of size 2
    """
    # throws an error if an empty list was given for the codewords
    if len(C) == 0:
        print("The collection of codewords must be nonempty\n")
        raise RuntimeError("The collection of codewords must be nonempty")
    # temporarily store the dimension for further assertions
    dimension = len(C[0])
    # Bug fix: test the container type directly instead of substring-matching
    # the repr of type(C).
    if not isinstance(C, list):
        print("The collection of codewords must be a list: \n" + "Input: " + str(C) + "\nCorrect example: C = ['001','010'] \n")
        raise TypeError("The collection of codewords must be a list")
    for i in range(len(C)):
        # Check the element type first so the character scan below cannot
        # raise an uninformative TypeError on non-iterable entries.
        if not isinstance(C[i], str):
            print("Code words must be strings: \n" + "Input: " + str(C[i]) + "\n" + "Correct example: C[0] = '001'\n")
            raise TypeError("Code words must be strings")
        # Bug fix: the old check only required that SOME '0' or '1' appear,
        # so words such as '0a1' slipped through; every character must be binary.
        if any(ch not in '01' for ch in C[i]):
            print("Each code word must be comprised of 0's or 1's\n")
            raise TypeError("Each code word must be comprised of 0's or 1's")
        if len(C[i]) != dimension:
            print("Each code word must be of the same length, the dimension: " + str(dimension) + "\n")
            raise TypeError("Each code word must be of the same length, the dimension: " + str(dimension))
    self.Codes = C
    self.d = dimension
    # Work in GF(2)[x0..x_{d-1}]; the Boolean ring additionally enforces x^2 = x.
    self.F = PolynomialRing(GF(2), dimension, "x", order=order)
    self.x = self.F.gens()
    self.Boolean_Ring = BooleanPolynomialRing(dimension, "x", order=order)
    self.b = self.Boolean_Ring.gens()
    # V starts as every binary word of the right length; removing the codewords
    # leaves exactly the points where the neural ideal must vanish's complement.
    self.V = ["".join(x) for x in itertools.product("01", repeat=dimension)] # generates the possible configurations of code words
    for i in range(len(C)):
        if C[i] in self.V:
            self.V.remove(C[i])
def neural_ideal(self):
    r"""
    Fetches the neural ideal.
    INPUT:
    None
    OUTPUT:
    An ideal in the integer mod 2 ring.
    EXAMPLES:
    sage: neural_code = NeuralCode(['000','011'])
    sage: neural_code.neural_ideal()
    Ideal (x0*x1*x2 + x0*x2 + x1*x2 + x2, x0*x1*x2 + x0*x1 + x1*x2 + x1, x0*x1*x2 + x0*x1 + x0*x2 + x0, x0*x1*x2 + x0*x2, x0*x1*x2 + x0*x1, x0*x1*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
    TESTS:
    >>> neural_code = NeuralCode(['000','011'])
    >>> neural_code.neural_ideal()
    Ideal (x0*x1*x2 + x0*x2 + x1*x2 + x2, x0*x1*x2 + x0*x1 + x1*x2 + x1, x0*x1*x2 + x0*x1 + x0*x2 + x0, x0*x1*x2 + x0*x2, x0*x1*x2 + x0*x1, x0*x1*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
    """
    # One generator rho_v per non-codeword v: rho_v is the product of
    # (1 - v_j - x_j) over the neurons, so it vanishes off of v.
    generators = []
    for point in self.V:
        rho_v = 1
        for j, bit in enumerate(point):
            rho_v = rho_v * (1 - Integer(bit) - self.x[j])
        generators.append(rho_v)
    return self.F.ideal(generators)
def _decomposition_product(self, decomp):
    r"""
    Computes the cartesian product of the ideals in the primary decomposition of the neural ideal.
    INPUT:
    - ``decomp`` -- list The list of ideals
    OUTPUT:
    The product as a list of ideals.
    """
    # Fold the ideals together by repeated multiplication, starting from 1.
    result = 1
    for component in decomp:
        result = result * component
    return result
def groebner_fan(self):
    r"""
    Returns the groebner fan, reduced to the Boolean Ring.
    INPUT:
    None
    OUTPUT:
    The groebner fan as a list of ideals.
    EXAMPLES:
    sage: nc = NeuralCode(['00','01'])
    sage: nc.groebner_fan()
    [Ideal (x0) of Multivariate Polynomial Ring in x0, x1 over Finite Field of size 2]
    TESTS:
    >>> nc = NeuralCode(['00','01'])
    >>> nc.groebner_fan()
    [Ideal (x0) of Multivariate Polynomial Ring in x0, x1 over Finite Field of size 2]
    """
    # The full code yields the zero neural ideal; report the zero basis.
    if len(self.Codes) == 2 ** self.d:
        return [0]
    gf = self.neural_ideal().groebner_fan()
    original_bases = gf.reduced_groebner_bases()
    reduced_bases = []
    # iterates through the elements in the reduced groebner bases, maps into boolean ring, then places nonzero elements back into the original ring
    for i in range(len(original_bases)):
        temp = []
        for j in range(len(original_bases[i])):
            temp_element = self.Boolean_Ring(original_bases[i][j])
            if(temp_element != 0):
                temp.append(temp_element)
        # Keep only non-empty, previously unseen bases; Set(...) canonicalizes
        # element order so equal bases compare equal.
        if len(temp) != 0:
            if temp not in reduced_bases:
                reduced_bases.append(Set(temp).list())
    # Lift each surviving Boolean basis back to an ideal of the base ring F.
    for i in range(len(reduced_bases)):
        reduced_bases[i] = self.F.ideal(reduced_bases[i])
    return Set(reduced_bases).list()
def universal_groebner_basis(self):
    r"""
    Returns the universal groebner basis.
    Takes the reduced groebner bases, maps their elements to the Boolean Ring, and places all unique elements into a list.
    This list then constructs an ideal in the polynomial ring mod 2, which is returned.
    INPUT:
    None
    OUTPUT:
    The reduced groebner basis as an ideal.
    EXAMPLES:
    sage: nc = NeuralCode(['001','010','110','001'])
    sage: nc.universal_groebner_basis()
    Ideal (x1 + x2 + 1, x0*x1 + x0, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
    TESTS:
    >>> nc = NeuralCode(['001','010','110','001'])
    >>> nc.universal_groebner_basis()
    Ideal (x1 + x2 + 1, x0*x1 + x0, x0*x2) of Multivariate Polynomial Ring in x0, x1, x2 over Finite Field of size 2
    """
    # The full code yields the zero neural ideal; report the zero basis.
    if len(self.Codes) == 2 ** self.d:
        return [0]
    all_bases = self.neural_ideal().groebner_fan().reduced_groebner_bases()
    universal = []
    # Collect every distinct nonzero Boolean-reduced element across all bases.
    for basis in all_bases:
        for poly in basis:
            image = self.Boolean_Ring(poly)
            if image != 0 and image not in universal:
                universal.append(image)
    return self.F.ideal(Set(universal).list())
def _parse_decomposition(self, decomp):
    r"""Returns the prime ideals that make up the given decomposition."""
    # Rebuild each component as an ideal of the base ring F from its generators.
    return [self.F.ideal(component.gens()) for component in decomp]
def _booleanIdealReduce(self, M):
    r"""Maps the generators of a given ideal into the Boolean Ring and returns the ideal of the remaining nonzero generators."""
    images = [self.Boolean_Ring(gen) for gen in M.gens()]
    return self.F.ideal([image for image in images if image != 0])
def _booleanRingReduce(self, M):
    r"""Maps the given list of generators into the Boolean Ring and returns the list of the remaining nonzero images."""
    return [image for image in (self.Boolean_Ring(gen) for gen in M) if image != 0]
def _reduce(self, reduced_decomp_product):
    r"""Returns an ideal whose generators are not multiples of each other."""
    bases = Set(reduced_decomp_product.gens())
    kept = []
    for i in range(len(bases)):
        # Drop bases[i] if some OTHER generator divides it exactly.
        divisible = False
        for j in range(len(bases)):
            if i != j and bases[i].quo_rem(bases[j])[1] == 0:
                divisible = True
                break
        if not divisible:
            kept.append(bases[i])
    return self.F.ideal(kept)
def factored_canonical(self, algorithm = "iterative", decomposition_algorithm="gtz"):
    r"""
    Returns the canonical form of the neural ideal, where the generators are factored.
    INPUT:
    None
    OUTPUT:
    The canonical form in which its generators are factored as an
    EXAMPLES:
    sage: nc = NeuralCode(['0010','0110','1001','1010','1111'])
    sage: nc.factored_canonical()
    [(x2 + 1) * x1, (x3 + 1) * (x2 + 1), x3 * (x0 + 1), (x3 + 1) * x1 * x0, (x2 + 1) * (x0 + 1), x3 * x2 * (x1 + 1)]
    TESTS:
    >>> nc = NeuralCode(['0010','0110','1001','1010','1111'])
    >>> nc.factored_canonical(algorithm="usual")
    [(x2 + 1) * x1, (x3 + 1) * (x2 + 1), x3 * (x0 + 1), (x3 + 1) * x1 * x0, (x2 + 1) * (x0 + 1), x3 * x2 * (x1 + 1)]
    """
    # Bug fix: the canonical form used to be computed twice, and the "Empty"
    # probe ignored the caller's algorithm choices (it always used the
    # defaults).  Compute it once with the requested algorithms and reuse it.
    canonical_form = self.canonical(algorithm, decomposition_algorithm)
    if canonical_form == "Empty":
        return "Empty"
    return [gen.factor() for gen in canonical_form.gens()]
def canonical(self, algorithm = "iterative", decomposition_algorithm = "pm", threading=False, threads = 2):
    r"""
    Return the canonical form of the neural code's ideal.
    INPUT:
    - ``algorithm`` -- string (default: 'iterative') the algorithm to be used for computing the canonical form
    - ``decomposition_algorithm`` -- string (default: 'pm') the algorithm to be used for primary decomposition
    - ``threading`` -- boolean (default: False) indicator to use threading for large computations
    - ``threads`` -- integer (default: 2) the number of threads to use
    OUTPUT:
    The canonical form of the neural code as an ideal.
    EXAMPLES:
    sage: C = ['000111','101010','111000','111001','100110']
    sage: nr = NeuralCode(C)
    sage: nr.canonical()
    Ideal (x0*x1 + x1, x1*x2*x5 + x2*x5, x1*x3, x2*x3 + x2 + x3 + 1, x0*x2 + x2, x0*x3*x5, x3*x4*x5 + x4*x5, x2*x3, x0*x4*x5, x2*x4 + x2 + x4 + 1, x1*x3*x5 + x1*x5 + x3*x5 + x5, x0*x4 + x0 + x4 + 1, x1*x2 + x1, x1*x4 + x1 + x4 + 1, x1*x4, x0*x1*x5 + x0*x5, x0*x2*x5 + x0*x5, x3*x4 + x3, x0*x3 + x0 + x3 + 1, x0*x5 + x0 + x5 + 1, x2*x4*x5) of Multivariate Polynomial Ring in x0, x1, x2, x3, x4, x5 over Finite Field of size 2
    .. NOTE::
    For very large computations (dimension greater than 8), it may be faster to use threading.
    TESTS:
    >>> C = ['000111','101010','111000','111001','100110']
    >>> nr = NeuralCode(C)
    >>> expected = nr.canonical(algorithm="usual", decomposition_algorithm="gtz")
    >>> iterative_canonical = nr.canonical(algorithm="iterative")
    >>> expected == iterative_canonical
    True
    """
    if (algorithm == "iterative"):
        return iterate_canonical(self.Codes, self.F)
    else:
        # condition for when Groebner = Canonical
        if len(self.Codes) == 1 or len(self.Codes) == (2**self.d - 1) or is_simplicial(self.Codes):
            return self.groebner_basis()
        # the full code gives the zero ideal
        if (len(self.Codes) == 2 ** self.d):
            return "Empty"
        # pick the cheaper construction of the neural ideal
        if self.d >= 8 and len(self.Codes) < 2**(self.d - 2):
            j_c = self.traditional_neural_ideal()
        else:
            j_c = self.neural_ideal()
        # recover the primary decomposition of the ideal J
        if (decomposition_algorithm == "pm"):
            primes = pm_primary_decomposition(j_c)
        else:
            primes = j_c.primary_decomposition(decomposition_algorithm)
        # gets the list of decomposition polynomials
        decomp = self._parse_decomposition(primes)
        # compute the product of the decomposition ideals
        M = self._decomposition_product(decomp)
        ########## Method using threading #########
        if threading:
            # Bug fix: under Python 2 integer division len(M.gens()) / threads
            # floors first, making math.ceil a no-op; _chunks could then yield
            # MORE chunks than threads and generators were silently dropped.
            chunk_size = int(math.ceil(len(M.gens()) / float(threads)))
            # splits the list of products into one chunk per thread
            splitM = list(self._chunks(M, chunk_size))
            # initializes a thread pool
            pool = ThreadPool(processes=threads)
            # Bug fix: the accumulator was named `async`, a reserved word in
            # modern Python.  Also iterate over the actual chunks instead of
            # range(threads), which raised IndexError when fewer chunks exist.
            async_results = []
            for chunk in splitM:
                async_results.append(pool.apply_async(self._booleanRingReduce, ([chunk])))
            # list to hold the final reduced polynomials
            whole = []
            # get all of the sublists and concatenate them
            for result in async_results:
                whole = whole + result.get()
            # create the reduced ideal
            threadBooleanReduceM = self.F.ideal(whole)
            if(len(threadBooleanReduceM.gens()) == 1 and threadBooleanReduceM.gens()[0] == 0):
                return "Empty"
            # take out multiples
            canonicalForm = self._reduce(threadBooleanReduceM)
            return canonicalForm
        ###########################################
        ### Method using sequential computation ###
        # using Boolean Ring to reduce M
        booleanReduceM = self._booleanIdealReduce(M)
        # case for the zero ideal
        if(len(booleanReduceM.gens()) == 1 and booleanReduceM.gens()[0] == 0):
            return "Empty"
        # reduce again by taking out all generators that are multiples of each other and return
        canonicalForm = self._reduce(booleanReduceM)
        return canonicalForm
def traditional_neural_ideal(self):
    r"""
    Constructs the neural ideal by using the union of varieties.
    INPUT:
    None
    OUTPUT:
    The neural ideal.
    EXAMPLES:
    sage: C = ['11','10','00','01']
    sage: nr = NeuralCode(C)
    sage: nr.traditional_neural_ideal()
    Ideal (x0^4 + x0^2, x0^3*x1 + x0^3 + x0*x1 + x0, x0^3*x1 + x0*x1, x0^2*x1^2 + x0^2*x1 + x1^2 + x1, x0^3*x1 + x0^2*x1, x0^2*x1^2 + x0^2*x1 + x0*x1^2 + x0*x1, x0^2*x1^2 + x0*x1^2, x0*x1^3 + x0*x1^2 + x1^3 + x1^2, x0^3*x1 + x0^3 + x0^2*x1 + x0^2, x0^2*x1^2 + x0^2 + x0*x1^2 + x0, x0^2*x1^2 + x0^2*x1 + x0*x1^2 + x0*x1, x0*x1^3 + x0*x1 + x1^3 + x1, x0^2*x1^2 + x0^2*x1, x0*x1^3 + x0*x1, x0*x1^3 + x0*x1^2, x1^4 + x1^2) of Multivariate Polynomial Ring in x0, x1 over Finite Field of size 2
    TESTS:
    >>> C = ['11','10','00','01']
    >>> nr = NeuralCode(C)
    >>> nr.traditional_neural_ideal()
    Ideal (x0^4 + x0^2, x0^3*x1 + x0^3 + x0*x1 + x0, x0^3*x1 + x0*x1, x0^2*x1^2 + x0^2*x1 + x1^2 + x1, x0^3*x1 + x0^2*x1, x0^2*x1^2 + x0^2*x1 + x0*x1^2 + x0*x1, x0^2*x1^2 + x0*x1^2, x0*x1^3 + x0*x1^2 + x1^3 + x1^2, x0^3*x1 + x0^3 + x0^2*x1 + x0^2, x0^2*x1^2 + x0^2 + x0*x1^2 + x0, x0^2*x1^2 + x0^2*x1 + x0*x1^2 + x0*x1, x0*x1^3 + x0*x1 + x1^3 + x1, x0^2*x1^2 + x0^2*x1, x0*x1^3 + x0*x1, x0*x1^3 + x0*x1^2, x1^4 + x1^2) of Multivariate Polynomial Ring in x0, x1 over Finite Field of size 2
    """
    component_ideals = []
    for word in self.Codes:
        # Over GF(2), x_j + 1 vanishes when x_j = 1 and x_j vanishes when
        # x_j = 0, so each codeword contributes the ideal of its own point.
        gens = []
        for j, neuron in enumerate(word):
            gens.append(self.x[j] + 1 if neuron == '1' else self.x[j])
        component_ideals.append(self.F.ideal(gens))
    return self._decomposition_product(component_ideals)
def _sigma_tau(self):
    r"""Returns the sets sigma and tau, where sigma is the list of Receptive Field sets whose intersections are subsets of the union of the Receptive Field sets in Tau."""
    list = self.factored_canonical()
    if (list == "Empty"):
        return "Empty"
    all_sigma = []
    all_tau = []
    # Parse the printed factorization of each canonical generator: factors
    # are separated by '*'.
    for i in range(len(list)):
        current = str(list[i])
        split = current.split('*')
        sigma = []
        tau = []
        for j in range(len(split)):
            factor = split[j].strip()
            # A parenthesized factor looks like "(x2 + 1)"; the neuron index
            # sits at string position 2.  NOTE(review): this assumes
            # single-digit neuron indices (dimension <= 10) -- confirm before
            # using on larger codes.
            if('(' in factor):
                tau.append(factor[2])
            else:
                # Bare "x2 + 1" (complemented) goes to tau; bare "x2" to sigma.
                if ('+' in factor):
                    tau.append(factor[1])
                else:
                    sigma.append(factor[1])
        all_sigma.append(sigma)
        all_tau.append(tau)
    return (all_sigma , all_tau)
def groebner_basis(self):
    r"""
    Returns the groebner basis of the neural ideal using the libsingular:std algorithm.
    INPUT:
    None
    OUTPUT:
    The groebner basis of the neural code's ideal
    EXAMPLES:
    sage: C = ['01010','11100','11110','01011']
    sage: nr = NeuralCode(C)
    sage: nr.groebner_basis()
    Ideal (x0 + x2, x1 + 1, x2*x3 + x2 + x3 + 1, x2*x4, x3*x4 + x4) of Multivariate Polynomial Ring in x0, x1, x2, x3, x4 over Finite Field of size 2
    TESTS:
    >>> C = ['01010','11100','11110','01011']
    >>> nr = NeuralCode(C)
    >>> nr.groebner_basis()
    Ideal (x0 + x2, x1 + 1, x2*x3 + x2 + x3 + 1, x2*x4, x3*x4 + x4) of Multivariate Polynomial Ring in x0, x1, x2, x3, x4 over Finite Field of size 2
    """
    gb = self.neural_ideal().groebner_basis(algorithm='libsingular:std')
    # Keep only basis elements that survive reduction into the Boolean ring.
    survivors = [poly for poly in gb if self.Boolean_Ring(poly) != 0]
    # The zero basis means the ideal itself is zero.
    if len(gb) == 1 and gb[0] == 0:
        return "Empty"
    return self.F.ideal(survivors)
def canonical_RF_structure(self):
    r"""
    Prints the Receptive Field structure using the canonical form of the neural ideal.
    INPUT:
    None
    OUTPUT:
    None
    EXAMPLES:
    sage: C = ['110','100','000','010']
    sage: nr = NeuralCode(C)
    sage: nr.canonical_RF_structure()
    Intersection of U_['2'] is empty
    TESTS:
    >>> C = ['110','100','000','010']
    >>> nr = NeuralCode(C)
    >>> nr.canonical_RF_structure()
    Intersection of U_['2'] is empty
    """
    parsed = self._sigma_tau()
    if parsed == "Empty":
        print("Zero ideal : Empty")
        return
    sigma, tau = parsed[0], parsed[1]
    # Each (sigma_i, tau_i) pair encodes one RF relation of the canonical form.
    for i in range(len(sigma)):
        if len(tau[i]) == 0:
            print("Intersection of U_" + str(sigma[i]) + " is empty")
        elif len(sigma[i]) == 0:
            print("X = Union of U_" + str(tau[i]))
        else:
            print("Intersection of U_" + str(sigma[i]) + " is a subset of Union of U_" + str(tau[i]))
def _chunks(self, ideal, n):
    r"""Yield successive n-sized slices of the given ideal's basis."""
    gens = ideal.gens()
    for start in range(0, len(gens), n):
        yield gens[start:start + n]
def assert_build(algorithm="usual", decomposition_algorithm="pm"):
    """ Asserts that the canonical form calculations give the same results as those in "The Neural Ring" by Curto et al. Used in doctests. """
    paper_example_A = [['000','100','010','001','110','101','011','111'],['000','100','010','110','101','111'],['000','100','010','001','110','101','111'],['000','100','010','110','101','011','111'],['000','100','010','110','111'],['000','100','110','101','111'],['000','100','010','101','111'],['000','100','010','001','110','111'],['000','100','001','110','011','111'],['000','100','010','101','011','111'],['000','100','110','101','011','111'],['000','100','110','111'],['000','100','010','111'],['000','100','010','001','111'],['000','110','101','011','111'],['000','100','011','111'],['000','110','101','111'],['000','100','111'],['000','110','111'],['000','111']]
    paper_example_B = [['000','100','010','001','110','101'],['000','110','010','110','101'],['000','100','010','101','011'],['000','100','110','101'],['000','100','110','011'],['000','110','101']]
    paper_example_C = [['000','100','010','001','110'],['000','100','010','101'],['000','100','011']]
    paper_example_D = [['000','100','010','001']]
    paper_example_E = [['000','100','010','001','110','101','011'],['000','100','010','110','101','011'],['000','100','110','101','011'],['000','110','011','101']]
    paper_example_F = [['000','100','010','110'],['000','100','110'],['000','110']]
    paper_example_G = [['000','100']]
    paper_example_H = [['000']]
    paper_example_I = [['000','100','010']]
    all_paper = paper_example_A + paper_example_B + paper_example_C + paper_example_D + paper_example_E + paper_example_F + paper_example_G + paper_example_H + paper_example_I
    F = PolynomialRing(GF(2), 3, "x")
    x = F.gens()
    expected_ideals = [[0], [x[2] * (x[0] + 1)], [x[2] * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1) * (x[0] + 1)], [x[2] * (x[1] + 1), x[2] * (x[0] + 1)], [x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [x[2] * (x[0] + 1), (x[2] + 1) * x[1] * x[0]], [x[2] * (x[1] + 1) * x[0], x[2] * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1) * x[0], (x[2] + 1) * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * x[0]], [x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), x[2] * (x[0] + 1), (x[2] + 1) * x[1] * x[0]], [x[2] * (x[1] + 1) * x[0], (x[2] + 1) * x[1] * x[0], x[2] * x[1] * (x[0] + 1)], [(x[2] + 1) * (x[1] + 1) * x[0], x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), (x[2] + 1) * x[1]], [(x[2] + 1) * (x[1] + 1) * x[0], x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), (x[2] + 1) * x[1], x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), (x[1] + 1) * x[0], x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [(x[2] + 1) * x[1], x[2] * (x[0] + 1), x[1] * (x[0] + 1), (x[1] + 1) * x[0], x[2] * (x[1] + 1), (x[2] + 1) * x[0]], [x[2] * x[1]], [x[2] * x[1], x[2] * (x[0] + 1), (x[2] + 1) * (x[1] + 1) * x[0]], [x[2] * (x[1] + 1) * (x[0] + 1), x[1] * x[0]], [x[2] * x[1], x[2] * (x[0] + 1), x[1] * (x[0] + 1)], [x[2] * (x[1] + 1), x[2] * x[0], (x[2] + 1) * x[1] * (x[0] + 1)], [(x[2] + 1) * (x[1] + 1) * x[0], x[2] * (x[0] + 1), x[2] * x[1], x[1] * (x[0] + 1)], [x[2] * x[0], x[2] * x[1]], [x[2] * x[1], x[2] * (x[0] + 1), x[1] * x[0]], [x[2] * (x[1] + 1), (x[2] + 1) * x[1], x[2] * x[0], x[1] * x[0]], [x[1] * x[0], x[2] * x[0], x[2] * x[1]], [x[2] * x[1] * x[0]], [x[2] * (x[1] + 1) * (x[0] + 1), x[2] * x[1] * x[0]], [x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1), x[2] * x[1] * x[0]], [(x[2] + 1) * (x[1] + 1) * x[0], x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1), x[2] * x[1] * x[0]], [x[2]], [x[2], x[1] * (x[0] + 1)], [x[2],
    (x[1] + 1) * x[0], x[1] * (x[0] + 1)], [x[1], x[2]], [x[0], x[1], x[2]], [x[1] * x[0], x[2]], [x[2] * (x[1] + 1) * (x[0] + 1), x[2] * x[1] * x[0]],
    [x[2] * (x[1] + 1), x[2] * x[0], (x[2] + 1) * x[1] * (x[0] + 1)], [x[2] * (x[1] + 1) * (x[0] + 1), x[1] * x[0]], [x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1), x[2] * x[1] * x[0]], [(x[2] + 1) * (x[1] + 1) * x[0], x[2] * (x[1] + 1) * (x[0] + 1), (x[2] + 1) * x[1] * (x[0] + 1), x[2] * x[1] * x[0]]]
    for i in range(len(all_paper)):
        neural_ideal = NeuralCode(all_paper[i])
        canonical_form = neural_ideal.canonical(algorithm, decomposition_algorithm)
        if (canonical_form == "Empty" and expected_ideals[i] == [0]):
            continue
        if (canonical_form == "Empty" and expected_ideals[i] != [0]):
            # Bug fix: the old message referenced `gens` (not yet assigned here)
            # and an undefined `decomp_algs[k]`, raising NameError on failure.
            print("Build failed on Neural Code " + str(all_paper[i]) + " Expected: " + str(expected_ideals[i]) + " but was Empty. Decomposition = " + decomposition_algorithm + "\n")
            break
        gens = canonical_form.gens()
        if (len(gens) != len(expected_ideals[i])):
            print("Build failed on Neural Code " + str(all_paper[i]) + " Expected: " + str(expected_ideals[i]) + " but was " + str(gens) + ". Decomposition = " + decomposition_algorithm + "\n")
            break
        for j in range(len(gens)):
            if (gens[j] not in expected_ideals[i]):
                print("Build failed on Neural Code " + str(all_paper[i]) + " Expected: " + str(expected_ideals[i]) + " but was " + str(gens) + ". Decomposition = " + decomposition_algorithm + "\n")
                break
def generate_random_code(dimension, num_code_words=None):
    r"""
    Generates a random list of binary strings (code words) in the specified dimension.
    INPUT:
    - ``dimension`` -- integer the dimension of the boolean ring (the length of the binary strings).
    - ``num_code_words`` -- optional integer the number of code words.
    OUTPUT:
    The code words as a list of strings.
    EXAMPLES:
    sage: generate_random_code(5)
    ['10000', '10001', '01011', '11011', '10011', '10111', '11001', '11000', '01100', '11100', '11101', '11010', '01010', '00110', '00001', '00010', '00011', '00100']
    """
    all_combinations = ["".join(x) for x in itertools.product("01", repeat=dimension)] # generates all possible combinations of 0 and 1 of length dimension
    if num_code_words is None:
        num_code_words = randint(1, 2 ** dimension)
    # Bug fix: duplicates used to be appended first and deduplicated only at
    # the end, so fewer than num_code_words words could be returned.  Collect
    # distinct words until the requested count is reached instead.
    chosen = set()
    while len(chosen) != num_code_words:
        chosen.add(all_combinations[randint(0, 2 ** dimension - 1)]) # randomly select from all combinations
    return list(chosen)
def all_neural_codes(dimension):
    r"""
    Returns a list of all of the neural codes in the specified dimension.
    INPUT:
    - ``dimension`` -- integer the dimension of the boolean ring.
    OUTPUT:
    All neural codes as a list of lists of strings.
    EXAMPLES:
    sage: all_neural_codes(2)
    [[], ['00'], ['01'], ['10'], ['11'], ['00', '01'], ['00', '10'], ['00', '11'], ['01', '10'], ['01', '11'], ['10', '11'], ['00', '01', '10'], ['00', '01', '11'], ['00', '10', '11'], ['01','10', '11'], ['00', '01', '10', '11']]
    TESTS:
    >>> all_neural_codes(2)
    [[], ['00'], ['01'], ['10'], ['11'], ['00', '01'], ['00', '10'], ['00', '11'], ['01', '10'], ['01', '11'], ['10', '11'], ['00', '01', '10'], ['00', '01', '11'], ['00', '10', '11'], ['01', '10', '11'], ['00', '01', '10', '11']]
    """
    # Every binary word of the given length, then every subset of those words.
    words = ["".join(bits) for bits in itertools.product("01", repeat=dimension)]
    return Combinations(words).list()
def support(C):
    r"""
    Return the support of the code word.
    INPUT:
    - ``C`` -- string The code word.
    OUTPUT:
    The support as a list.
    EXAMPLES:
    sage: C = '0101'
    sage: support(C)
    [1, 3]
    TESTS:
    >>> C = '0101'
    >>> support(C)
    [1, 3]
    """
    # Positions of the active neurons, i.e. indices where the word is '1'.
    return [index for index, bit in enumerate(C) if bit == '1']
def is_simplicial(C):
    r"""
    Returns a boolean determining if a list of code words is a simplicial code.
    INPUT:
    - ``C`` -- List of binary strings(codes)
    OUTPUT:
    True if the code is a simplicial code, false otherwise.
    EXAMPLES:
    sage: is_simplicial(['00','01','10','11'])
    True
    sage: is_simplicial(['000','101','010','111'])
    False
    TESTS:
    >>> is_simplicial(['00','01','10','11'])
    True
    >>> is_simplicial(['000','101','010','111'])
    False
    """
    # Bug fix: `max` was never updated inside the loop, so `index` ended up at
    # the LAST word with nonempty support instead of the word with the LARGEST
    # support.  Track the best size explicitly.
    best_size = 0
    index = 0
    for i in range(len(C)):
        size = len(support(C[i]))
        if size > best_size:
            best_size = size
            index = i
    # Count the codewords whose support lies inside the maximal support; the
    # code is simplicial exactly when ALL subsets of that support appear.
    count = 0
    maxSupport = support(C[index])
    for j in range(len(C)):
        supp = support(C[j])
        if len(supp) == 0:
            count = count + 1
        else:
            subset = True
            for k in range(len(supp)):
                if supp[k] not in maxSupport:
                    subset = False
            if subset:
                count = count + 1
    if count == 2 ** len(maxSupport):
        return True
    return False
def pm_primary_decomposition(IDEAL):
    """Primary decomposition of a pseudo-monomial ideal (steps 1-6 of the pm algorithm)."""
    # step 1 initialization step
    final = []
    Q = []
    D = [IDEAL]
    # step 5 recursion step (which is a loop of steps 2-4)
    # Bug fix: replaced the long-removed `<>` inequality operator with a plain
    # truthiness test; also dropped the unused P/new_D initializers.
    while D:
        [D, Q] = D_Q(D, Q)
    # step 6 final step
    P = list(set(Q))
    for m in range(len(P)):
        final.extend(reduced_primes_list(P, m))
    return final
# Step 6: if one ideal contains the generators of another ideal in the list,
# then it is redundant since we are taking the intersection of these ideals.
# This command returns [P[m]] only when P[m] is not redundant.
def reduced_primes_list(P, m):
    """Return ``[P[m]]`` unless some other ideal's generators all lie in ``P[m]`` (redundant), in which case return ``[]``."""
    redundant = any(
        all(gen in P[m].gens() for gen in P[n].gens())
        for n in range(len(P)) if n != m
    )
    plist = [] if redundant else [P[m]]
    return list(Set(plist))
#steps 2-4
def D_Q(D, Q):
    """One pass of steps 2-4: expand every ideal in D along the factors of its first nonlinear generator, then sort the results into prime (Q) and still-reducible (D) ideals."""
    new_D = []
    #creating D_I
    for I in D:
        D_I = []
        #finds the first nonlinear generator of I (step 2.1)
        index = 0
        for i in range(len(I.gens())):
            factors = list(I.gens()[i].factor())
            # Bug fix: `<>` is Python-2-only syntax; use `!=`.
            if len(factors) != 1:
                break
            index = index + 1
        #creates list of factors of that first generator (z_i's) called gen_fac_list
        gen_fac = list(I.gens()[index].factor())
        gen_fac_list = []
        for i in range(len(gen_fac)):
            gen_fac_list.append(gen_fac[i][0])
        #step 3
        for i in range(len(gen_fac_list)):
            z = gen_fac_list[i]
            D_I.append(reduced_ideal(z, I))
        new_D.extend(D_I)
    #step 4
    E = list(Set(new_D))
    D = []
    for k in range(len(E)):
        if is_linear(E[k]):
            if E[k].is_prime():
                Q.append(E[k])
        else:
            D.append(E[k])
    return [D, Q]
# checks if the generators of an ideal are linear by checking if they have only 1 factor
def is_linear(ideal):
    """Return True when every generator of ``ideal`` has exactly one factor (i.e. is linear)."""
    for gen in ideal.gens():
        if len(list(gen.factor())) != 1:
            return False
    return True
# executes the reduction in step 3 for a given ideal and given z
def reduced_ideal(z, I):
    """Reduce the generators of ``I`` under the substitutions z = 0 and z + 1 = 1, then adjoin ``z``."""
    # Imposing z = 0 kills every generator divisible by z.
    survivors = [gen for gen in I.gens() if not z.divides(gen)]
    # Imposing z + 1 = 1 strips the factor (z + 1) from the remaining ones.
    reduced = []
    for gen in survivors:
        if (z + 1).divides(gen):
            reduced.append(gen.quo_rem(z + 1)[0])
        else:
            reduced.append(gen)
    # sorts list to avoid duplicates and returns the ideal generated by the list
    reduced.append(z)
    return Ideal(sorted(reduced))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 39,713 | 39.115152 | 2,149 | py |
SA-UNet | SA-UNet-master/Dropblock.py | import keras
import keras.backend as K
class DropBlock1D(keras.layers.Layer):
    """See: https://arxiv.org/pdf/1810.12890.pdf"""
    def __init__(self,
                 block_size,
                 keep_prob,
                 sync_channels=False,
                 data_format=None,
                 **kwargs):
        """Initialize the layer.
        :param block_size: Size for each mask block.
        :param keep_prob: Probability of keeping the original feature.
        :param sync_channels: Whether to use the same dropout for all channels.
        :param data_format: 'channels_first' or 'channels_last' (default).
        :param kwargs: Arguments for parent class.
        """
        super(DropBlock1D, self).__init__(**kwargs)
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.sync_channels = sync_channels
        self.data_format = K.normalize_data_format(data_format)
        # NOTE(review): keras.engine.base_layer is a private path that moved
        # across Keras versions -- confirm against the pinned Keras release.
        self.input_spec = keras.engine.base_layer.InputSpec(ndim=3)
        self.supports_masking = True
    def get_config(self):
        # Serialize constructor arguments so the layer can be re-created.
        config = {'block_size': self.block_size,
                  'keep_prob': self.keep_prob,
                  'sync_channels': self.sync_channels,
                  'data_format': self.data_format}
        base_config = super(DropBlock1D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def compute_mask(self, inputs, mask=None):
        # DropBlock does not alter any incoming Keras mask.
        return mask
    def compute_output_shape(self, input_shape):
        # Output shape is identical to the input shape.
        return input_shape
    def _get_gamma(self, feature_dim):
        """Get the number of activation units to drop"""
        # Seed probability gamma from the DropBlock paper, specialized to
        # 1D feature maps.
        feature_dim = K.cast(feature_dim, K.floatx())
        block_size = K.constant(self.block_size, dtype=K.floatx())
        return ((1.0 - self.keep_prob) / block_size) * (feature_dim / (feature_dim - block_size + 1.0))
    def _compute_valid_seed_region(self, seq_length):
        # 0/1 mask of positions far enough from the borders that a full
        # block centred there fits inside the sequence.
        positions = K.arange(seq_length)
        half_block_size = self.block_size // 2
        valid_seed_region = K.switch(
            K.all(
                K.stack(
                    [
                        positions >= half_block_size,
                        positions < seq_length - half_block_size,
                    ],
                    axis=-1,
                ),
                axis=-1,
            ),
            K.ones((seq_length,)),
            K.zeros((seq_length,)),
        )
        return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
    def _compute_drop_mask(self, shape):
        # Sample block centres with probability gamma, restrict them to the
        # valid region, then expand each centre to a full block via max-pool.
        seq_length = shape[1]
        mask = K.random_binomial(shape, p=self._get_gamma(seq_length))
        mask *= self._compute_valid_seed_region(seq_length)
        mask = keras.layers.MaxPool1D(
            pool_size=self.block_size,
            padding='same',
            strides=1,
            data_format='channels_last',
        )(mask)
        # Invert: 1 keeps a feature, 0 drops it.
        return 1.0 - mask
    def call(self, inputs, training=None):
        def dropped_inputs():
            outputs = inputs
            if self.data_format == 'channels_first':
                # Work internally in channels_last layout.
                outputs = K.permute_dimensions(outputs, [0, 2, 1])
            shape = K.shape(outputs)
            if self.sync_channels:
                # One shared mask broadcast over all channels.
                mask = self._compute_drop_mask([shape[0], shape[1], 1])
            else:
                mask = self._compute_drop_mask(shape)
            # Rescale so the expected activation magnitude is preserved.
            outputs = outputs * mask *\
                      (K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 2, 1])
            return outputs
        # Only drop blocks in training mode; identity at inference.
        return K.in_train_phase(dropped_inputs, inputs, training=training)
class DropBlock2D(keras.layers.Layer):
    """See: https://arxiv.org/pdf/1810.12890.pdf"""
    def __init__(self,
                 block_size,
                 keep_prob,
                 sync_channels=False,
                 data_format=None,
                 **kwargs):
        """Initialize the layer.
        :param block_size: Size for each mask block.
        :param keep_prob: Probability of keeping the original feature.
        :param sync_channels: Whether to use the same dropout for all channels.
        :param data_format: 'channels_first' or 'channels_last' (default).
        :param kwargs: Arguments for parent class.
        """
        super(DropBlock2D, self).__init__(**kwargs)
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.sync_channels = sync_channels
        self.data_format = K.normalize_data_format(data_format)
        # NOTE(review): keras.engine.base_layer is a private path that moved
        # across Keras versions -- confirm against the pinned Keras release.
        self.input_spec = keras.engine.base_layer.InputSpec(ndim=4)
        self.supports_masking = True
    def get_config(self):
        # Serialize constructor arguments so the layer can be re-created.
        config = {'block_size': self.block_size,
                  'keep_prob': self.keep_prob,
                  'sync_channels': self.sync_channels,
                  'data_format': self.data_format}
        base_config = super(DropBlock2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def compute_mask(self, inputs, mask=None):
        # DropBlock does not alter any incoming Keras mask.
        return mask
    def compute_output_shape(self, input_shape):
        # Output shape is identical to the input shape.
        return input_shape
    def _get_gamma(self, height, width):
        """Get the number of activation units to drop"""
        # Seed probability gamma from the DropBlock paper for 2D maps.
        height, width = K.cast(height, K.floatx()), K.cast(width, K.floatx())
        block_size = K.constant(self.block_size, dtype=K.floatx())
        return ((1.0 - self.keep_prob) / (block_size ** 2)) *\
               (height * width / ((height - block_size + 1.0) * (width - block_size + 1.0)))
    def _compute_valid_seed_region(self, height, width):
        # (row, col) index grid, used to keep block centres far enough from
        # the borders that a full block always fits inside the feature map.
        positions = K.concatenate([
            K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1), [1, width]), axis=-1),
            K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0), [height, 1]), axis=-1),
        ], axis=-1)
        half_block_size = self.block_size // 2
        valid_seed_region = K.switch(
            K.all(
                K.stack(
                    [
                        positions[:, :, 0] >= half_block_size,
                        positions[:, :, 1] >= half_block_size,
                        positions[:, :, 0] < height - half_block_size,
                        positions[:, :, 1] < width - half_block_size,
                    ],
                    axis=-1,
                ),
                axis=-1,
            ),
            K.ones((height, width)),
            K.zeros((height, width)),
        )
        return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)
    def _compute_drop_mask(self, shape):
        # Sample block centres with probability gamma, restrict them to the
        # valid region, then expand each centre to a full block via max-pool.
        height, width = shape[1], shape[2]
        mask = K.random_binomial(shape, p=self._get_gamma(height, width))
        mask *= self._compute_valid_seed_region(height, width)
        mask = keras.layers.MaxPool2D(
            pool_size=(self.block_size, self.block_size),
            padding='same',
            strides=1,
            data_format='channels_last',
        )(mask)
        # Invert: 1 keeps a feature, 0 drops it.
        return 1.0 - mask
    def call(self, inputs, training=None):
        def dropped_inputs():
            outputs = inputs
            if self.data_format == 'channels_first':
                # Work internally in channels_last layout.
                outputs = K.permute_dimensions(outputs, [0, 2, 3, 1])
            shape = K.shape(outputs)
            if self.sync_channels:
                # One shared mask broadcast over all channels.
                mask = self._compute_drop_mask([shape[0], shape[1], shape[2], 1])
            else:
                mask = self._compute_drop_mask(shape)
            # Rescale so the expected activation magnitude is preserved.
            outputs = outputs * mask *\
                      (K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
            return outputs
        # Only drop blocks in training mode; identity at inference.
        return K.in_train_phase(dropped_inputs, inputs, training=training)
SA-UNet | SA-UNet-master/keras_dataAug.py |
from PIL import Image, ImageEnhance, ImageOps, ImageFile
import numpy as np
import random
import threading, os, time
import logging
logger = logging.getLogger(__name__)
ImageFile.LOAD_TRUNCATED_IMAGES = True
class DataAugmentation:
    """Collection of paired image/label augmentation ops built on PIL.

    Each op takes (image, label) PIL images and returns the augmented pair;
    appearance-only ops (color, noise) return the label untouched, while
    geometric ops transform both.
    """
    def __init__(self):
        pass
    @staticmethod
    def openImage(image):
        """Open the file at `image` as a PIL image."""
        return Image.open(image, mode="r")
    @staticmethod
    def randomRotation(image, label, mode=Image.BICUBIC):
        """Rotate image and label by the same random angle (1-359 degrees).

        The label uses nearest-neighbour resampling to keep it binary.
        """
        random_angle = np.random.randint(1, 360)
        return image.rotate(random_angle, mode), label.rotate(random_angle, Image.NEAREST)
    @staticmethod
    def randomCrop(image, label):
        """Crop a random-size centred square from the image.

        NOTE(review): only the image is cropped; the label keeps its
        original size, so this op breaks image/label alignment -- it is
        excluded from opsList.  Confirm intent before enabling it.
        """
        image_width = image.size[0]
        image_height = image.size[1]
        crop_win_size = np.random.randint(40, 68)
        random_region = (
            (image_width - crop_win_size) >> 1, (image_height - crop_win_size) >> 1, (image_width + crop_win_size) >> 1,
            (image_height + crop_win_size) >> 1)
        return image.crop(random_region), label
    @staticmethod
    def randomColor(image, label):
        """Randomly jitter color, brightness, contrast and sharpness."""
        random_factor = np.random.randint(0, 31) / 10.
        color_image = ImageEnhance.Color(image).enhance(random_factor)
        random_factor = np.random.randint(10, 21) / 10.
        brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)
        random_factor = np.random.randint(10, 21) / 10.
        contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)
        random_factor = np.random.randint(0, 31) / 10.
        return ImageEnhance.Sharpness(contrast_image).enhance(random_factor), label
    @staticmethod
    def randomGaussian(image, label, mean=0.2, sigma=0.3):
        """Add per-channel Gaussian noise to the image (label unchanged)."""
        def gaussianNoisy(im, mean=0.2, sigma=0.3):
            # Adds an independent Gaussian sample to every element in place.
            for _i in range(len(im)):
                im[_i] += random.gauss(mean, sigma)
            return im
        # BUGFIX: np.asarray on a PIL image returns a read-only view, and
        # flipping `flags.writeable` raises on modern NumPy because the
        # array does not own its data; np.array makes a writable copy.
        img = np.array(image)
        # img.shape is (height, width, channels).
        height, width = img.shape[:2]
        img_r = gaussianNoisy(img[:, :, 0].flatten(), mean, sigma)
        img_g = gaussianNoisy(img[:, :, 1].flatten(), mean, sigma)
        img_b = gaussianNoisy(img[:, :, 2].flatten(), mean, sigma)
        img[:, :, 0] = img_r.reshape([height, width])
        img[:, :, 1] = img_g.reshape([height, width])
        img[:, :, 2] = img_b.reshape([height, width])
        return Image.fromarray(np.uint8(img)), label
    @staticmethod
    def saveImage(image, path):
        """Write the PIL image to `path`."""
        image.save(path)
def makeDir(path):
    """Create directory `path` (including parents) if it does not exist.

    Returns 0 when the directory was created, 1 when the path already
    exists, and None if creation failed (the error is printed).
    """
    try:
        if not os.path.exists(path):
            if not os.path.isfile(path):
                # os.mkdir(path)
                os.makedirs(path)
            return 0
        else:
            return 1
    except OSError as exc:
        # BUGFIX: the original printed str(Exception) -- the exception
        # *class*, not the error that actually occurred.
        print(str(exc))
def imageOps(func_name, image, label, img_des_path, label_des_path, img_file_name, label_file_name, times=3):
    """Apply the augmentation named `func_name` to the image/label pair
    `times` times, saving each result under the destination directories.

    Output filenames are prefixed with the op name and the repetition index.
    Returns -1 when the op name is unknown; otherwise returns None.
    """
    ops = {
        "randomRotation": DataAugmentation.randomRotation,
        "randomCrop": DataAugmentation.randomCrop,
        "randomColor": DataAugmentation.randomColor,
        "randomGaussian": DataAugmentation.randomGaussian,
    }
    op = ops.get(func_name)
    if op is None:
        logger.error("%s is not exist", func_name)
        return -1
    for attempt in range(times):
        aug_image, aug_label = op(image, label)
        prefix = func_name + str(attempt)
        DataAugmentation.saveImage(aug_image, os.path.join(img_des_path, prefix + img_file_name))
        DataAugmentation.saveImage(aug_label, os.path.join(label_des_path, prefix + label_file_name))
opsList = {"randomRotation", "randomColor", "randomGaussian"}
def threadOPS(img_path, new_img_path, label_path, new_label_path):
    """Augment every image/label pair under img_path/label_path, writing
    results to new_img_path/new_label_path.

    Each augmentation op in opsList runs in its own thread per image; a 5s
    sleep between images throttles thread creation.
    NOTE(review): images and labels are paired by os.listdir order, which
    is arbitrary -- this assumes both directories list in the same order.
    """
    # img path
    print(img_path)
    if os.path.isdir(img_path):
        img_names = os.listdir(img_path)
        print(img_names)
    else:
        img_names = [img_path]
    # label path
    if os.path.isdir(label_path):
        label_names = os.listdir(label_path)
    else:
        label_names = [label_path]
    img_num = 0
    label_num = 0
    # img num -- abort if a subdirectory is found.
    for img_name in img_names:
        tmp_img_name = os.path.join(img_path, img_name)
        if os.path.isdir(tmp_img_name):
            print('contain file folder')
            exit()
        else:
            img_num = img_num + 1;
    # label num
    for label_name in label_names:
        tmp_label_name = os.path.join(label_path, label_name)
        if os.path.isdir(tmp_label_name):
            print('contain file folder')
            exit()
        else:
            label_num = label_num + 1
    # Refuse to run on mismatched directory contents.
    if img_num != label_num:
        print('the num of img and label is not equl')
        exit()
    else:
        num = img_num
    for i in range(num):
        img_name = img_names[i]
        label_name = label_names[i]
        tmp_img_name = os.path.join(img_path, img_name)
        tmp_label_name = os.path.join(label_path, label_name)
        print(tmp_img_name)
        image = DataAugmentation.openImage(tmp_img_name)
        label = DataAugmentation.openImage(tmp_label_name)
        # One thread per augmentation op (list is sized 5; only
        # len(opsList) slots are actually used).
        threadImage = [0] * 5
        _index = 0
        for ops_name in opsList:
            threadImage[_index] = threading.Thread(target=imageOps,
                                                   args=(ops_name, image, label, new_img_path, new_label_path, img_name,
                                                         label_name))
            threadImage[_index].start()
            _index += 1
        time.sleep(5)
# Please modify the path
if __name__ == '__main__':
    # Augment the DRIVE training set; images and labels are processed in
    # lockstep so their filenames stay paired.
    # NOTE(review): "Drive"/"DRIVE" casing is inconsistent across these
    # paths -- verify on case-sensitive filesystems.
    threadOPS("DRIVE/train/image", #set your path of training images
              "Drive/aug/image",
              "DRIVE/train/label",# set your path of training labels
              "DRIVE/aug/label")
| 5,744 | 30.565934 | 120 | py |
SA-UNet | SA-UNet-master/Train_chase.py | import os
import numpy as np
import cv2
from keras.callbacks import TensorBoard, ModelCheckpoint
np.random.seed(42)
import scipy.misc as mc
import matplotlib.pyplot as plt
# Dataset layout: CHASE_DB1 with a train/validate split prepared on disk.
data_location = ''
training_images_loc = data_location + 'CHASE/train/imageS/'
training_label_loc = data_location + 'CHASE/train/labelS/'
validate_images_loc = data_location + 'CHASE/validate/images/'
validate_label_loc = data_location + 'CHASE/validate/labels/'
train_files = os.listdir(training_images_loc)
train_data = []
train_label = []
validate_files = os.listdir(validate_images_loc)
validate_data = []
validate_label = []
# Images are zero-padded to a square of this size before training.
desired_size=1008
# Load every training image and its "_1stHO" annotation, pad both to a
# square with black borders, resize, and re-binarize the label at 127.
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 -- this script
# requires an old SciPy (or a shim) to run.
for i in train_files:
    im = mc.imread(training_images_loc + i)
    label = mc.imread(training_label_loc + i.split('_')[0]+"_"+i.split('_')[1].split(".")[0] +"_1stHO.png" ,mode="L")
    old_size = im.shape[:2] # old_size is in (height, width) format
    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0]
    color2 = [0]
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=color)
    new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                   value=color2)
    train_data.append(cv2.resize(new_im, (desired_size, desired_size)))
    temp = cv2.resize(new_label,
                      (desired_size, desired_size))
    # Re-binarize the label after interpolation.
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    train_label.append(temp)
# Same preprocessing for the validation split.
for i in validate_files:
    im = mc.imread(validate_images_loc + i)
    label = mc.imread(validate_label_loc +i.split('_')[0]+'_'+ i.split('_')[1].split(".")[0] +"_1stHO.png" ,mode="L")
    old_size = im.shape[:2] # old_size is in (height, width) format
    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0]
    color2 = [0]
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=color)
    new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                   value=color2)
    validate_data.append(cv2.resize(new_im, (desired_size, desired_size)))
    temp = cv2.resize(new_label,
                      (desired_size, desired_size))
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    validate_label.append(temp)
# Stack into arrays and scale pixel/label values to [0, 1].
train_data = np.array(train_data)
train_label = np.array(train_label)
validate_data = np.array(validate_data)
validate_label = np.array(validate_label)
x_train = train_data.astype('float32') / 255.
y_train = train_label.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), desired_size, desired_size, 3))  # adapt this if using `channels_first` image data format
y_train = np.reshape(y_train, (len(y_train), desired_size, desired_size, 1))  # adapt this if using `channels_first` im
x_validate = validate_data.astype('float32') / 255.
y_validate = validate_label.astype('float32') / 255.
x_validate = np.reshape(x_validate, (len(x_validate), desired_size, desired_size, 3))  # adapt this if using `channels_first` image data format
y_validate = np.reshape(y_validate, (len(y_validate), desired_size, desired_size, 1))  # adapt this if using `channels_first` im
# NOTE(review): this TensorBoard instance is created and immediately
# discarded -- it has no effect; the callback passed to fit() below is a
# separate instance.
TensorBoard(log_dir='./autoencoder', histogram_freq=0,
            write_graph=True, write_images=True)
from SA_UNet import *

# Build the SA-UNet for CHASE_DB1 (1008x1008 RGB in, 1-channel mask out).
model=SA_UNet(input_size=(desired_size,desired_size,3),start_neurons=16,lr=1e-3,keep_prob=0.87,block_size=7)
weight="Model/CHASE/SA_UNet.h5"
restore=False
if restore and os.path.isfile(weight):
    model.load_weights(weight)
# Checkpoint every epoch (save_best_only=False) to `weight`.
model_checkpoint = ModelCheckpoint(weight, monitor='val_accuracy', verbose=1, save_best_only=False)
# plot_model(model, to_file='unet_resnet.png', show_shapes=False, show_layer_names=)
history=model.fit(x_train, y_train,
                  epochs=100, #first 100 with lr=1e-3,,and last 50 with lr=1e-4
                  batch_size=2,
                  # validation_split=0.1,
                  validation_data=(x_validate, y_validate),
                  shuffle=True,
                  callbacks= [TensorBoard(log_dir='./autoencoder'), model_checkpoint])
print(history.history.keys())
# summarize history for accuracy
# BUGFIX: the history was read with the mismatched keys 'acc' and
# 'val_accuracy'; no Keras version produces both spellings, so one lookup
# always raised KeyError.  Use 'accuracy'/'val_accuracy' consistently
# (matching the ModelCheckpoint monitor above).
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('SA-UNet Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='lower right')
plt.show()
| 4,715 | 35.84375 | 143 | py |
SA-UNet | SA-UNet-master/Eval_drive.py | 1 | 0 | 0 | py | |
def crop_to_shape(data, shape):
    """
    Crops the array to the given image shape by removing the border (expects a tensor of shape [batches, nx, ny, channels].
    :param data: the array to crop
    :param shape: the target shape
    """
    # BUGFIX: the original special-cased zero offsets and odd/even sizes
    # and could return a slice of the wrong length (and an empty slice
    # when an offset was 0, because `a[0:-0]` is empty).  Slicing a
    # half-open window of exactly shape[1] x shape[2] starting at the
    # centred offset handles every parity combination uniformly and
    # matches the original on its correct paths.
    offset0 = (data.shape[1] - shape[1]) // 2
    offset1 = (data.shape[2] - shape[2]) // 2
    return data[:, offset0:offset0 + shape[1], offset1:offset1 + shape[2]]
SA-UNet | SA-UNet-master/Train_drive.py | import os
import cv2
from keras.callbacks import TensorBoard, ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc.pilutil import *
# Dataset layout: DRIVE with a train/validate split prepared on disk.
data_location = ''
training_images_loc = data_location + 'DRIVE/train/images/'
training_label_loc = data_location + 'DRIVE/train/labels/'
validate_images_loc = data_location + 'DRIVE/validate/images/'
validate_label_loc = data_location + 'DRIVE/validate/labels/'
train_files = os.listdir(training_images_loc)
train_data = []
train_label = []
validate_files = os.listdir(validate_images_loc)
validate_data = []
validate_label = []
# DRIVE images are zero-padded to this square size before training.
desired_size = 592
# Load each training image and its '_manual1' annotation, pad both to a
# square with black borders, resize, and re-binarize the label at 127.
# NOTE(review): imread comes from the star-import of scipy.misc.pilutil,
# which was removed in SciPy 1.2 -- this requires an old SciPy to run.
for i in train_files:
    im = imread(training_images_loc + i)
    label = imread(training_label_loc + i.split('_')[0] + '_manual1.png',mode="L")
    old_size = im.shape[:2] # old_size is in (height, width) format
    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0]
    color2 = [0]
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=color)
    new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                   value=color2)
    train_data.append(cv2.resize(new_im, (desired_size, desired_size)))
    temp = cv2.resize(new_label, (desired_size, desired_size))
    # Re-binarize the label after interpolation.
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    train_label.append(temp)
# Same preprocessing for the validation split.
for i in validate_files:
    im = imread(validate_images_loc + i)
    label = imread(validate_label_loc + i.split('_')[0] + '_manual1.png',mode="L")
    old_size = im.shape[:2] # old_size is in (height, width) format
    delta_w = desired_size - old_size[1]
    delta_h = desired_size - old_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    color = [0, 0, 0]
    color2 = [0]
    new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=color)
    new_label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                   value=color2)
    validate_data.append(cv2.resize(new_im, (desired_size, desired_size)))
    temp = cv2.resize(new_label, (desired_size, desired_size))
    _, temp = cv2.threshold(temp, 127, 255, cv2.THRESH_BINARY)
    validate_label.append(temp)
# Stack into arrays and scale pixel/label values to [0, 1].
train_data = np.array(train_data)
train_label = np.array(train_label)
validate_data = np.array(validate_data)
validate_label = np.array(validate_label)
x_train = train_data.astype('float32') / 255.
y_train = train_label.astype('float32') / 255.
x_train = np.reshape(x_train, (
    len(x_train), desired_size, desired_size, 3))  # adapt this if using `channels_first` image data format
y_train = np.reshape(y_train, (len(y_train), desired_size, desired_size, 1))  # adapt this if using `channels_first` im
x_validate = validate_data.astype('float32') / 255.
y_validate = validate_label.astype('float32') / 255.
x_validate = np.reshape(x_validate, (
    len(x_validate), desired_size, desired_size, 3))  # adapt this if using `channels_first` image data format
y_validate = np.reshape(y_validate,
                        (len(y_validate), desired_size, desired_size, 1))  # adapt this if using `channels_first` im
# NOTE(review): this TensorBoard instance is created and immediately
# discarded -- it has no effect; the callback passed to fit() below is a
# separate instance.
TensorBoard(log_dir='./autoencoder', histogram_freq=0,
            write_graph=True, write_images=True)
from SA_UNet import *

# Build the SA-UNet for DRIVE (592x592 RGB in, 1-channel vessel mask out).
model=SA_UNet(input_size=(desired_size,desired_size,3),start_neurons=16,lr=1e-3,keep_prob=0.82,block_size=7)
model.summary()
weight="Model/DRIVE/SA_UNet.h5"
# Resume from the saved weights when they exist.
restore=True
if restore and os.path.isfile(weight):
    model.load_weights(weight)
# Checkpoint every epoch (save_best_only=False) to `weight`.
model_checkpoint = ModelCheckpoint(weight, monitor='val_accuracy', verbose=1, save_best_only=False)
history=model.fit(x_train, y_train,
                  epochs=100, #first 100 with lr=1e-3,,and last 50 with lr=1e-4
                  batch_size=4,
                  # validation_split=0.05,
                  validation_data=(x_validate, y_validate),
                  shuffle=True,
                  callbacks= [TensorBoard(log_dir='./autoencoder'), model_checkpoint])
print(history.history.keys())
# summarize history for accuracy
# BUGFIX: the history was read with the mismatched keys 'acc' and
# 'val_accuracy'; no Keras version produces both spellings, so one lookup
# always raised KeyError.  Use 'accuracy'/'val_accuracy' consistently
# (matching the ModelCheckpoint monitor above).
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('SA-UNet Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validate'], loc='lower right')
plt.show()
| 4,541 | 35.336 | 119 | py |
import cv2
import os

# Please modify the path
path = "DRIVE/train/images"
save = "Drive/flip/images/"
# Write a horizontally, vertically, and doubly flipped copy of every image.
for name in os.listdir(path):
    # BUGFIX: the original concatenated `path + name` without a separator
    # (path has no trailing slash), producing paths like
    # "DRIVE/train/imagesX.png" so every read failed.  The save prefix
    # already ends with '/', so only the read needed os.path.join.
    image = cv2.imread(os.path.join(path, name))
    # Flipped Horizontally
    h_flip = cv2.flip(image, 1)
    cv2.imwrite(save + "h" + name, h_flip)
    # Flipped Vertically
    v_flip = cv2.flip(image, 0)
    cv2.imwrite(save + "v" + name, v_flip)
    # Flipped Horizontally & Vertically
    hv_flip = cv2.flip(image, -1)
    cv2.imwrite(save + "hv" + name, hv_flip)
| 476 | 20.681818 | 40 | py |
SA-UNet | SA-UNet-master/SA_UNet.py |
from keras.optimizers import *
from keras.models import Model
from keras.layers import Input,Conv2DTranspose, MaxPooling2D,BatchNormalization,concatenate,Activation
from Spatial_Attention import *
def Backbone(input_size=(512, 512, 3), block_size=7,keep_prob=0.9,start_neurons=16,lr=1e-3):
    """Plain U-Net backbone with DropBlock + BatchNorm after every conv.

    :param input_size: (H, W, C) of the input images.
    :param block_size: DropBlock block size.
    :param keep_prob: DropBlock keep probability.
    :param start_neurons: Filter count of the first level (doubled per level).
    :param lr: Adam learning rate.
    :return: compiled Keras model with a 1-channel sigmoid output.
    """
    def conv_unit(x, filters):
        # The repeated building block: Conv -> DropBlock -> BN -> ReLU.
        x = Conv2D(filters, (3, 3), activation=None, padding="same")(x)
        x = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    inputs = Input(input_size)
    # Encoder: three levels, each two conv units followed by 2x2 max-pool.
    conv1 = conv_unit(conv_unit(inputs, start_neurons * 1), start_neurons * 1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    conv2 = conv_unit(conv_unit(pool1, start_neurons * 2), start_neurons * 2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    conv3 = conv_unit(conv_unit(pool2, start_neurons * 4), start_neurons * 4)
    pool3 = MaxPooling2D((2, 2))(conv3)
    # Bottleneck.
    convm = conv_unit(conv_unit(pool3, start_neurons * 8), start_neurons * 8)
    # Decoder: transposed conv, skip concatenation, two conv units per level.
    deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv3 = conv_unit(conv_unit(concatenate([deconv3, conv3]), start_neurons * 4), start_neurons * 4)
    deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
    uconv2 = conv_unit(conv_unit(concatenate([deconv2, conv2]), start_neurons * 2), start_neurons * 2)
    deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    uconv1 = conv_unit(conv_unit(concatenate([deconv1, conv1]), start_neurons * 1), start_neurons * 1)
    # 1x1 conv down to a single-channel probability map.
    output_layer_noActi = Conv2D(1, (1, 1), padding="same", activation=None)(uconv1)
    output_layer = Activation('sigmoid')(output_layer_noActi)
    # BUGFIX: Model's keyword arguments are `inputs`/`outputs`; the singular
    # `input`/`output` names were removed in later Keras/TF releases.
    model = Model(inputs=inputs, outputs=output_layer)
    model.compile(optimizer=Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model
def SA_UNet(input_size=(512, 512, 3), block_size=7,keep_prob=0.9,start_neurons=16,lr=1e-3):
    """SA-UNet: the Backbone U-Net with a spatial-attention module inserted
    between the two bottleneck conv units.

    :param input_size: (H, W, C) of the input images.
    :param block_size: DropBlock block size.
    :param keep_prob: DropBlock keep probability.
    :param start_neurons: Filter count of the first level (doubled per level).
    :param lr: Adam learning rate.
    :return: compiled Keras model with a 1-channel sigmoid output.
    """
    def conv_unit(x, filters):
        # The repeated building block: Conv -> DropBlock -> BN -> ReLU.
        x = Conv2D(filters, (3, 3), activation=None, padding="same")(x)
        x = DropBlock2D(block_size=block_size, keep_prob=keep_prob)(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)

    inputs = Input(input_size)
    # Encoder: three levels, each two conv units followed by 2x2 max-pool.
    conv1 = conv_unit(conv_unit(inputs, start_neurons * 1), start_neurons * 1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    conv2 = conv_unit(conv_unit(pool1, start_neurons * 2), start_neurons * 2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    conv3 = conv_unit(conv_unit(pool2, start_neurons * 4), start_neurons * 4)
    pool3 = MaxPooling2D((2, 2))(conv3)
    # Bottleneck with spatial attention between the two conv units.
    convm = conv_unit(pool3, start_neurons * 8)
    convm = spatial_attention(convm)
    convm = conv_unit(convm, start_neurons * 8)
    # Decoder: transposed conv, skip concatenation, two conv units per level.
    deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv3 = conv_unit(conv_unit(concatenate([deconv3, conv3]), start_neurons * 4), start_neurons * 4)
    deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv3)
    uconv2 = conv_unit(conv_unit(concatenate([deconv2, conv2]), start_neurons * 2), start_neurons * 2)
    deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv2)
    uconv1 = conv_unit(conv_unit(concatenate([deconv1, conv1]), start_neurons * 1), start_neurons * 1)
    # 1x1 conv down to a single-channel probability map.
    output_layer_noActi = Conv2D(1, (1, 1), padding="same", activation=None)(uconv1)
    output_layer = Activation('sigmoid')(output_layer_noActi)
    # BUGFIX: Model's keyword arguments are `inputs`/`outputs`; the singular
    # `input`/`output` names were removed in later Keras/TF releases.
    model = Model(inputs=inputs, outputs=output_layer)
    model.compile(optimizer=Adam(lr=lr), loss='binary_crossentropy', metrics=['accuracy'])
    return model
| 9,007 | 45.43299 | 102 | py |
SA-UNet | SA-UNet-master/Spatial_Attention.py | from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, \
Conv2D, Add, Activation, Lambda,Conv1D
from Dropblock import *
def spatial_attention(input_feature):
    """Spatial attention module (CBAM-style).

    Pools the input over the channel axis with both mean and max, runs the
    2-channel map through a 7x7 sigmoid conv, and rescales the input by the
    resulting single-channel attention map.
    NOTE(review): relies on the private `_keras_shape` attribute, which only
    exists in older standalone Keras -- confirm the pinned Keras release.
    """
    kernel_size = 7
    if K.image_data_format() == "channels_first":
        channel = input_feature._keras_shape[1]
        # Work internally in channels_last layout.
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = input_feature._keras_shape[-1]
        cbam_feature = input_feature
    # Channel-wise average and max pooling (one feature map each).
    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool._keras_shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool._keras_shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat._keras_shape[-1] == 2
    # Fuse the two pooled maps into a single attention map in [0, 1].
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature._keras_shape[-1] == 1
    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)
    # Element-wise rescaling of the input by the attention map.
    return multiply([input_feature, cbam_feature])
| 1,364 | 40.363636 | 118 | py |
SA-UNet | SA-UNet-master/Eval_chase.py | 1 | 0 | 0 | py | |
multeval | multeval-master/reg-test/write-sgm.py | #!/usr/bin/env python
# Stolen from METEOR's mt-diff.py tool
# (under the LGPL license)
import math, os, re, shutil, sys, tempfile
def main(argv):
# Usage
if len(argv[1:]) < 3:
print 'usage: {0} <lang> <hyps> <out_dir> <ref1> [ref2 ...]'. \
format(argv[0])
print 'langs: {0}'.format(langs)
sys.exit(1)
# Language
lang = argv[1]
# Files
hyp_file = argv[2]
work_dir = argv[3]
ref_files = argv[4:]
# Work directory
if not os.path.exists(work_dir):
os.makedirs(work_dir)
# SGML Files
hyp_sgm = os.path.join(work_dir, 'hyps')
src_sgm = os.path.join(work_dir, 'src')
ref_sgm = os.path.join(work_dir, 'ref')
# Hyp1
write_sgm(hyp_file, hyp_sgm, \
'<tstset trglang="any" setid="any" srclang="any">', '</tstset>')
# Src (ref1)
ref_len = write_sgm(ref_files[0], src_sgm, \
'<srcset trglang="any" setid="any" srclang="any">', '</srcset>')
# Ref (all refs)
write_ref_sgm(ref_files, ref_sgm, \
'<refset trglang="any" setid="any" srclang="any">', '</refset>')
def write_sgm(in_file, out_sgm, header, footer):
    """Wrap the lines of ``in_file`` into a single-doc SGML file.

    Each input line becomes a ``<seg id="N">`` element inside one ``<doc>``;
    ``header``/``footer`` delimit the whole set. Returns the number of
    segments written. (Python 2 syntax.)
    """
    file_in = open(in_file)
    file_out = open(out_sgm, 'w')
    print >> file_out, header
    print >> file_out, '<doc sysid="any" docid="any">'
    i = 0
    for line in file_in:
        i += 1
        print >> file_out, '<seg id="{0}"> {1} </seg>'.format(i, line.strip())
    print >> file_out, '</doc>'
    print >> file_out, footer
    file_in.close()
    file_out.close()
    return i
def write_ref_sgm(in_files, out_sgm, header, footer):
    """Write all reference files into one SGML refset.

    Each input file becomes its own ``<doc>`` with a numeric sysid; every
    line is wrapped in a ``<seg>`` element. (Python 2 syntax.)
    """
    file_out = open(out_sgm, 'w')
    print >> file_out, header
    sys_id = 0
    for in_file in in_files:
        sys_id += 1
        file_in = open(in_file)
        print >> file_out, '<doc sysid="{0}" docid="any">'.format(sys_id)
        i = 0
        for line in file_in:
            i += 1
            print >> file_out, '<seg id="{0}"> {1} </seg>'. \
                format(i, line.strip())
        print >> file_out, '</doc>'
        file_in.close()
    print >> file_out, footer
    file_out.close()


# Script entry point.
if __name__ == '__main__' : main(sys.argv)
| 2,144 | 25.8125 | 78 | py |
pegnn | pegnn-master/train_autoencoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.loader import DataLoader
import json
from src.datasets import CSVDataset
from src.utils.scaler import LatticeScaler
from src.utils.visualize import get_fig
from src.utils.debug import check_grad
from src.utils.io import AggregateBatch
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import random
import datetime
parser = argparse.ArgumentParser(description="train denoising model")
parser.add_argument("--hparams", "-H", default=None, help="json file")
parser.add_argument("--tensorboard", "-t", default="./runs_autoencoder")
parser.add_argument("--dataset", "-D", default="./data/mp_20")
parser.add_argument("--device", "-d", default="cuda")
parser.add_argument("--verbose", "-v", default=False, action="store_true")
parser.add_argument("--log-interval", "-l", default=128, type=int)
parser.add_argument("--debug", "-g", default=False, action="store_true")
args = parser.parse_args()
from src.models.operator.autoencoder import AutoEncoder, AutoEncoderMLP
from src.models.operator.loss import get_loss, LossLatticeParameters
from src.models.operator.utils import (
LogSpike,
AggregateMetrics,
training_iterator,
validation_iterator,
testing_iterator,
Checkpoints,
Hparams,
)
# run name
dataset_name = os.path.split(args.dataset)[1]
tday = datetime.datetime.now()
run_name = tday.strftime(
f"training_%Y_%m_%d_%H_%M_%S_{dataset_name}_{random.randint(0,1000):<03d}"
)
print("run name:", run_name)
# basic setup
device = args.device
log_interval = args.log_interval
output_directory = args.tensorboard
# setup hyperparameters
hparams = Hparams()
if args.hparams is not None:
hparams.from_json(args.hparams)
print("hyper-parameters:")
print(json.dumps(hparams.dict(), indent=4))
# setup logs
log_dir = os.path.join(output_directory, run_name)
os.makedirs(output_directory, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)
hparams.to_json(os.path.join(log_dir, "hparams.json"))
log_spike = LogSpike(log_dir, threshold=0.5, verbose=args.verbose, debug=args.debug)
log_metrics_train = AggregateMetrics(writer, "train")
log_metrics_valid = AggregateMetrics(writer, "valid")
log_metrics_test = AggregateMetrics(writer, "test")
# load data and data scaler
dataset_train = CSVDataset(
os.path.join(args.dataset, "train.csv"), verbose=args.verbose, multithread=True
)
dataset_val = CSVDataset(
os.path.join(args.dataset, "val.csv"), verbose=args.verbose, multithread=True
)
dataset_test = CSVDataset(
os.path.join(args.dataset, "test.csv"), verbose=args.verbose, multithread=True
)
dataloader_train = DataLoader(dataset_train, batch_size=hparams.batch_size)
dataloader_val = DataLoader(dataset_val, batch_size=hparams.batch_size)
dataloader_test = DataLoader(dataset_test, batch_size=hparams.batch_size)
lattice_scaler = LatticeScaler()
lattice_scaler.fit(dataloader_train, args.verbose)
lattice_scaler = lattice_scaler.to(args.device)
# setup model, loss and optimizer
model = AutoEncoder(
features=hparams.features,
knn=hparams.knn,
ops_config=hparams.ops_config,
layers=hparams.mpnn_layers,
scale_limit_weights=hparams.scale_limit_weights,
scale_hidden_dim=hparams.scale_hidden_dim,
scale_limit_actions=hparams.scale_limit_actions,
scale_reduce_rho=hparams.scale_reduce_rho,
).to(device)
if hparams.loss == "parameters_l1":
loss_fn = LossLatticeParameters(lattice_scaler=lattice_scaler, distance="l1")
elif hparams.loss == "parameters_mse":
loss_fn = LossLatticeParameters(lattice_scaler=lattice_scaler, distance="mse")
else:
raise Exception(f"unknown loss {hparams.loss}")
opti = optim.Adam(model.parameters(), lr=hparams.lr, betas=(hparams.beta1, 0.999))
# setup checkpoint and training loop
checkpoints = Checkpoints(log_dir, model, opti)
data_it, tqdm_bar = training_iterator(
dataloader_train, hparams.total_step, verbose=args.verbose
)
for opt_step, batch in data_it:
model.train()
batch = batch.to(args.device)
# training step
opti.zero_grad()
loss, metrics = get_loss(batch, model, loss_fn)
loss.backward()
check_grad(model, verbose=args.verbose, debug=args.debug)
if hparams.grad_clipping is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clipping)
opti.step()
# logs
log_spike.log(loss, opt_step, batch, model, opti)
log_metrics_train.append(loss, metrics)
if args.verbose:
tqdm_bar.set_description(log_metrics_train.preview())
# validation
if (opt_step % log_interval) == 0:
log_metrics_train.log(opt_step)
model.eval()
with torch.no_grad():
fig = None
for batch in validation_iterator(dataloader_val, verbose=args.verbose):
batch = batch.to(device)
if fig is None:
fig = get_fig(batch, model, 8, lattice_scaler=lattice_scaler)
loss, metrics = get_loss(batch, model, loss_fn)
log_metrics_valid.append(loss, metrics)
metrics = log_metrics_valid.log(opt_step)
writer.add_figure("reconstruction", fig, opt_step)
checkpoints.step(opt_step, metrics)
# testing from the best checkpoint
model = checkpoints.load_best()
model = model.to(device)
model.eval()
aggregate = AggregateBatch()
with torch.no_grad():
for batch in testing_iterator(dataloader_test, verbose=args.verbose):
batch = batch.to(device)
loss, metrics, full_batch = get_loss(
batch, model, loss_fn, return_batch=True
)
aggregate.append(*full_batch)
log_metrics_test.append(loss, metrics)
metrics = log_metrics_test.log(opt_step, hparams=hparams.dict())
with open(os.path.join(log_dir, "metrics.json"), "w") as fp:
json.dump(metrics, fp, indent=4)
aggregate.write(os.path.join(log_dir, "output/test"), verbose=args.verbose)
print("\ntest metrics:")
print(json.dumps(metrics, indent=4))
| 6,650 | 31.602941 | 88 | py |
pegnn | pegnn-master/train_benchmark.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.loader import DataLoader
import json
from src.datasets import CSVDataset
from src.utils.scaler import LatticeScaler
from src.utils.visualize import get_fig
from src.utils.debug import check_grad
from src.utils.io import AggregateBatch
if __name__ == "__main__":
from torch.utils.tensorboard import SummaryWriter
import argparse
import os
import random
import datetime
parser = argparse.ArgumentParser(description="train denoising model")
parser.add_argument("--hparams", "-H", default=None, help="json file")
parser.add_argument("--tensorboard", "-t", default="./runs_benchmark")
parser.add_argument("--dataset", "-D", default="./data/carbon_24")
parser.add_argument("--device", "-d", default="cuda")
parser.add_argument("--verbose", "-v", default=False, action="store_true")
parser.add_argument("--log-interval", "-l", default=128, type=int)
parser.add_argument("--debug", "-g", default=False, action="store_true")
args = parser.parse_args()
from src.models.operator.denoise import Denoise
from src.models.operator.loss import (
get_loss,
LossLatticeParameters,
LossActionMatrixDistance,
LossLatticeMetric
)
from src.models.operator.utils import (
LogSpike,
AggregateMetrics,
training_iterator,
validation_iterator,
testing_iterator,
Checkpoints,
Hparams,
)
# run name
dataset_name = os.path.split(args.dataset)[1]
tday = datetime.datetime.now()
run_name = tday.strftime(
f"training_%Y_%m_%d_%H_%M_%S_{dataset_name}_{random.randint(0,1000):<03d}"
)
print("run name:", run_name)
# basic setup
device = args.device
log_interval = args.log_interval
output_directory = args.tensorboard
# setup hyperparameters
hparams = Hparams()
if args.hparams is not None:
hparams.from_json(args.hparams)
print("hyper-parameters:")
print(json.dumps(hparams.dict(), indent=4))
# setup logs
log_dir = os.path.join(output_directory, run_name)
os.makedirs(output_directory, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)
hparams.to_json(os.path.join(log_dir, "hparams.json"))
log_spike = LogSpike(log_dir, threshold=0.5,
verbose=args.verbose, debug=args.debug)
log_metrics_train = AggregateMetrics(writer, "train")
log_metrics_valid = AggregateMetrics(writer, "valid")
log_metrics_test = AggregateMetrics(writer, "test")
# load data and data scaler
dataset_train = CSVDataset(os.path.join(
args.dataset, "train.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataset_val = CSVDataset(os.path.join(
args.dataset, "val.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataset_test = CSVDataset(os.path.join(
args.dataset, "test.csv"), verbose=args.verbose, knn=hparams.knn, multithread=False)
dataloader_train = DataLoader(dataset_train, batch_size=hparams.batch_size)
dataloader_val = DataLoader(dataset_val, batch_size=hparams.batch_size)
dataloader_test = DataLoader(dataset_test, batch_size=hparams.batch_size)
lattice_scaler = LatticeScaler()
lattice_scaler.fit(dataloader_train, args.verbose)
lattice_scaler = lattice_scaler.to(args.device)
# setup model, loss and optimizer
model = Denoise(
features=hparams.features,
knn=hparams.knn,
ops_config=hparams.ops_config,
mpnn=hparams.mpnn_layers,
steps=hparams.steps,
scale_limit_weights=hparams.scale_limit_weights,
scale_hidden_dim=hparams.scale_hidden_dim,
scale_layers=hparams.scale_layers,
scale_limit_actions=hparams.scale_limit_actions,
scale_reduce_rho=hparams.scale_reduce_rho,
repeated=hparams.repeated,
mlp_lattice=hparams.mlp_lattice,
lattice_scaler=lattice_scaler
).to(device)
if hparams.loss == "metric_l1":
loss_fn = LossLatticeMetric(distance="l1")
elif hparams.loss == "metric_mse":
loss_fn = LossLatticeMetric(distance="mse")
elif hparams.loss == "metric_trace":
loss_fn = LossLatticeMetric(distance="trace")
elif hparams.loss == "actions_l1":
loss_fn = LossActionMatrixDistance(distance="l1")
elif hparams.loss == "actions_mse":
loss_fn = LossActionMatrixDistance(distance="mse")
elif hparams.loss == "actions_trace":
loss_fn = LossActionMatrixDistance(distance="trace")
elif hparams.loss == "parameters_l1":
loss_fn = LossLatticeParameters(
lattice_scaler=lattice_scaler, distance="l1")
elif hparams.loss == "parameters_mse":
loss_fn = LossLatticeParameters(
lattice_scaler=lattice_scaler, distance="mse")
else:
raise Exception(f"unknown loss {hparams.loss}")
opti = optim.Adam(model.parameters(), lr=hparams.lr,
betas=(hparams.beta1, 0.999))
# setup checkpoint and training loop
checkpoints = Checkpoints(log_dir, model, opti)
data_it, tqdm_bar = training_iterator(
dataloader_train, hparams.total_step, verbose=args.verbose
)
for opt_step, batch in data_it:
model.train()
batch = batch.to(args.device)
# training step
opti.zero_grad()
loss, metrics = get_loss(batch, model, loss_fn)
loss.backward()
check_grad(model, verbose=args.verbose, debug=args.debug)
if hparams.grad_clipping is not None:
torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clipping)
opti.step()
# logs
log_spike.log(loss, opt_step, batch, model, opti)
log_metrics_train.append(loss, metrics)
if args.verbose:
tqdm_bar.set_description(log_metrics_train.preview())
# validation
if (opt_step % log_interval) == 0:
log_metrics_train.log(opt_step)
model.eval()
with torch.no_grad():
fig = None
for batch in validation_iterator(dataloader_val, verbose=args.verbose):
batch = batch.to(device)
if fig is None:
fig = get_fig(batch, model, 8)
loss, metrics = get_loss(batch, model, loss_fn)
log_metrics_valid.append(loss, metrics)
metrics = log_metrics_valid.log(opt_step)
writer.add_figure("denoising", fig, opt_step)
checkpoints.step(opt_step, metrics)
# testing from the best checkpoint
model = checkpoints.load_best()
model = model.to(device)
model.eval()
aggregate = AggregateBatch()
with torch.no_grad():
for batch in testing_iterator(dataloader_test, verbose=args.verbose):
batch = batch.to(device)
loss, metrics, full_batch = get_loss(
batch, model, loss_fn, return_batch=True)
aggregate.append(*full_batch)
log_metrics_test.append(loss, metrics)
metrics = log_metrics_test.log(opt_step, hparams=hparams.dict())
with open(os.path.join(log_dir, "metrics.json"), "w") as fp:
json.dump(metrics, fp, indent=4)
aggregate.write(os.path.join(log_dir, "output/test"), verbose=args.verbose)
print("\ntest metrics:")
print(json.dumps(metrics, indent=4))
| 7,547 | 32.251101 | 93 | py |
pegnn | pegnn-master/src/models/operator/loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.datasets.data import CrystalData
from src.utils.scaler import LatticeScaler
from src.models.operator.utils import lattice_params_to_matrix_torch
from typing import Dict, Tuple
def get_metrics(batch: CrystalData, reconstructed: torch.FloatTensor, scaler: LatticeScaler) -> Dict[str, torch.FloatTensor]:
    """Mean absolute errors between true and reconstructed lattice parameters.

    ``reconstructed`` is either a lattice matrix (converted to parameters
    through ``scaler``) or an already-split ``(lengths, angles)`` tuple.
    """
    true_lengths, true_angles = scaler.get_lattices_parameters(batch.cell)
    if isinstance(reconstructed, tuple):
        pred_lengths, pred_angles = reconstructed
    else:
        pred_lengths, pred_angles = scaler.get_lattices_parameters(
            reconstructed)
    return {
        "lengths_error": (pred_lengths - true_lengths).abs().mean().detach(),
        "angles_error": (pred_angles - true_angles).abs().mean().detach(),
    }
class LossLattice(nn.Module):
    """Abstract base class for lattice reconstruction losses.

    Stores the ``LatticeScaler`` so subclasses (and ``get_loss``) can
    normalise/denormalise lattice parameters; ``forward`` must be
    implemented by subclasses.
    """

    def __init__(self, lattice_scaler: LatticeScaler):
        super().__init__()
        self.lattice_scaler = lattice_scaler

    def forward(self, batch: CrystalData, reconstructed: torch.FloatTensor) -> torch.FloatTensor:
        # Subclasses implement the actual distance between true and
        # reconstructed lattices.
        raise NotImplementedError
def get_loss(batch: CrystalData, model: nn.Module, loss_fn: LossLattice, return_batch: bool = False) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
    """Run ``model`` on ``batch`` and evaluate ``loss_fn`` plus lattice metrics.

    When the model emits a ``(lengths, angles)`` tuple it is denormalised
    before the metrics and converted to a 3x3 matrix before being returned.
    With ``return_batch`` the prediction is returned alongside the inputs.
    """
    prediction = model(
        cell=batch.cell, x=batch.pos, z=batch.z, struct_size=batch.num_atoms
    )
    loss = loss_fn(batch, prediction)
    scaler = loss_fn.lattice_scaler
    lattice = prediction
    if isinstance(lattice, tuple):
        # normalised (lengths, angles) -> physical parameters
        lattice = scaler.denormalise(lattice[0], lattice[1])
    metrics = get_metrics(batch, lattice, scaler)
    if isinstance(lattice, tuple):
        # still a parameter tuple: build the 3x3 lattice matrix
        lattice = lattice_params_to_matrix_torch(*lattice)
    if not return_batch:
        return loss, metrics
    return loss, metrics, (batch.cell, lattice, batch.pos, batch.z, batch.num_atoms)
class LossLatticeParameters(LossLattice):
    """L1 or MSE loss on the normalised lattice parameters.

    Lengths and angles are normalised by the scaler, concatenated into a
    single 6-dim vector per structure and compared with the prediction.
    """

    def __init__(self, lattice_scaler: LatticeScaler, distance: str = "l1"):
        super().__init__(lattice_scaler=lattice_scaler)
        assert distance in ["l1", "mse"]
        self.distance = distance

    def forward(self, batch: CrystalData, reconstructed: torch.FloatTensor) -> torch.FloatTensor:
        target = torch.cat(self.lattice_scaler.normalise_lattice(batch.cell), dim=1)
        if isinstance(reconstructed, tuple):
            # model already produced normalised (lengths, angles)
            predicted = reconstructed
        else:
            predicted = self.lattice_scaler.normalise_lattice(reconstructed)
        predicted = torch.cat(predicted, dim=1)
        # ``distance`` is restricted to {"l1", "mse"} by the constructor
        if self.distance == "mse":
            return F.mse_loss(predicted, target)
        return F.l1_loss(predicted, target)
| 2,878 | 32.476744 | 148 | py |
pegnn | pegnn-master/src/models/operator/utils.py | import torch
import torch.nn as nn
import tqdm
import os
import json
from dataclasses import dataclass
def save_step(spike_dir, batch, model, opti):
    """Dump batch, gradients, weights and optimizer state into ``spike_dir``.

    Tensors are serialised as JSON lists (the optimizer state is saved with
    ``torch.save``) so a loss spike can be inspected offline.
    """
    os.makedirs(spike_dir, exist_ok=True)

    def _dump_json(name, payload):
        # one JSON file per artefact
        with open(os.path.join(spike_dir, name), "w") as fp:
            json.dump(payload, fp)

    _dump_json("batch.json", {
        "cell": batch.cell.tolist(),
        "pos": batch.pos.tolist(),
        "z": batch.z.tolist(),
        "num_atoms": batch.num_atoms.tolist(),
    })
    _dump_json("grad.json", {
        name: param.grad.tolist()
        for name, param in model.named_parameters()
        if param.grad is not None
    })
    _dump_json("model.json", {
        name: param.tolist() for name, param in model.named_parameters()
    })
    torch.save(opti.state_dict(), os.path.join(spike_dir, "opti.pt"))
class LogSpike:
    """Detect sudden jumps of the training loss and optionally dump state.

    Compares each loss with the previous one; a relative change larger than
    ``threshold`` counts as a spike. In debug mode the offending batch,
    gradients, weights and optimizer state are written below ``log_dir``.
    """

    def __init__(self, log_dir, threshold=0.5, verbose=False, debug=False):
        self.log_dir = log_dir
        self.prev_loss = None  # loss of the previous step; None until first call
        self.verbose = verbose
        self.debug = debug
        self.threshold = threshold

    def log(self, loss, opt_step, batch, model, opti):
        """Record ``loss`` and report a spike relative to the previous step.

        BUG FIX: the original read and wrote a local ``prev_loss`` instead of
        ``self.prev_loss``, so the detector never fired (and would have
        raised NameError if it had).
        """
        if self.prev_loss is not None:
            if abs((loss.item() / self.prev_loss) - 1.0) > self.threshold:
                if self.debug:
                    spike_dir = os.path.join(
                        self.log_dir,
                        "spike",
                        f"epoch_{opt_step}_loss_{loss.item():.3f}",
                    )
                    save_step(spike_dir, batch, model, opti)
                if self.verbose:
                    print(
                        f"loss spike detected (from {self.prev_loss:.6f} to {loss.item():.6f})"
                    )
        self.prev_loss = loss.item()
class AggregateMetrics:
    """Accumulate per-step loss/metric scalars and flush their means.

    ``writer`` is a TensorBoard SummaryWriter or None; with None, ``log``
    only returns the averaged metrics without writing scalars.
    """

    def __init__(self, writer, label):
        self.writer = writer
        self.label = label
        self.loss = []
        self.lengths_error = []
        self.angles_error = []

    def append(self, loss, metrics):
        """Store one optimisation step worth of scalar values."""
        self.loss.append(loss.item())
        self.lengths_error.append(metrics["lengths_error"].item())
        self.angles_error.append(metrics["angles_error"].item())

    def preview(self):
        """One-line summary of the most recent step (used as a tqdm label)."""
        parts = (
            f"loss: {self.loss[-1]:.4f}",
            f"lengths error: {self.lengths_error[-1]:.4f}",
            f"angles error: {self.angles_error[-1]:.4f}",
        )
        return " ".join(parts)

    def log(self, opt_step, clear=True, hparams=None):
        """Average everything accumulated so far, write it out, return it."""
        metrics = {
            name: torch.tensor(values).mean().item()
            for name, values in (
                ("loss", self.loss),
                ("lengths_error", self.lengths_error),
                ("angles_error", self.angles_error),
            )
        }
        if self.writer is not None:
            for name, value in metrics.items():
                self.writer.add_scalar(f"{self.label}/{name}", value, opt_step)
            if hparams is not None:
                self.writer.add_hparams(hparams, metrics)
        if clear:
            self.loss = []
            self.lengths_error = []
            self.angles_error = []
        return metrics
def training_iterator(loader, total_step, verbose=True):
    """Re-iterate ``loader`` until ``total_step + 1`` batches were yielded.

    Returns a pair ``(enumerate(batches), tqdm_bar)``; ``tqdm_bar`` is None
    unless ``verbose`` is set.
    """
    budget = total_step + 1

    def _cycle(source, limit):
        done = 0
        while done < limit:
            # restart the loader each epoch (keeps DataLoader shuffling),
            # but never emit more than the remaining budget
            for _, item in zip(range(limit - done), source):
                yield item
            done += len(source)

    batches = _cycle(loader, budget)
    if not verbose:
        return enumerate(batches), None
    bar = tqdm.tqdm(batches, total=total_step)
    return enumerate(iter(bar)), bar
def validation_iterator(loader, verbose=True):
    """Wrap ``loader`` in a nested tqdm bar when verbose, else pass it through."""
    if not verbose:
        return loader
    return tqdm.tqdm(loader, desc="validation", position=1, leave=False)
def testing_iterator(loader, verbose=True):
if verbose:
return tqdm.tqdm(loader, desc="testing")
return loader
class Checkpoints:
    """Persist model/optimizer state and keep the single best checkpoint.

    ``model.pt``/``opti.pt`` in ``log_dir`` always mirror the latest state;
    an additional ``best_model_*.pt`` file tracks the lowest validation
    error (lengths_error + angles_error) seen so far.
    """

    def __init__(self, log_dir, model, opti):
        self.log_dir = log_dir
        self.opti = opti
        self.model = model
        torch.save(opti.state_dict(), os.path.join(log_dir, "opti.pt"))
        torch.save(model.state_dict(), os.path.join(log_dir, "model.pt"))
        self.best = float("inf")
        # until a best exists, fall back to the plain model.pt snapshot
        self.filename_best = os.path.join(log_dir, "model.pt")

    def step(self, opt_step, metrics):
        """Save the current state; promote it to best if validation improved."""
        score = metrics["lengths_error"] + metrics["angles_error"]
        if score < self.best:
            self.best = score
            # only remove the previous file if it was a dedicated best-file
            stale = self.filename_best if "best" in self.filename_best else None
            tag = f"best_model_batch_{opt_step}_val_{self.best:.3f}".replace(".", "_")
            self.filename_best = os.path.join(self.log_dir, tag + ".pt")
            torch.save(self.model.state_dict(), self.filename_best)
            if stale is not None:
                os.remove(stale)
        # latest snapshots are refreshed on every call
        torch.save(self.model.state_dict(),
                   os.path.join(self.log_dir, "model.pt"))
        torch.save(self.opti.state_dict(),
                   os.path.join(self.log_dir, "opti.pt"))

    def load_best(self):
        """Reload the best checkpoint's weights into ``self.model`` (on CPU)."""
        state = torch.load(self.filename_best,
                           map_location=torch.device("cpu"))
        self.model.load_state_dict(state)
        return self.model
@dataclass
class Hparams:
    """Hyper-parameters shared by the operator training scripts.

    The defaults are the reference configuration; a JSON file can override
    any subset of fields via ``from_json``.
    """
    # optimisation
    batch_size: int = 1 << 8
    total_step: int = 1 << 15
    lr: float = 1e-4  # included every time in grid search
    beta1: float = 0.9
    grad_clipping: float = 1.0
    loss: str = "parameters_l1"
    # graph construction and model size
    knn: int = 16
    features: int = 128
    ops_config_type: str = "grad"
    ops_config_normalize: bool = True
    ops_config_edges: str = "n_ij"
    ops_config_triplets: str = "n_ij|n_ik|angle"
    mpnn_layers: int = 8
    steps: int = 4
    # actions/scale head
    scale_limit_weights: float = 0.0  # included every time in grid search
    scale_hidden_dim: int = 256
    scale_layers: int = 1
    scale_limit_actions: float = 0.5  # included every time in grid search
    scale_reduce_rho: str = "mean"
    repeated: bool = False
    mlp_lattice: bool = False

    @property
    def ops_config(self):
        """Assemble the dict consumed by the actions layers.

        The ``|``-separated string fields are split into lists; an empty
        string maps to an empty list.
        """
        def split(s, delimiter):
            if len(s) > 0:
                return s.split(delimiter)
            return []
        return {
            "type": self.ops_config_type,
            "normalize": self.ops_config_normalize,
            "edges": split(self.ops_config_edges, "|"),
            "triplets": split(self.ops_config_triplets, "|"),
        }

    def from_json(self, file_name):
        """Override fields from a JSON object; unknown keys are rejected."""
        with open(file_name, "r") as fp:
            hparams = json.load(fp)
        for key, value in hparams.items():
            assert key in self.__dict__
            self.__dict__[key] = value

    def to_json(self, file_name):
        """Dump the current configuration as pretty-printed JSON."""
        with open(file_name, "w") as fp:
            json.dump(self.__dict__, fp, indent=4)

    def dict(self):
        """Return the raw field dict (used for hparam logging)."""
        return self.__dict__
def build_mlp(in_dim, hidden_dim, fc_num_layers, out_dim):
    """Plain MLP: input Linear+ReLU, ``fc_num_layers - 1`` hidden
    Linear+ReLU pairs, and a final Linear projection to ``out_dim``."""
    layers = [nn.Linear(in_dim, hidden_dim), nn.ReLU()]
    for _ in range(fc_num_layers - 1):
        layers.extend((nn.Linear(hidden_dim, hidden_dim), nn.ReLU()))
    layers.append(nn.Linear(hidden_dim, out_dim))
    return nn.Sequential(*layers)
def lattice_params_to_matrix_torch(lengths, angles):
    """Batched lattice matrix from cell parameters.

    lengths: torch.Tensor of shape (N, 3), unit A
    angles: torch.Tensor of shape (N, 3), unit degree
    Returns an (N, 3, 3) tensor whose rows are the lattice vectors.
    """
    rad = torch.deg2rad(angles)
    cos_abc = torch.cos(rad)
    sin_abc = torch.sin(rad)
    # cosine of the reciprocal gamma angle; clamp guards against rounding
    # errors that push the value slightly outside [-1, 1]
    cos_gamma_star = (cos_abc[:, 0] * cos_abc[:, 1] - cos_abc[:, 2]) / (
        sin_abc[:, 0] * sin_abc[:, 1]
    )
    gamma_star = torch.arccos(torch.clamp(cos_gamma_star, -1.0, 1.0))
    zeros = torch.zeros(lengths.size(0), device=lengths.device)
    a_vec = torch.stack(
        (lengths[:, 0] * sin_abc[:, 1], zeros, lengths[:, 0] * cos_abc[:, 1]),
        dim=1,
    )
    b_vec = torch.stack(
        (
            -lengths[:, 1] * sin_abc[:, 0] * torch.cos(gamma_star),
            lengths[:, 1] * sin_abc[:, 0] * torch.sin(gamma_star),
            lengths[:, 1] * cos_abc[:, 0],
        ),
        dim=1,
    )
    c_vec = torch.stack((zeros, zeros, lengths[:, 2]), dim=1)
    return torch.stack((a_vec, b_vec, c_vec), dim=1)
| 8,969 | 28.409836 | 90 | py |
pegnn | pegnn-master/src/models/operator/denoise.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.models.layers.operator.gnn as ops
from src.models.operator.utils import build_mlp, lattice_params_to_matrix_torch
from src.utils.geometry import Geometry
from torch_scatter import scatter_mean
class Denoise(nn.Module):
    """Graph network that denoises a crystal lattice step by step.

    Node features from an MPNN stack drive one of three heads:

    * ``mlp_lattice``: pool node features per structure and regress the 6
      lattice parameters directly with an MLP;
    * ``repeated``: a single shared (update, actions) pair applied for
      ``steps`` iterations;
    * otherwise: one (update, actions) pair per step.

    The actions head produces a 3x3 transform per structure that is
    accumulated and re-applied to the input lattice at every step.
    """

    def __init__(
        self,
        features: int,
        knn: int,
        ops_config: dict,
        mpnn: int,
        steps: int,
        scale_limit_weights: float,
        scale_hidden_dim: int,
        scale_layers: int,
        scale_limit_actions: float,
        scale_reduce_rho: str,
        repeated: bool,
        mlp_lattice: bool = False,
        lattice_scaler=None,
    ):
        super(Denoise, self).__init__()
        self.knn = knn
        self.steps = steps
        self.repeated = repeated
        self.mlp_lattice = mlp_lattice
        # species embedding; assumes atomic numbers < 100 -- TODO confirm
        self.embedding = nn.Embedding(100, features)
        self.mpnn = nn.ModuleList(
            [ops.MPNN(features=features) for _ in range(mpnn)])
        # constant identity, registered as a parameter so it follows .to(device)
        self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        if self.mlp_lattice:
            assert lattice_scaler is not None
            self.lattice_scaler = lattice_scaler
            # 6 outputs: 3 normalised lengths + 3 normalised angles
            self.lattice_pred = build_mlp(features, 128, 4, 6)
        elif self.repeated:
            self.update = ops.MPNN(features=features)
            self.actions = ops.Actions(
                features,
                knn,
                ops_config,
                scale_k=scale_limit_weights,
                hidden_dim=scale_hidden_dim,
                n_layers=scale_layers,
                limit_actions=scale_limit_actions,
                reduce_rho=scale_reduce_rho,
            )
        else:
            self.update = nn.ModuleList(
                [ops.MPNN(features=features) for _ in range(self.steps)]
            )
            self.actions = nn.ModuleList(
                [
                    ops.Actions(
                        features,
                        knn,
                        ops_config,
                        scale_k=scale_limit_weights,
                        hidden_dim=scale_hidden_dim,
                        n_layers=scale_layers,
                        limit_actions=scale_limit_actions,
                        reduce_rho=scale_reduce_rho,
                    )
                    for _ in range(self.steps)
                ]
            )
        self.it = 0  # NOTE(review): not used elsewhere in this class

    def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
        """Identity action for every lattice in the batch, shape (B, 3, 3)."""
        return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)

    @property
    def device(self):
        # the embedding weight lives on the same device as the whole module
        return self.embedding.weight.device

    def forward(
        self,
        cell: torch.FloatTensor,
        x: torch.FloatTensor,
        z: torch.FloatTensor,
        struct_size: torch.LongTensor,
        edge_index: torch.LongTensor = None,
        edge_attr: torch.LongTensor = None,
        step: int = None,
    ):
        """Denoise ``cell`` given fractional coordinates ``x`` and species ``z``.

        Returns ``(final_lattice, per_step_lattices, per_step_actions)``;
        with ``mlp_lattice`` both lists have a single entry.
        """
        # ``x % 1`` wraps fractional coordinates back into the unit cell
        geometry = Geometry(cell, struct_size, x % 1, knn=self.knn,
                            edge_index=edge_index, edge_attr=edge_attr)
        if step is None:
            step = self.steps
        h = self.embedding(z)
        for l in self.mpnn:
            h = l(geometry, h)
        if self.mlp_lattice:
            latent = scatter_mean(h, geometry.batch, dim=0)
            lattice = self.lattice_pred(latent)
            lengths, angles = self.lattice_scaler.denormalise(
                lattice[:, :3], lattice[:, 3:]
            )
            cell_prime = lattice_params_to_matrix_torch(lengths, angles)
            # equivalent accumulated action: A = cell' @ cell^-1
            action = torch.bmm(cell_prime, torch.inverse(cell))
            return cell_prime, [cell_prime], [action]
        else:
            action_rho = self.actions_init(cell)
            rho_list = []
            actions_list = []
            if self.repeated:
                actions = self.actions
                update = self.update
            for i in range(step):
                if not self.repeated:
                    actions = self.actions[i]
                    update = self.update[i]
                h = update(geometry, h)
                edges_weights, triplets_weights = actions(geometry, h)
                rho_prime, action = actions.apply(
                    geometry, edges_weights, triplets_weights
                )
                # accumulate the action, then re-apply it to the ORIGINAL cell
                action_rho = torch.bmm(action, action_rho)
                rho_prime = torch.bmm(action_rho, cell)
                rho_list.append(rho_prime)
                actions_list.append(action_rho)
                # geometry vectors are refreshed with the updated lattice
                geometry.rho = rho_prime
                geometry.update_vectors()
            return geometry.rho, rho_list, actions_list
| 4,657 | 29.051613 | 79 | py |
pegnn | pegnn-master/src/models/operator/autoencoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import src.models.layers.operator.gnn as ops
from src.models.operator.utils import build_mlp
from src.utils.geometry import Geometry
from torch_scatter import scatter_mean
from typing import Tuple
class AutoEncoder(nn.Module):
    """Predict a lattice from atom positions and species alone.

    NOTE(review): ``forward`` immediately replaces the input ``cell`` with
    the identity (via ``actions_init``), so the reconstruction starts from a
    unit lattice and only uses ``cell`` for its batch size -- confirm this
    is intended.
    """

    def __init__(
        self,
        features: int,
        knn: int,
        ops_config: dict,
        layers: int,
        scale_limit_weights: float,
        scale_hidden_dim: int,
        scale_limit_actions: float,
        scale_reduce_rho: str,
    ):
        super(AutoEncoder, self).__init__()
        self.knn = knn
        self.layers = layers
        # species embedding; assumes atomic numbers < 100 -- TODO confirm
        self.embedding = nn.Embedding(100, features)
        self.mpnn = nn.ModuleList([ops.MPNN(features=features) for _ in range(layers)])
        # constant identity, registered so it follows .to(device)
        self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        self.update = nn.ModuleList(
            [ops.MPNN(features=features) for _ in range(layers)]
        )
        self.actions = nn.ModuleList(
            [
                ops.Actions(
                    features,
                    knn,
                    ops_config,
                    scale_k=scale_limit_weights,
                    hidden_dim=scale_hidden_dim,
                    n_layers=1,
                    limit_actions=scale_limit_actions,
                    reduce_rho=scale_reduce_rho,
                )
                for _ in range(layers)
            ]
        )

    def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
        """Identity matrix repeated for each structure in the batch."""
        return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)

    @property
    def device(self):
        # the embedding weight lives on the same device as the whole module
        return self.embedding.weight.device

    def forward(
        self,
        cell: torch.FloatTensor,
        x: torch.FloatTensor,
        z: torch.FloatTensor,
        struct_size: torch.LongTensor,
    ):
        """Return the reconstructed lattice matrices, shape (B, 3, 3)."""
        cell = self.actions_init(cell)  # start from the identity lattice
        geometry = Geometry(cell, struct_size, x % 1, knn=self.knn)
        # drop near-degenerate triplets (|sin| ~ 0 -> nearly collinear edges)
        geometry.filter_triplets(geometry.triplets_sin_ijk.abs() > 1e-3)
        h = self.embedding(z)
        for l in self.mpnn:
            h = l(geometry, h)
        action_rho = self.actions_init(cell)
        rho_list = []
        actions_list = []
        for i in range(self.layers):
            actions = self.actions[i]
            update = self.update[i]
            h = update(geometry, h)
            edges_weights, triplets_weights = actions(geometry, h)
            rho_prime, action = actions.apply(geometry, edges_weights, triplets_weights)
            # accumulate the action, then re-apply it to the identity cell
            action_rho = torch.bmm(action, action_rho)
            rho_prime = torch.bmm(action_rho, cell)
            rho_list.append(rho_prime)
            actions_list.append(action_rho)
            geometry.cell = rho_prime
            geometry.update_vectors()
        return geometry.cell  # , rho_list, actions_list
class AutoEncoderMLP(nn.Module):
    """MLP baseline: pool MPNN node features and regress lattice parameters.

    Returns normalised ``(lengths, angles)`` as produced by the final MLP;
    ``lattice_scaler`` is stored but not used inside ``forward``.
    """

    def __init__(
        self,
        features: int,
        knn: int,
        layers: int,
        lattice_scaler=None,
    ):
        super(AutoEncoderMLP, self).__init__()
        self.knn = knn
        self.layers = layers
        self.lattice_scaler = lattice_scaler
        # species embedding; assumes atomic numbers < 100 -- TODO confirm
        self.embedding = nn.Embedding(100, features)
        self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        self.mpnn = nn.ModuleList([ops.MPNN(features=features) for _ in range(layers)])
        # 6 outputs: 3 lengths + 3 angles (normalised)
        self.lattice_pred = build_mlp(features, 128, 4, 6)

    @property
    def device(self):
        return self.embedding.weight.device

    def actions_init(self, cell: torch.FloatTensor) -> torch.FloatTensor:
        """Identity matrix repeated for each structure in the batch."""
        return self.I.unsqueeze(0).repeat(cell.shape[0], 1, 1)

    def forward(
        self,
        cell: torch.FloatTensor,
        x: torch.FloatTensor,
        z: torch.FloatTensor,
        struct_size: torch.LongTensor,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Return normalised (lengths, angles) predictions per structure."""
        cell = self.actions_init(cell)  # geometry built on the identity lattice
        geometry = Geometry(cell, struct_size, x % 1, knn=self.knn)
        h = self.embedding(z)
        for l in self.mpnn:
            h = l(geometry, h)
        latent = scatter_mean(h, geometry.batch, dim=0)
        lattice = self.lattice_pred(latent)
        return (lattice[:, :3], lattice[:, 3:])
| 4,223 | 25.236025 | 88 | py |
pegnn | pegnn-master/src/models/layers/random.py | import torch
import torch.nn as nn
class RandomMatrixSL3Z(nn.Module):
    """Sample random elements of SL(3, Z) as products of group generators.

    Three generator matrices (and their inverses) are stored; a sample is
    the product of ``2**e`` random picks, computed by repeated pairwise
    multiplication and rounded back to integer entries.
    """

    def __init__(self):
        super().__init__()
        base = torch.tensor(
            [
                [[1, 0, 1], [0, -1, -1], [0, 1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                [[0, 1, 0], [1, 0, 0], [-1, -1, -1]],
            ],
            dtype=torch.float32,
        )
        # keep the generators together with their inverses
        pool = torch.cat((base, torch.inverse(base)), dim=0)
        self.generators = nn.Parameter(pool, requires_grad=False)

    @property
    def device(self):
        return self.generators.device

    def forward(self, batch_size, e=5):
        """Return ``batch_size`` random SL(3, Z) matrices (products of 2**e picks)."""
        count = batch_size * (1 << e)
        picks = torch.randint(
            0, self.generators.shape[0], (count,), device=self.device
        )
        prod = self.generators[picks]
        for _ in range(e):
            # pairwise multiply: halves the number of matrices each pass
            prod = prod.view(2, -1, 3, 3)
            prod = torch.bmm(prod[0], prod[1])
        # round away float error so entries stay exact integers
        return torch.round(prod)
def apply_sl3z(g, rho, x, batch):
    """Change of basis by ``g`` in SL(3, Z).

    Lattices transform as ``rho @ g^{-1}``; fractional coordinates as
    ``(g x) mod 1`` with ``batch`` mapping atoms to their structure's g.
    """
    new_rho = torch.bmm(rho, torch.inverse(g))
    new_x = torch.bmm(g[batch], x.unsqueeze(2)).squeeze(2) % 1
    return new_rho, new_x
class RandomSLZ(nn.Module):
    """Randomly re-parametrise lattices by an SL(3, Z) change of basis."""

    def __init__(self):
        super().__init__()
        self.generator = RandomMatrixSL3Z()

    def forward(self, rho, x, batch):
        """Apply one fresh random g per structure: rho -> rho g, x -> (g^{-1} x) mod 1."""
        g = self.generator(rho.shape[0])
        new_rho = torch.bmm(rho, g)
        g_inv = torch.inverse(g)
        new_x = torch.bmm(g_inv[batch], x.unsqueeze(2)).squeeze(2) % 1
        return new_rho, new_x
| 1,510 | 25.982143 | 85 | py |
pegnn | pegnn-master/src/models/layers/operator/gnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter
from typing import Tuple
from src.utils.geometry import Geometry
from src.utils.shape import build_shapes, assert_tensor_match, shape
from src.models.layers.operator.operator import Operator, make_operator
class EdgeProj(nn.Module):
    """MLP over edge inputs ``(h_src, h_dst, RBF(d_ij))``.

    Edge lengths are expanded on a Gaussian radial basis with centres every
    ``step`` up to ``cutoff`` before being concatenated with the endpoint
    features and fed to the MLP.
    """

    def __init__(
        self,
        features: int,
        hidden_dim: int,
        n_layers: int,
        output_dim: int,
        cutoff: float = 10,
        step: float = 0.1,
        bias: bool = False,
    ):
        super(EdgeProj, self).__init__()
        if output_dim is None:
            output_dim = features
        self.cutoff = cutoff
        self.step = step
        # RBF centres; a Parameter so they follow .to(device)
        self.mu = nn.Parameter(
            torch.arange(0, self.cutoff, self.step, dtype=torch.float32)
        )
        blocks = []
        in_dim = 2 * features + self.mu.shape[0]
        for _ in range(n_layers + 1):
            blocks += [nn.Linear(in_dim, hidden_dim, bias=False), nn.SiLU()]
            in_dim = hidden_dim
        blocks.append(nn.Linear(hidden_dim, output_dim, bias=bias))
        self.mlp = nn.Sequential(*blocks)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialise every linear layer of the MLP."""
        for module in self.mlp:
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_normal_(module.weight)

    def get_last_layer(self):
        """Return the final nn.Linear (the output projection)."""
        last = None
        for module in self.mlp:
            if isinstance(module, nn.Linear):
                last = module
        return last

    def forward(self, h, src, dst, edge_norm):
        # Gaussian RBF expansion of the edge length
        rbf = torch.exp(
            -(self.mu[None, :] - edge_norm[:, None]).pow(2) / self.step
        )
        return self.mlp(torch.cat((h[src], h[dst], rbf), dim=-1))
class FaceProj(nn.Module):
    """MLP over triplet (angular) features.

    Embeds the two distances of a triplet (i->j and i->k) with Gaussian
    radial basis functions, concatenates them with the features of the three
    involved nodes and the cosine/sine of the enclosed angle, and maps the
    result through an MLP of SiLU-activated linear layers.
    """

    def __init__(
        self,
        features: int,
        hidden_dim: int,
        n_layers: int,
        output_dim: int,
        cutoff: float = 10,
        step: float = 0.1,
        bias: bool = False,
    ):
        super(FaceProj, self).__init__()
        if output_dim is None:
            output_dim = features
        self.cutoff = cutoff
        self.step = step
        # RBF centres, spaced `step` apart on [0, cutoff); trainable (see
        # the matching note on EdgeProj.mu).
        self.mu = nn.Parameter(
            torch.arange(0, self.cutoff, self.step, dtype=torch.float32)
        )
        # Input: 3 node feature vectors + 2 distance embeddings + cos + sin.
        layers = [
            nn.Linear(3 * features + self.mu.shape[0] * 2 + 2, hidden_dim, bias=False),
            nn.SiLU(),
        ]
        for i in range(n_layers):
            layers.extend([nn.Linear(hidden_dim, hidden_dim, bias=False), nn.SiLU()])
        # Only the final projection optionally carries a bias.
        layers.append(nn.Linear(hidden_dim, output_dim, bias=bias))
        self.mlp = nn.Sequential(*layers)
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier-initialise every linear layer of the MLP.
        for layer in self.mlp:
            if isinstance(layer, nn.Linear):
                torch.nn.init.xavier_normal_(layer.weight)

    def get_last_layer(self):
        # Return the last nn.Linear of the MLP (the output projection).
        last_layer = None
        for layer in self.mlp:
            if isinstance(layer, nn.Linear):
                last_layer = layer
        return last_layer

    def forward(self, h, triplets, norm_ij, norm_ik, cos_ijk, sin_ijk):
        """Project each triplet (src, dst_i, dst_j) to output_dim.

        Args:
            h: (n, features) node features.
            triplets: triplet index structure with .src, .dst_i, .dst_j.
            norm_ij, norm_ik: (t,) lengths of the two triplet edges.
            cos_ijk, sin_ijk: (t,) cosine and sine of the enclosed angle.
        """
        # Gaussian RBF expansion of both edge lengths.
        d_ij_emb = torch.exp(
            -1 / self.step * (self.mu[None, :] - norm_ij[:, None]).pow(2)
        )
        d_ik_emb = torch.exp(
            -1 / self.step * (self.mu[None, :] - norm_ik[:, None]).pow(2)
        )
        inputs = torch.cat(
            (
                h[triplets.src],
                h[triplets.dst_i],
                h[triplets.dst_j],
                d_ij_emb,
                d_ik_emb,
                cos_ijk.unsqueeze(1),
                sin_ijk.unsqueeze(1),
            ),
            dim=1,
        )
        return self.mlp(inputs)
class UpdateFeatures(nn.GRU):
    """Single-step GRU node update: the message is the input, ``h`` the state."""

    def __init__(self, features: int):
        # One GRU layer with identical input/hidden size, sequence-first layout.
        super(UpdateFeatures, self).__init__(features, features, 1, batch_first=False)

    def forward(self, h: torch.FloatTensor, mi: torch.FloatTensor):
        """Return updated node features after one GRU step on message ``mi``."""
        seq_in = mi.unsqueeze(0)    # (1, n, features): length-1 input sequence
        state_in = h.unsqueeze(0)   # (1, n, features): initial hidden state
        _, state_out = nn.GRU.forward(self, seq_in, state_in)
        return state_out.squeeze(0)
class Actions(nn.Module):
    """Predict operator weights per edge/triplet and act on the lattice.

    ``self.ops`` produces a stack of candidate 3x3 generators for each edge
    and each triplet. Learned weights (from :class:`EdgeProj` /
    :class:`FaceProj`) combine those generators; the combination is
    aggregated per crystal, optionally saturated, shifted by the identity
    and applied to the cell matrix.
    """

    def __init__(
        self,
        features: int,
        knn: int,
        ops_config: dict,
        scale_k: float,
        hidden_dim: int,
        n_layers: int,
        limit_actions: float,
        reduce_rho: str,
    ):
        super(Actions, self).__init__()
        self.ops = make_operator(ops_config)
        self.knn = knn
        self.limit_actions = limit_actions
        self.reduce_rho = reduce_rho
        # One weight channel per edge operator / triplet operator.
        self.interact_edges = EdgeProj(
            features,
            output_dim=self.ops.edges_dim,
            hidden_dim=hidden_dim,
            n_layers=n_layers,
            bias=True,
        )
        self.interact_triplets = FaceProj(
            features,
            output_dim=self.ops.triplets_dim,
            hidden_dim=hidden_dim,
            n_layers=n_layers,
            bias=True,
        )
        self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        self.scale_k = scale_k
        self.act_scale = nn.Tanh()
        self.reset_parameters()

    def reset_parameters(self):
        self.interact_edges.reset_parameters()
        self.interact_triplets.reset_parameters()

    def apply(
        self,
        geometry: Geometry,
        edges_weights: torch.FloatTensor,
        triplets_weights: torch.FloatTensor,
        check_tensor: bool = True,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Combine weighted generators into one action per crystal and apply it.

        Returns:
            rho_prime: (b, 3, 3) transformed cells.
            actions_rho: (b, 3, 3) the applied linear actions (I + aggregate).

        Raises:
            ValueError: if the operator set defines neither edge nor triplet
                operators.
        """
        # type checking and size evaluation
        if check_tensor:
            shapes = assert_tensor_match(
                (geometry.cell, shape("b", 3, 3, dtype=torch.float32)),
                (geometry.batch, shape("n", dtype=torch.long)),
                (geometry.edges.src, shape("e", dtype=torch.long)),
                (geometry.edges.dst, shape("e", dtype=torch.long)),
                (geometry.edges.cell, shape("e", 3, dtype=torch.long)),
                (geometry.triplets.src, shape("t", dtype=torch.long)),
                (geometry.triplets.dst_i, shape("t", dtype=torch.long)),
                (geometry.triplets.cell_i, shape("t", 3, dtype=torch.long)),
                (geometry.triplets.dst_j, shape("t", dtype=torch.long)),
                (geometry.triplets.cell_j, shape("t", 3, dtype=torch.long)),
            )
        else:
            shapes = build_shapes(
                {
                    "b": geometry.cell.shape[0],
                    "n": geometry.batch.shape[0],
                    "e": geometry.edges.src.shape[0],
                    "t": geometry.triplets.src.shape[0],
                }
            )
        # calculating candidate generators per edge / triplet
        edges_ops, triplets_ops = self.ops.forward(geometry)
        # aggregation: weight the generators, then reduce per crystal
        if edges_ops is not None:
            weighted_ops = (edges_ops * edges_weights[:, :, None, None]).sum(dim=1)
            actions_edges = scatter(
                weighted_ops,
                geometry.batch_edges,
                dim=0,
                dim_size=shapes.b,
                reduce=self.reduce_rho,
            )
        else:
            actions_edges = None
        if triplets_ops is not None:
            weighted_ops = (triplets_ops * triplets_weights[:, :, None, None]).sum(
                dim=1
            )
            actions_triplets = scatter(
                weighted_ops,
                geometry.batch_triplets,
                dim=0,
                dim_size=shapes.b,
                reduce=self.reduce_rho,
            )
        else:
            actions_triplets = None
        # combine edge and triplet contributions into the final action
        if (actions_edges is not None) and (actions_triplets is not None):
            actions_rho = actions_edges + actions_triplets
        elif actions_edges is not None:
            actions_rho = actions_edges
        elif actions_triplets is not None:
            actions_rho = actions_triplets
        else:
            # Bug fix: previously `actions_rho` was left unbound here, raising
            # a confusing UnboundLocalError when the operator set was empty.
            raise ValueError(
                "Actions.apply requires at least one edge or triplet operator"
            )
        if self.limit_actions != 0.0:
            # Saturate the action magnitude with a scaled tanh.
            actions_rho = self.limit_actions * torch.tanh(
                actions_rho / self.limit_actions
            )
        # Shift by the identity so a zero aggregate leaves the cell unchanged.
        actions_rho = self.I + actions_rho
        rho_prime = torch.bmm(actions_rho, geometry.cell)
        return rho_prime, actions_rho

    def forward(
        self,
        geometry: Geometry,
        h: torch.FloatTensor,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Compute per-edge and per-triplet operator weights from node features.

        Either entry of the returned tuple is None when the corresponding
        operator set is empty.
        """
        if self.ops.edges_dim > 0:
            edges_weights = self.interact_edges(
                h, geometry.edges.src, geometry.edges.dst, geometry.edges_r_ij
            )
        else:
            edges_weights = None
        if self.ops.triplets_dim > 0:
            triplets_weights = self.interact_triplets(
                h,
                geometry.triplets,
                geometry.triplets_r_ij,
                geometry.triplets_r_ik,
                geometry.triplets_cos_ijk,
                geometry.triplets_sin_ijk,
            )
        else:
            triplets_weights = None
        if self.scale_k != 0.0:
            # Optionally bound the weights in (-scale_k, scale_k) via tanh.
            if edges_weights is not None:
                edges_weights = self.scale_k * self.act_scale(edges_weights)
            if triplets_weights is not None:
                triplets_weights = self.scale_k * self.act_scale(triplets_weights)
        return edges_weights, triplets_weights
class MPNN(nn.Module):
    """One round of message passing: edge messages -> mean aggregate -> GRU update."""

    def __init__(self, features: int):
        super(MPNN, self).__init__()
        # Message function over edges (no hidden layers beyond the input one).
        self.message_f = EdgeProj(
            features, hidden_dim=features, n_layers=0, output_dim=features
        )
        # GRU-style node state update.
        self.update_f = UpdateFeatures(features)
        self.reset_parameters()

    def reset_parameters(self):
        self.message_f.reset_parameters()
        self.update_f.reset_parameters()

    def forward(self, geometry: Geometry, h: torch.FloatTensor):
        """Return updated node features after one message-passing step."""
        edges = geometry.edges
        # Per-edge messages from endpoint features and edge length.
        per_edge = self.message_f(h, edges.src, edges.dst, geometry.edges_r_ij)
        # Mean-aggregate the messages onto the source node of each edge.
        aggregated = scatter(
            per_edge, edges.src, dim=0, reduce="mean", dim_size=h.shape[0]
        )
        return self.update_f(h, aggregated)
| 10,092 | 29.492447 | 88 | py |
pegnn | pegnn-master/src/models/layers/operator/operator.py | import torch
import torch.nn as nn
from src.utils.geometry import Geometry
from src.models.layers.operator.grad import Grad
import enum
from typing import List
import abc
class Operator(nn.Module):
    """Base class bundling per-edge and per-triplet geometric operators.

    Subclasses implement :meth:`forward`, returning two stacks of 3x3
    matrices (one per operator) for every edge and triplet of the geometry.
    """

    def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
        super().__init__()
        self.operators_edges = operators_edges
        self.operators_triplets = operators_triplets
        # normalize=True makes subclasses use unit vectors (u_*) instead of
        # raw displacement vectors (v_*).
        self.normalize = normalize

    @property
    def edges_dim(self):
        # Number of edge operators (channels produced per edge).
        return len(self.operators_edges)

    @property
    def triplets_dim(self):
        # Number of triplet operators (channels produced per triplet).
        return len(self.operators_triplets)

    def forward(self, geometry):
        """Return (edges_ops, triplets_ops); implemented by subclasses."""
        raise NotImplementedError
class OpKetBra(Operator):
    """Outer-product ("|u><v|") operators built from edge/triplet vectors.

    Each operator maps one or two 3-vectors (per edge or per triplet) to a
    3x3 matrix via an outer product; the stacks of these matrices are the
    candidate generators consumed by :class:`Actions`.
    """

    class AbstractOperator(metaclass=abc.ABCMeta):
        # NOTE: abc.abstractclassmethod is deprecated; kept for parity with
        # the rest of the file.
        @abc.abstractclassmethod
        def op(
            self, u: torch.FloatTensor, v: torch.FloatTensor = None
        ) -> torch.FloatTensor:
            pass

    class OperatorVij(AbstractOperator):
        def op(self, u, v=None):
            # |u><u|
            return torch.bmm(u.unsqueeze(2), u.unsqueeze(1))

    class OperatorVik(AbstractOperator):
        def op(self, u, v=None):
            # |v><v|
            return torch.bmm(v.unsqueeze(2), v.unsqueeze(1))

    class OperatorVijk(AbstractOperator):
        def op(self, u, v=None):
            # |u><v|
            return torch.bmm(u.unsqueeze(2), v.unsqueeze(1))

    class OperatorVikj(AbstractOperator):
        def op(self, u, v=None):
            # |v><u|
            return torch.bmm(v.unsqueeze(2), u.unsqueeze(1))

    class OperatorVijkSym(AbstractOperator):
        def op(self, u, v=None):
            # Symmetrised outer product (|u><v| + |v><u|) / 2
            return 0.5 * (
                torch.bmm(u.unsqueeze(2), v.unsqueeze(1))
                + torch.bmm(v.unsqueeze(2), u.unsqueeze(1))
            )

    def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
        assert isinstance(normalize, bool)
        assert isinstance(operators_edges, set)
        assert isinstance(operators_triplets, set)
        # Edges only carry a single vector, so only |u><u| makes sense there.
        for op in operators_edges:
            assert isinstance(op, (OpKetBra.OperatorVij,))
        for op in operators_triplets:
            assert isinstance(op, OpKetBra.AbstractOperator)
        super().__init__(
            operators_edges=operators_edges,
            operators_triplets=operators_triplets,
            normalize=normalize,
        )

    def forward(self, geometry):
        """Return stacked operator matrices: (e, n_ops, 3, 3) and (t, n_ops, 3, 3)."""
        if self.normalize:
            edge_mats = [op.op(geometry.edges_u_ij) for op in self.operators_edges]
            trip_mats = [
                op.op(geometry.triplets_u_ij, geometry.triplets_u_ik)
                for op in self.operators_triplets
            ]
        else:
            edge_mats = [op.op(geometry.edges_v_ij) for op in self.operators_edges]
            trip_mats = [
                op.op(geometry.triplets_v_ij, geometry.triplets_v_ik)
                for op in self.operators_triplets
            ]
        edges_ops = torch.stack(edge_mats, dim=1) if len(edge_mats) > 0 else None
        triplets_ops = torch.stack(trip_mats, dim=1) if len(trip_mats) > 0 else None
        return edges_ops, triplets_ops
class OpSymSkew(Operator):
    """Rank-one "sum" operators: M[a, b] = u[a] + v[b] from edge/triplet vectors.

    NOTE(review): despite the "sym-skew" name, every operator below uses `+`;
    no skew-symmetric (minus) combination is formed. Also, `OperatorVijkSym`
    algebraically equals the average of `OperatorVijk` and `OperatorVikj`
    (0.5 * ((u+v)[a] + (u+v)[b])), so it adds no independent channel -- confirm
    whether a `-` was intended somewhere.
    """

    class AbstractOperator(metaclass=abc.ABCMeta):
        # NOTE: abc.abstractclassmethod is deprecated in favour of
        # @classmethod + @abstractmethod; kept as-is here.
        @abc.abstractclassmethod
        def op(
            self, u: torch.FloatTensor, v: torch.FloatTensor = None
        ) -> torch.FloatTensor:
            pass

    class OperatorVij(AbstractOperator):
        def op(self, u, v=None):
            # M[a, b] = u[a] + u[b] (symmetric)
            return u[:, :, None]+u[:, None, :]

    class OperatorVik(AbstractOperator):
        def op(self, u, v=None):
            # M[a, b] = v[a] + v[b] (symmetric)
            return v[:, :, None]+v[:, None, :]

    class OperatorVijk(AbstractOperator):
        def op(self, u, v=None):
            # M[a, b] = u[a] + v[b]
            return u[:, :, None]+v[:, None, :]

    class OperatorVikj(AbstractOperator):
        def op(self, u, v=None):
            # M[a, b] = v[a] + u[b]
            return v[:, :, None]+u[:, None, :]

    class OperatorVijkSym(AbstractOperator):
        def op(self, u, v=None):
            # M[a, b] = 0.5 * (u[a] + v[b] + v[a] + u[b]) (symmetric)
            return 0.5 * (
                u[:, :, None]+v[:, None, :] + v[:, :, None]+u[:, None, :]
            )

    def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
        assert isinstance(normalize, bool)
        assert isinstance(operators_edges, set)
        assert isinstance(operators_triplets, set)
        # Edges only carry a single vector, so only the u-only operator applies.
        for op in operators_edges:
            assert isinstance(op, (OpSymSkew.OperatorVij,))
        for op in operators_triplets:
            assert isinstance(op, OpSymSkew.AbstractOperator)
        super().__init__(
            operators_edges=operators_edges,
            operators_triplets=operators_triplets,
            normalize=normalize,
        )

    def forward(self, geometry):
        """Return stacked operator matrices for edges and triplets (or None)."""
        edges_ops = []
        triplets_ops = []
        for op in self.operators_edges:
            # normalize selects unit vectors (u_*) over raw displacements (v_*).
            if self.normalize:
                edges_ops.append(op.op(geometry.edges_u_ij))
            else:
                edges_ops.append(op.op(geometry.edges_v_ij))
        for op in self.operators_triplets:
            if self.normalize:
                triplets_ops.append(
                    op.op(geometry.triplets_u_ij, geometry.triplets_u_ik)
                )
            else:
                triplets_ops.append(
                    op.op(geometry.triplets_v_ij, geometry.triplets_v_ik)
                )
        if len(edges_ops) > 0:
            edges_ops = torch.stack(edges_ops, dim=1)
        else:
            edges_ops = None
        if len(triplets_ops) > 0:
            triplets_ops = torch.stack(triplets_ops, dim=1)
        else:
            triplets_ops = None
        return edges_ops, triplets_ops
class OpGrad(Operator):
    """Operators built from analytic gradients of geometric quantities.

    Each operator wraps one of the closed-form gradients implemented in
    :class:`Grad` (distance, angle, triangle area, and their symmetrised
    variants) with respect to the group action, evaluated per edge or per
    triplet.
    """

    class AbstractOperator(metaclass=abc.ABCMeta):
        def __init__(self):
            # Injected after construction via set_grad (shared Grad instance).
            self.grad = None

        def set_grad(self, grad):
            self.grad = grad

        # NOTE: abc.abstractclassmethod is deprecated; kept for file parity.
        @abc.abstractclassmethod
        def op(
            self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None
        ) -> torch.FloatTensor:
            pass

    class OperatorNormij(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            # Gradient of |cell @ x_ij| w.r.t. the group action.
            return self.grad.grad_distance(cell, x_ij)[0]

    class OperatorNormijSym(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            return self.grad.grad_distance_sym(cell, x_ij)[0]

    class OperatorNormik(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            return self.grad.grad_distance(cell, x_ik)[0]

    class OperatorNormikSym(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            return self.grad.grad_distance_sym(cell, x_ik)[0]

    class OperatorAngle(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            # Gradient of the angle between cell @ x_ij and cell @ x_ik.
            return self.grad.grad_angle(cell, x_ij, x_ik)[0]

    class OperatorAngleSym(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            return self.grad.grad_angle_sym(cell, x_ij, x_ik)[0]

    class OperatorArea(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            # Gradient of the triangle area spanned by the two edge vectors.
            return self.grad.grad_area(cell, x_ij, x_ik)[0]

    class OperatorAreaSym(AbstractOperator):
        def op(self, cell: torch.FloatTensor, x_ij: torch.FloatTensor, x_ik: torch.FloatTensor = None):
            return self.grad.grad_area_sym(cell, x_ij, x_ik)[0]

    def __init__(self, operators_edges, operators_triplets, normalize: bool = True):
        assert isinstance(normalize, bool)
        assert isinstance(operators_edges, set)
        assert isinstance(operators_triplets, set)
        # Edges only have one displacement, so only the ij-norm gradients apply.
        for op in operators_edges:
            assert isinstance(
                op, (OpGrad.OperatorNormij, OpGrad.OperatorNormijSym))
        for op in operators_triplets:
            assert isinstance(op, OpGrad.AbstractOperator)
        super().__init__(
            operators_edges=operators_edges,
            operators_triplets=operators_triplets,
            normalize=normalize,
        )
        # Share a single Grad instance across all operators.
        self.grad = Grad()
        for op in self.operators_edges:
            op.set_grad(self.grad)
        for op in self.operators_triplets:
            op.set_grad(self.grad)

    def forward(self, geometry):
        """Return stacked gradient matrices for edges and triplets (or None).

        NOTE(review): unlike the other Operator subclasses, `normalize` is
        not consulted here -- gradients are always taken on the raw
        displacement vectors (e_ij / e_ik).
        """
        edges_ops = []
        triplets_ops = []
        for op in self.operators_edges:
            edges_ops.append(
                op.op(geometry.cell[geometry.batch_edges], geometry.edges_e_ij))
        for op in self.operators_triplets:
            triplets_ops.append(
                op.op(geometry.cell[geometry.batch_triplets],
                      geometry.triplets_e_ij, geometry.triplets_e_ik)
            )
        if len(edges_ops) > 0:
            edges_ops = torch.stack(edges_ops, dim=1)
        else:
            edges_ops = None
        if len(triplets_ops) > 0:
            triplets_ops = torch.stack(triplets_ops, dim=1)
        else:
            triplets_ops = None
        return edges_ops, triplets_ops
def make_operator(config):
    """Build an :class:`Operator` instance from a config dict.

    Args:
        config: dict with keys
            "type": one of "ket-bra", "sym-skew", "grad";
            "normalize": bool, passed through to the operator;
            "edges": list of edge-operator names (restricted per type);
            "triplets": list of triplet-operator names.

    Returns:
        The constructed OpKetBra / OpSymSkew / OpGrad instance.

    Raises:
        AssertionError: on missing keys, unknown type, or operator names not
            allowed for the chosen type (same contract as before).
    """
    assert "type" in config
    assert "normalize" in config
    assert "edges" in config
    assert "triplets" in config
    assert config["type"] in ["ket-bra", "sym-skew", "grad"]
    # One registry entry per type: (operator class, name -> op mapping,
    # names allowed on edges). This replaces three copy-pasted branches.
    registry = {
        "ket-bra": (
            OpKetBra,
            {
                "v_ij": OpKetBra.OperatorVij,
                "v_ik": OpKetBra.OperatorVik,
                "v_ijk": OpKetBra.OperatorVijk,
                "v_ikj": OpKetBra.OperatorVikj,
                "v_ijk_sym": OpKetBra.OperatorVijkSym,
            },
            ["v_ij"],
        ),
        "sym-skew": (
            OpSymSkew,
            {
                "v_ij": OpSymSkew.OperatorVij,
                "v_ik": OpSymSkew.OperatorVik,
                "v_ijk": OpSymSkew.OperatorVijk,
                "v_ikj": OpSymSkew.OperatorVikj,
                "v_ijk_sym": OpSymSkew.OperatorVijkSym,
            },
            ["v_ij"],
        ),
        "grad": (
            OpGrad,
            {
                "n_ij": OpGrad.OperatorNormij,
                "n_ij_sym": OpGrad.OperatorNormijSym,
                "n_ik": OpGrad.OperatorNormik,
                "n_ik_sym": OpGrad.OperatorNormikSym,
                "angle": OpGrad.OperatorAngle,
                "angle_sym": OpGrad.OperatorAngleSym,
                "area": OpGrad.OperatorArea,
                "area_sym": OpGrad.OperatorAreaSym,
            },
            ["n_ij", "n_ij_sym"],
        ),
    }
    op_cls, ops_dict, allowed_edges = registry[config["type"]]
    operators_edges = set()
    operators_triplets = set()
    for op in config["edges"]:
        assert op in allowed_edges
        operators_edges.add(ops_dict[op]())
    for op in config["triplets"]:
        assert op in ops_dict
        operators_triplets.add(ops_dict[op]())
    return op_cls(operators_edges, operators_triplets, normalize=config["normalize"])
| 11,933 | 31.254054 | 103 | py |
pegnn | pegnn-master/src/models/layers/operator/grad_unittest.py | import torch
import torch.nn as nn
from torch.autograd.functional import jacobian
from .grad import Grad
import unittest
import time
class TestGrad(unittest.TestCase):
    """Checks Grad's closed-form Jacobians against torch.autograd.functional.

    Fix: `test_atan2` was defined twice with byte-identical bodies; the
    second definition silently shadowed the first, so the test only ran
    once. The duplicate has been removed.
    """

    batch_size = 1024
    verbose = True

    def log(self, *args, **kwargs):
        # Print only when class-level verbosity is enabled.
        if TestGrad.verbose:
            print(*args, **kwargs)

    def assertAlmostEqualsTensors(self, x, y, places):
        # Compare tensors by their max absolute elementwise error.
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x)
        if not isinstance(y, torch.Tensor):
            y = torch.tensor(y)
        max_error = (x-y).abs().max().item()
        self.log(f"max error: {max_error:.5f}")
        self.assertAlmostEqual(max_error, 0, places)

    def batched_jacobian(self, fn, inputs):
        # Reference Jacobians: apply autograd's jacobian per batch element,
        # then stack along dim 0.
        lst_grad = [[] for _ in inputs]
        for vars in zip(*inputs):
            grad_vars = jacobian(fn, vars)
            for idx, g in enumerate(grad_vars):
                lst_grad[idx].append(g)
        for idx, g in enumerate(lst_grad):
            lst_grad[idx] = torch.stack(g, dim=0)
        return tuple(lst_grad)

    def test_norm(self):
        torch.manual_seed(0)
        x = torch.randn(TestGrad.batch_size, 3)
        grad_fn = Grad()
        (gt_grad_x,) = self.batched_jacobian(lambda u: u.norm(), (x,))
        grad_x = grad_fn.jacobian_norm(x)
        self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)

    def test_dot(self):
        torch.manual_seed(0)
        x = torch.randn(TestGrad.batch_size, 3)
        y = torch.randn(TestGrad.batch_size, 3)
        grad_fn = Grad()
        (gt_grad_x, gt_grad_y) = self.batched_jacobian(
            lambda x, y: x.dot(y), (x, y))
        grad_x, grad_y = grad_fn.jacobian_dot(x, y)
        self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)
        self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=6)

    def test_cross_norm(self):
        torch.manual_seed(0)
        x = torch.randn(TestGrad.batch_size, 3)
        y = torch.randn(TestGrad.batch_size, 3)
        grad_fn = Grad()
        (gt_grad_x, gt_grad_y) = self.batched_jacobian(
            lambda x, y: torch.cross(x, y).norm(), (x, y))
        grad_x, grad_y = grad_fn.jacobian_cross_norm(x, y)
        self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=6)
        self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=6)

    def test_matrix_vector(self):
        torch.manual_seed(0)
        m = torch.randn(TestGrad.batch_size, 3, 3)
        u = torch.randn(TestGrad.batch_size, 3)
        grad_fn = Grad()
        (gt_grad_m, gt_grad_u) = self.batched_jacobian(
            lambda x, y: x @ y, (m, u))
        grad_m, grad_u = grad_fn.jacobian_mu(m, u)
        self.assertAlmostEqualsTensors(gt_grad_m, grad_m, places=6)
        self.assertAlmostEqualsTensors(gt_grad_u, grad_u, places=6)
        # jacobian_m should agree with the matrix part of jacobian_mu.
        grad_m = grad_fn.jacobian_m(u)
        self.assertAlmostEqualsTensors(gt_grad_m, grad_m, places=6)

    def test_atan2(self):
        # Was duplicated verbatim below; the copy has been deleted.
        torch.manual_seed(0)
        x = torch.randn(TestGrad.batch_size)
        y = torch.randn(TestGrad.batch_size)
        grad_fn = Grad()
        (gt_grad_y, gt_grad_x) = self.batched_jacobian(
            lambda y, x: torch.atan2(y, x), (y, x))
        grad_y, grad_x = grad_fn.jacobian_atan2(y, x)
        self.assertAlmostEqualsTensors(gt_grad_x, grad_x, places=5)
        self.assertAlmostEqualsTensors(gt_grad_y, grad_y, places=5)

    def test_angle_vector(self):
        torch.manual_seed(0)
        u = torch.randn(TestGrad.batch_size, 3)
        v = torch.randn(TestGrad.batch_size, 3)
        grad_fn = Grad()
        (gt_grad_u, gt_grad_v) = self.batched_jacobian(
            lambda x, y: torch.atan2(torch.cross(x, y).norm(), x.dot(y)),
            (u, v))
        grad_u, grad_v = grad_fn.jacobian_angle_vector(u, v)
        self.assertAlmostEqualsTensors(gt_grad_u, grad_u, places=5)
        self.assertAlmostEqualsTensors(gt_grad_v, grad_v, places=5)

    def test_distance(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)

        def get_distance(g, rho, xij):
            u = g @ rho @ xij
            return u.norm()
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j = grad_fn.grad_distance(
            rho, x_ij, g=g)
        t1 = time.time()
        self.log(f"grad distance {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij) = self.batched_jacobian(
            lambda g, rho, xij: get_distance(
                g, rho, xij), (g, rho, x_ij)
        )
        # d/dx_i = -d/dx_ij and d/dx_j = +d/dx_ij since x_ij = x_j - x_i.
        gt_grad_x_i = -gt_grad_x_ij
        gt_grad_x_j = gt_grad_x_ij
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)

    def test_distance_sym(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)

        def get_distance(g, rho, xij):
            u = (g + g.t()) @ rho @ xij
            return u.norm()
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j = grad_fn.grad_distance_sym(
            rho, x_ij, g=g)
        t1 = time.time()
        self.log(f"grad distance sym {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij) = self.batched_jacobian(
            lambda g, rho, xij: get_distance(
                g, rho, xij), (g, rho, x_ij)
        )
        gt_grad_x_i = -gt_grad_x_ij
        gt_grad_x_j = gt_grad_x_ij
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)

    def test_area(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)
        x_ik = torch.randn(TestGrad.batch_size, 3)

        def get_area(g, rho, xij, xik):
            u = g @ rho @ xij
            v = g @ rho @ xik
            return 0.5 * torch.cross(u, v).norm()
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_area(
            rho, x_ij, x_ik, g=g)
        t1 = time.time()
        self.log(f"grad area {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
            lambda g, rho, xij, xik: get_area(
                g, rho, xij, xik), (g, rho, x_ij, x_ik)
        )
        gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
        gt_grad_x_j = gt_grad_x_ij
        gt_grad_x_k = gt_grad_x_ik
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=2)

    def test_area_sym(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)
        x_ik = torch.randn(TestGrad.batch_size, 3)

        def get_area(g, rho, xij, xik):
            u = (g + g.t()) @ rho @ xij
            v = (g + g.t()) @ rho @ xik
            return 0.5 * torch.cross(u, v).norm()
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_area_sym(
            rho, x_ij, x_ik, g=g)
        t1 = time.time()
        self.log(f"grad area sym {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
            lambda g, rho, xij, xik: get_area(
                g, rho, xij, xik), (g, rho, x_ij, x_ik)
        )
        gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
        gt_grad_x_j = gt_grad_x_ij
        gt_grad_x_k = gt_grad_x_ik
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=2)
        self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=2)

    def test_angle(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)
        x_ik = torch.randn(TestGrad.batch_size, 3)

        def get_angle(g, rho, xij, xik):
            u = g @ rho @ xij
            v = g @ rho @ xik
            return torch.atan2(torch.cross(u, v).norm(), u.dot(v))
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_angle(
            rho, x_ij, x_ik, g=g)
        t1 = time.time()
        self.log(f"grad angle {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
            lambda g, rho, xij, xik: get_angle(
                g, rho, xij, xik), (g, rho, x_ij, x_ik)
        )
        gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
        gt_grad_x_j = gt_grad_x_ij
        gt_grad_x_k = gt_grad_x_ik
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=4)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=4)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=4)
        self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=4)

    def test_angle_sym(self):
        torch.manual_seed(0)
        grad_fn = Grad()
        g = torch.randn(TestGrad.batch_size, 3, 3)
        rho = torch.matrix_exp(torch.randn(TestGrad.batch_size, 3, 3))
        x_ij = torch.randn(TestGrad.batch_size, 3)
        x_ik = torch.randn(TestGrad.batch_size, 3)

        def get_angle(g, rho, xij, xik):
            u = (g + g.t()) @ rho @ xij
            v = (g + g.t()) @ rho @ xik
            return torch.atan2(torch.cross(u, v).norm(), u.dot(v))
        t0 = time.time()
        grad_g, grad_x_i, grad_x_j, grad_x_k = grad_fn.grad_angle_sym(
            rho, x_ij, x_ik, g=g)
        t1 = time.time()
        self.log(f"grad angle sym {t1-t0:.6f}sec")
        (gt_grad_g, _, gt_grad_x_ij, gt_grad_x_ik) = self.batched_jacobian(
            lambda g, rho, xij, xik: get_angle(
                g, rho, xij, xik), (g, rho, x_ij, x_ik)
        )
        gt_grad_x_i = -(gt_grad_x_ij+gt_grad_x_ik)
        gt_grad_x_j = gt_grad_x_ij
        gt_grad_x_k = gt_grad_x_ik
        self.assertAlmostEqualsTensors(gt_grad_g, grad_g, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_i, grad_x_i, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_j, grad_x_j, places=3)
        self.assertAlmostEqualsTensors(gt_grad_x_k, grad_x_k, places=3)
# Allow running this test module directly: `python grad_unittest.py`.
if __name__ == "__main__":
    unittest.main()
| 11,560 | 31.566197 | 75 | py |
pegnn | pegnn-master/src/models/layers/operator/grad.py | import torch
import torch.nn as nn
class Grad(nn.Module):
    """Closed-form Jacobians of geometric quantities of a crystal.

    Provides analytic gradients of distances, angles, and triangle areas of
    cell vectors with respect to a 3x3 group action ``g`` and to atomic
    positions. All inputs are batched along dim 0; correctness is checked
    against autograd in grad_unittest.py.
    """

    def __init__(self):
        super().__init__()
        # 3x3 identity, used to build the Jacobian d(Mu)/dM.
        self.I = nn.Parameter(torch.eye(3), requires_grad=False)
        # Basis K[i] such that (K[i] y)_j gives the cross-product components:
        # used to differentiate |x × y|.
        self.K = nn.Parameter(torch.tensor([[[0, 0, 0], [0, 0, 1], [0, -1, 0]], [[0, 0, -1], [0, 0, 0], [
            1, 0, 0]], [[0, 1, 0], [-1, 0, 0], [0, 0, 0]]], dtype=torch.float32), requires_grad=False)

    def jacobian_atan2(self, y, x):
        # d atan2(y, x)/dx = -y/(x^2+y^2); d atan2(y, x)/dy = x/(x^2+y^2).
        # Returned in (dy, dx) order to mirror atan2's argument order.
        diff_x = -y / (x ** 2 + y ** 2)
        diff_y = x / (x ** 2 + y ** 2)
        return diff_y, diff_x

    def jacobian_dot(self, x, y):
        # d(x·y)/dx = y, d(x·y)/dy = x.
        return y.clone(), x.clone()

    def jacobian_norm(self, x):
        # d|x|/dx = x / |x|.
        return x / x.norm(dim=1)[:, None]

    def jacobian_cross_norm(self, x, y):
        # Chain rule through the cross product: d|x×y|/dx and d|x×y|/dy,
        # with the cross product expressed via the K basis.
        diff_cross_x = (self.K[None]*y[:, None, None, :]).sum(dim=3)
        diff_cross_y = -(self.K[None]*x[:, None, None, :]).sum(dim=3)
        diff_norm = self.jacobian_norm(torch.cross(x, y))
        diff_x = torch.bmm(diff_norm.unsqueeze(1), diff_cross_x).squeeze(1)
        diff_y = torch.bmm(diff_norm.unsqueeze(1), diff_cross_y).squeeze(1)
        return diff_x, diff_y

    def jacobian_m(self, u):
        # d(Mu)/dM as a (b, 3, 3, 3) tensor: (d(Mu)_i / dM_jk) = I_ij u_k.
        diff_m = self.I[None, :, :, None]*u[:, None, None, :]
        return diff_m

    def jacobian_mu(self, m, u):
        # Jacobians of Mu w.r.t. both M and u (the latter is just M itself).
        diff_m = self.I[None, :, :, None]*u[:, None, None, :]
        diff_u = m.clone()
        return diff_m, diff_u

    def jacobian_angle_vector(self, u, v):
        # Gradient of angle(u, v) = atan2(|u×v|, u·v) w.r.t. u and v.
        diff_atan2_y, diff_atan2_x = self.jacobian_atan2(
            torch.cross(u, v).norm(dim=1), (u*v).sum(dim=1))
        diff_cross_norm_u, diff_cross_norm_v = self.jacobian_cross_norm(u, v)
        diff_dot_u, diff_dot_v = self.jacobian_dot(u, v)
        diff_u = diff_atan2_y[:, None] * diff_cross_norm_u + \
            diff_atan2_x[:, None] * diff_dot_u
        diff_v = diff_atan2_y[:, None] * diff_cross_norm_v + \
            diff_atan2_x[:, None] * diff_dot_v
        return diff_u, diff_v

    def grad_distance(self, rho, x_ij, g=None):
        """Gradients of |g @ rho @ x_ij| w.r.t. g and the two atom positions.

        x_ij is the fractional displacement x_j - x_i, hence the sign split
        of the returned position gradients.
        """
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm(g, rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        diff_u = self.jacobian_norm(u)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        diff_g = torch.einsum("bi,bijk->bjk", diff_u, diff_g_u)
        diff_x = torch.bmm(diff_u.unsqueeze(1), rho_prime).squeeze(1)
        diff_x_i = -diff_x
        diff_x_j = diff_x
        return diff_g, diff_x_i, diff_x_j

    def grad_distance_sym(self, rho, x_ij, g=None):
        """Same as grad_distance but with the symmetrised action g + g^T."""
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        diff_u = self.jacobian_norm(u)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        # Gradient w.r.t. g of f(g + g^T) is J + J^T of the unsymmetrised one.
        diff_g_demi = torch.einsum("bi,bijk->bjk", diff_u, diff_g_u)
        diff_g = diff_g_demi + torch.transpose(diff_g_demi, 1, 2)
        diff_x = torch.bmm(diff_u.unsqueeze(1), rho_prime).squeeze(1)
        diff_x_i = -diff_x
        diff_x_j = diff_x
        return diff_g, diff_x_i, diff_x_j

    def grad_area(self, rho, x_ij, x_ik, g=None):
        """Gradients of the triangle area 0.5 |u × v| with u, v the two
        Cartesian edge vectors, w.r.t. g and the three atom positions."""
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm(g, rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
        diff_u, diff_v = self.jacobian_cross_norm(u, v)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        diff_g_v = self.jacobian_m(
            torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
        diff_g = 0.5 * (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
                        torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
        diff_vect = rho_prime
        # x_i appears (negatively) in both x_ij and x_ik.
        diff_x_i = -0.5 * (torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
                           torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
        diff_x_j = 0.5 * torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
        diff_x_k = 0.5 * torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
        return diff_g, diff_x_i, diff_x_j, diff_x_k

    def grad_area_sym(self, rho, x_ij, x_ik, g=None):
        """Same as grad_area but with the symmetrised action g + g^T."""
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
        diff_u, diff_v = self.jacobian_cross_norm(u, v)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        diff_g_v = self.jacobian_m(
            torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
        diff_g_demi = torch.einsum(
            "bi,bijk->bjk", diff_u, diff_g_u)+torch.einsum("bi,bijk->bjk", diff_v, diff_g_v)
        diff_g = 0.5 * (diff_g_demi + torch.transpose(diff_g_demi, 1, 2))
        diff_vect = rho_prime
        diff_x_i = -0.5 * (torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
                           torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
        diff_x_j = 0.5 * torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
        diff_x_k = 0.5 * torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
        return diff_g, diff_x_i, diff_x_j, diff_x_k

    def grad_angle(self, rho, x_ij, x_ik, g=None):
        """Gradients of the angle between the two Cartesian edge vectors
        w.r.t. g and the three atom positions."""
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm(g, rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
        diff_u, diff_v = self.jacobian_angle_vector(u, v)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        diff_g_v = self.jacobian_m(
            torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
        diff_g = (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
                  torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
        diff_vect = rho_prime
        diff_x_i = -(torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
                     torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
        diff_x_j = torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
        diff_x_k = torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
        return diff_g, diff_x_i, diff_x_j, diff_x_k

    def grad_angle_sym(self, rho, x_ij, x_ik, g=None):
        """Same as grad_angle but with the symmetrised action g + g^T."""
        if g is None:
            rho_prime = rho
        else:
            rho_prime = torch.bmm((g + torch.transpose(g, 1, 2)), rho)
        u = torch.bmm(rho_prime, x_ij.unsqueeze(2)).squeeze(2)
        v = torch.bmm(rho_prime, x_ik.unsqueeze(2)).squeeze(2)
        diff_u, diff_v = self.jacobian_angle_vector(u, v)
        diff_g_u = self.jacobian_m(
            torch.bmm(rho, x_ij.unsqueeze(2)).squeeze(2))
        diff_g_v = self.jacobian_m(
            torch.bmm(rho, x_ik.unsqueeze(2)).squeeze(2))
        diff_g_demi = (torch.einsum("bi,bijk->bjk", diff_u, diff_g_u) +
                       torch.einsum("bi,bijk->bjk", diff_v, diff_g_v))
        diff_g = diff_g_demi + torch.transpose(diff_g_demi, 1, 2)
        diff_vect = rho_prime
        diff_x_i = -(torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1) +
                     torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1))
        diff_x_j = torch.bmm(diff_u.unsqueeze(1), diff_vect).squeeze(1)
        diff_x_k = torch.bmm(diff_v.unsqueeze(1), diff_vect).squeeze(1)
        return diff_g, diff_x_i, diff_x_j, diff_x_k
| 7,661 | 36.014493 | 120 | py |
pegnn | pegnn-master/src/datasets/data.py | from __future__ import annotations
import torch
import torch.nn.functional as F
from torch_geometric.data import Data
class CrystalData(Data):
    """torch_geometric Data subclass for periodic crystals.

    Stores atomic positions as fractional coordinates in [0, 1) (`pos`)
    together with the lattice matrix (`cell`, one 3x3 per structure);
    Cartesian positions are derived lazily and cached in `_pos_cart`.
    """

    def __init__(self, *args, **kwargs):
        # Optionally accept Cartesian positions and convert them to
        # fractional coordinates before delegating to Data.
        if "pos_cart" in kwargs:
            # NOTE(review): isinstance(t, torch.FloatTensor) only matches CPU
            # float32 tensors (CUDA tensors fail) -- confirm this is intended.
            assert isinstance(kwargs["cell"], torch.FloatTensor)
            assert isinstance(kwargs["pos_cart"], torch.FloatTensor)
            assert isinstance(kwargs["num_atoms"], torch.LongTensor)
            cell = kwargs["cell"]
            pos_cart = kwargs["pos_cart"]
            num_atoms = kwargs["num_atoms"]
            # Structure index for each atom, derived from per-structure counts.
            batch = torch.arange(
                num_atoms.shape[0], dtype=torch.long, device=num_atoms.device
            ).repeat_interleave(num_atoms)
            # Cartesian -> fractional: x_frac = x_cart @ cell^{-1}, wrapped.
            pos = (
                torch.matmul(pos_cart.unsqueeze(1), torch.inverse(cell)[batch]).squeeze(
                    1
                )
                % 1
            )
            kwargs["pos"] = pos
            del kwargs["pos_cart"]
        super(CrystalData, self).__init__(*args, **kwargs)
        # Cache for the derived Cartesian positions.
        self._pos_cart = None

    @property
    def cell(self) -> torch.FloatTensor:
        # NOTE(review): relies on torch_geometric Data exposing `cell`
        # through its attribute machinery -- confirm with the installed
        # PyG version.
        return super(CrystalData, self).cell

    def set_cell(self, cell):
        # NOTE(review): assigns to a read-only property; this only works if
        # Data's __setattr__ bypasses the property -- verify it does not
        # raise AttributeError at runtime.
        self.cell = cell
        self._pos_cart = None  # invalidate the Cartesian cache

    @property
    def pos(self) -> torch.FloatTensor:
        # Fractional coordinates in [0, 1).
        return super(CrystalData, self).pos

    def set_pos(self, pos: torch.FloatTensor):
        # Wrap into the unit cell and invalidate the Cartesian cache.
        # NOTE(review): same property-assignment caveat as set_cell.
        self.pos = pos % 1
        self._pos_cart = None

    @property
    def device(self) -> torch.device:
        return self.cell.device

    @property
    def cell_lengths(self) -> torch.FloatTensor:
        # (3, b): norms of the three lattice vectors per structure.
        return self.cell.norm(dim=2).t()

    @property
    def cell_angles(self) -> torch.FloatTensor:
        """Signed angles (degrees) between pairs of lattice vectors.

        For each cyclic pair (j, k) the angle is atan2(|a_j x a_k|, a_j.a_k);
        the sign is flipped when the cross product points against a_i, i.e.
        when the triad orientation is negative.
        """
        angles = torch.zeros_like(self.cell_lengths)
        i = torch.tensor([0, 1, 2], dtype=torch.long, device=self.device)
        j = torch.tensor([1, 2, 0], dtype=torch.long, device=self.device)
        k = torch.tensor([2, 0, 1], dtype=torch.long, device=self.device)
        cross = torch.cross(self.cell[:, j], self.cell[:, k], dim=2)
        dot = (self.cell[:, j] * self.cell[:, k]).sum(dim=2)
        angles[i, :] = torch.rad2deg(torch.atan2(cross.norm(dim=2), dot).t())
        inv_mask = (cross * self.cell[:, i]).sum(dim=2) < 0
        angles[inv_mask.t()] *= -1
        return angles

    @property
    def pos_cart(self) -> torch.FloatTensor:
        # Lazily computed Cartesian positions: x_cart = x_frac @ cell.
        if self._pos_cart is None:
            self._pos_cart = torch.matmul(
                self.pos.unsqueeze(1), self.cell[self.batch]
            ).squeeze(1)
        return self._pos_cart

    def set_pos_cart(self, pos_cart: torch.FloatTensor, keep_inside: bool = True):
        """Set positions from Cartesian coordinates.

        keep_inside wraps the resulting fractional coordinates into [0, 1).
        """
        pos = torch.matmul(
            pos_cart.unsqueeze(1), torch.inverse(self.cell)[self.batch]
        ).squeeze(1)
        if keep_inside:
            pos %= 1.0
        self.pos = pos
        self._pos_cart = None
| 2,855 | 27.848485 | 88 | py |
pegnn | pegnn-master/src/datasets/__init__.py | from .csv_dataset import CSVDataset
__all__ = ["CSVDataset"]
| 62 | 14.75 | 35 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.